author	Rusty Russell <rusty@rustcorp.com.au>	2008-12-31 07:35:57 -0500
committer	Rusty Russell <rusty@rustcorp.com.au>	2008-12-31 07:35:57 -0500
commit	2ca1a615835d9f4990f42102ab1f2ef434e7e89c (patch)
tree	726cf3d5f29a6c66c44e4bd68e7ebed2fd83d059 /drivers
parent	e12f0102ac81d660c9f801d0a0e10ccf4537a9de (diff)
parent	6a94cb73064c952255336cc57731904174b2c58f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: arch/x86/kernel/io_apic.c
Diffstat (limited to 'drivers')
-rw-r--r--drivers/ata/ahci.c8
-rw-r--r--drivers/ata/ata_generic.c5
-rw-r--r--drivers/ata/ata_piix.c21
-rw-r--r--drivers/ata/libata-acpi.c19
-rw-r--r--drivers/ata/libata-core.c204
-rw-r--r--drivers/ata/libata-eh.c116
-rw-r--r--drivers/ata/libata-pmp.c22
-rw-r--r--drivers/ata/libata-scsi.c26
-rw-r--r--drivers/ata/pata_bf54x.c1
-rw-r--r--drivers/ata/pata_it821x.c34
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c14
-rw-r--r--drivers/ata/pata_legacy.c18
-rw-r--r--drivers/ata/pata_oldpiix.c1
-rw-r--r--drivers/ata/pata_pdc2027x.c29
-rw-r--r--drivers/ata/pata_platform.c14
-rw-r--r--drivers/ata/pata_radisys.c1
-rw-r--r--drivers/ata/pata_rz1000.c16
-rw-r--r--drivers/ata/pata_scc.c1
-rw-r--r--drivers/ata/pata_serverworks.c1
-rw-r--r--drivers/ata/pata_sis.c1
-rw-r--r--drivers/ata/sata_mv.c1
-rw-r--r--drivers/ata/sata_sil.c98
-rw-r--r--drivers/block/cciss.c88
-rw-r--r--drivers/block/cciss.h4
-rw-r--r--drivers/block/cciss_cmd.h3
-rw-r--r--drivers/block/loop.c39
-rw-r--r--drivers/block/nbd.c10
-rw-r--r--drivers/block/virtio_blk.c43
-rw-r--r--drivers/block/xen-blkfront.c8
-rw-r--r--drivers/cdrom/cdrom.c703
-rw-r--r--drivers/char/agp/intel-agp.c11
-rw-r--r--drivers/char/ds1620.c25
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hvc_console.c1
-rw-r--r--drivers/char/nwflash.c8
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/char/virtio_console.c30
-rw-r--r--drivers/clocksource/acpi_pm.c10
-rw-r--r--drivers/firmware/dmi_scan.c16
-rw-r--r--drivers/gpu/drm/Kconfig17
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_auth.c29
-rw-r--r--drivers/gpu/drm/drm_bufs.c27
-rw-r--r--drivers/gpu/drm/drm_context.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c2446
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c826
-rw-r--r--drivers/gpu/drm/drm_drv.c95
-rw-r--r--drivers/gpu/drm/drm_edid.c732
-rw-r--r--drivers/gpu/drm/drm_fops.c223
-rw-r--r--drivers/gpu/drm/drm_gem.c111
-rw-r--r--drivers/gpu/drm/drm_hashtab.c2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c61
-rw-r--r--drivers/gpu/drm/drm_irq.c73
-rw-r--r--drivers/gpu/drm/drm_lock.c42
-rw-r--r--drivers/gpu/drm/drm_mm.c1
-rw-r--r--drivers/gpu/drm/drm_modes.c576
-rw-r--r--drivers/gpu/drm/drm_proc.c71
-rw-r--r--drivers/gpu/drm/drm_stub.c142
-rw-r--r--drivers/gpu/drm/drm_sysfs.c329
-rw-r--r--drivers/gpu/drm/drm_vm.c7
-rw-r--r--drivers/gpu/drm/i915/Makefile17
-rw-r--r--drivers/gpu/drm/i915/dvo.h157
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c454
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c368
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c442
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c302
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c335
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c338
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c42
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h93
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c628
-rw-r--r--drivers/gpu/drm/i915/i915_gem_proc.c34
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c1
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c51
-rw-r--r--drivers/gpu/drm/i915/i915_mem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h20
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c193
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h405
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c284
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1618
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h146
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c495
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c925
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c184
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c525
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c83
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c1128
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h327
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c1725
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h14
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c166
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/ide/Kconfig10
-rw-r--r--drivers/ide/Makefile3
-rw-r--r--drivers/ide/cmd64x.c4
-rw-r--r--drivers/ide/cy82c693.c1
-rw-r--r--drivers/ide/gayle.c6
-rw-r--r--drivers/ide/hpt366.c8
-rw-r--r--drivers/ide/ide-acpi.c4
-rw-r--r--drivers/ide/ide-cd.c142
-rw-r--r--drivers/ide/ide-cd.h2
-rw-r--r--drivers/ide/ide-dma-sff.c54
-rw-r--r--drivers/ide/ide-io.c292
-rw-r--r--drivers/ide/ide-ioctls.c5
-rw-r--r--drivers/ide/ide-iops.c48
-rw-r--r--drivers/ide/ide-legacy.c58
-rw-r--r--drivers/ide/ide-lib.c105
-rw-r--r--drivers/ide/ide-park.c16
-rw-r--r--drivers/ide/ide-pm.c235
-rw-r--r--drivers/ide/ide-probe.c166
-rw-r--r--drivers/ide/ide-proc.c29
-rw-r--r--drivers/ide/ide.c89
-rw-r--r--drivers/ide/ide_arm.c11
-rw-r--r--drivers/ide/pdc202xx_old.c9
-rw-r--r--drivers/ide/rz1000.c36
-rw-r--r--drivers/ide/trm290.c4
-rw-r--r--drivers/ide/tx4938ide.c4
-rw-r--r--drivers/ide/tx4939ide.c10
-rw-r--r--drivers/ide/umc8672.c11
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/addr.c47
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c48
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c5
-rw-r--r--drivers/input/keyboard/omap-keypad.c8
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c2
-rw-r--r--drivers/input/serio/Kconfig2
-rw-r--r--drivers/input/touchscreen/ads7846.c4
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c2
-rw-r--r--drivers/lguest/lg.h2
-rw-r--r--drivers/lguest/lguest_device.c8
-rw-r--r--drivers/lguest/lguest_user.c13
-rw-r--r--drivers/lguest/page_tables.c72
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/media/common/ir-keymaps.c93
-rw-r--r--drivers/media/common/saa7146_fops.c2
-rw-r--r--drivers/media/common/saa7146_video.c10
-rw-r--r--drivers/media/common/tuners/mxl5005s.c6
-rw-r--r--drivers/media/common/tuners/tda827x.c15
-rw-r--r--drivers/media/common/tuners/tda8290.c63
-rw-r--r--drivers/media/common/tuners/tda9887.c5
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c35
-rw-r--r--drivers/media/common/tuners/xc5000.c7
-rw-r--r--drivers/media/dvb/Kconfig13
-rw-r--r--drivers/media/dvb/b2c2/Kconfig1
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c77
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h134
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c71
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.h1
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c33
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h140
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c2
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-core.c3
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2.h10
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c15
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk-fe.c140
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c16
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.h1
-rw-r--r--drivers/media/dvb/dvb-usb/usb-urb.c3
-rw-r--r--drivers/media/dvb/frontends/Kconfig53
-rw-r--r--drivers/media/dvb/frontends/Makefile10
-rw-r--r--drivers/media/dvb/frontends/af9013.c14
-rw-r--r--drivers/media/dvb/frontends/cx24113.c616
-rw-r--r--drivers/media/dvb/frontends/cx24113.h11
-rw-r--r--drivers/media/dvb/frontends/cx24116.c43
-rw-r--r--drivers/media/dvb/frontends/dib7000p.h9
-rw-r--r--drivers/media/dvb/frontends/drx397xD.c12
-rw-r--r--drivers/media/dvb/frontends/drx397xD_fw.h4
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.c2
-rw-r--r--drivers/media/dvb/frontends/lgdt3304.c378
-rw-r--r--drivers/media/dvb/frontends/lgdt3304.h45
-rw-r--r--drivers/media/dvb/frontends/s5h1411.c3
-rw-r--r--drivers/media/dvb/frontends/s921_core.c216
-rw-r--r--drivers/media/dvb/frontends/s921_core.h114
-rw-r--r--drivers/media/dvb/frontends/s921_module.c190
-rw-r--r--drivers/media/dvb/frontends/s921_module.h49
-rw-r--r--drivers/media/dvb/frontends/si21xx.c1
-rw-r--r--drivers/media/dvb/frontends/stb0899_algo.c1519
-rw-r--r--drivers/media/dvb/frontends/stb0899_cfg.h287
-rw-r--r--drivers/media/dvb/frontends/stb0899_drv.c1684
-rw-r--r--drivers/media/dvb/frontends/stb0899_drv.h162
-rw-r--r--drivers/media/dvb/frontends/stb0899_priv.h267
-rw-r--r--drivers/media/dvb/frontends/stb0899_reg.h2027
-rw-r--r--drivers/media/dvb/frontends/stb6100.c545
-rw-r--r--drivers/media/dvb/frontends/stb6100.h115
-rw-r--r--drivers/media/dvb/frontends/stb6100_cfg.h108
-rw-r--r--drivers/media/dvb/frontends/tda8261.c230
-rw-r--r--drivers/media/dvb/frontends/tda8261.h55
-rw-r--r--drivers/media/dvb/frontends/tda8261_cfg.h84
-rw-r--r--drivers/media/dvb/frontends/zl10353.c3
-rw-r--r--drivers/media/dvb/siano/sms-cards.c110
-rw-r--r--drivers/media/dvb/siano/sms-cards.h13
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c78
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h36
-rw-r--r--drivers/media/dvb/siano/smsdvb.c56
-rw-r--r--drivers/media/dvb/siano/smsusb.c45
-rw-r--r--drivers/media/dvb/ttpci/Kconfig4
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c298
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c311
-rw-r--r--drivers/media/dvb/ttpci/budget.h1
-rw-r--r--drivers/media/radio/dsbr100.c381
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-cadet.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-mr800.c123
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-sf16fmi.c2
-rw-r--r--drivers/media/video/Kconfig48
-rw-r--r--drivers/media/video/Makefile12
-rw-r--r--drivers/media/video/arv.c5
-rw-r--r--drivers/media/video/bt8xx/bt832.c274
-rw-r--r--drivers/media/video/bt8xx/bt832.h305
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c72
-rw-r--r--drivers/media/video/bt8xx/bttv-gpio.c7
-rw-r--r--drivers/media/video/bt8xx/bttv.h10
-rw-r--r--drivers/media/video/bt8xx/bttvp.h2
-rw-r--r--drivers/media/video/bw-qcam.c5
-rw-r--r--drivers/media/video/c-qcam.c7
-rw-r--r--drivers/media/video/cpia.c9
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c2
-rw-r--r--drivers/media/video/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c15
-rw-r--r--drivers/media/video/cs5345.c227
-rw-r--r--drivers/media/video/cs53l32a.c188
-rw-r--r--drivers/media/video/cx18/cx18-av-audio.c231
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c106
-rw-r--r--drivers/media/video/cx18/cx18-av-core.h5
-rw-r--r--drivers/media/video/cx18/cx18-av-firmware.c28
-rw-r--r--drivers/media/video/cx18/cx18-av-vbi.c5
-rw-r--r--drivers/media/video/cx18/cx18-cards.c9
-rw-r--r--drivers/media/video/cx18/cx18-cards.h6
-rw-r--r--drivers/media/video/cx18/cx18-controls.c5
-rw-r--r--drivers/media/video/cx18/cx18-driver.c284
-rw-r--r--drivers/media/video/cx18/cx18-driver.h78
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c59
-rw-r--r--drivers/media/video/cx18/cx18-dvb.h1
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c38
-rw-r--r--drivers/media/video/cx18/cx18-firmware.c229
-rw-r--r--drivers/media/video/cx18/cx18-gpio.c23
-rw-r--r--drivers/media/video/cx18/cx18-gpio.h1
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c31
-rw-r--r--drivers/media/video/cx18/cx18-io.c198
-rw-r--r--drivers/media/video/cx18/cx18-io.h326
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c12
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.h1
-rw-r--r--drivers/media/video/cx18/cx18-irq.c163
-rw-r--r--drivers/media/video/cx18/cx18-irq.h4
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c527
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.h29
-rw-r--r--drivers/media/video/cx18/cx18-queue.c118
-rw-r--r--drivers/media/video/cx18/cx18-queue.h22
-rw-r--r--drivers/media/video/cx18/cx18-scb.c2
-rw-r--r--drivers/media/video/cx18/cx18-scb.h9
-rw-r--r--drivers/media/video/cx18/cx18-streams.c140
-rw-r--r--drivers/media/video/cx18/cx18-streams.h5
-rw-r--r--drivers/media/video/cx18/cx18-vbi.c5
-rw-r--r--drivers/media/video/cx18/cx18-version.h2
-rw-r--r--drivers/media/video/cx18/cx23418.h6
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c12
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c1
-rw-r--r--drivers/media/video/cx23885/cx23885.h1
-rw-r--r--drivers/media/video/cx25840/Kconfig2
-rw-r--r--drivers/media/video/cx25840/cx25840-audio.c14
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c447
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h7
-rw-r--r--drivers/media/video/cx25840/cx25840-firmware.c2
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c2
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c3
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c16
-rw-r--r--drivers/media/video/cx88/cx88-cards.c86
-rw-r--r--drivers/media/video/cx88/cx88-core.c3
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c230
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c4
-rw-r--r--drivers/media/video/cx88/cx88.h14
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c21
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c1464
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c628
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c14
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c49
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c311
-rw-r--r--drivers/media/video/em28xx/em28xx-reg.h153
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c851
-rw-r--r--drivers/media/video/em28xx/em28xx.h148
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c2
-rw-r--r--drivers/media/video/gspca/Kconfig23
-rw-r--r--drivers/media/video/gspca/Makefile4
-rw-r--r--drivers/media/video/gspca/conex.c2
-rw-r--r--drivers/media/video/gspca/etoms.c4
-rw-r--r--drivers/media/video/gspca/finepix.c5
-rw-r--r--drivers/media/video/gspca/gspca.c210
-rw-r--r--drivers/media/video/gspca/gspca.h25
-rw-r--r--drivers/media/video/gspca/m5602/m5602_bridge.h119
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c100
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.c135
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.h14
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.c316
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.h195
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.c166
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.h10
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c235
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.h47
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.c213
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.h25
-rw-r--r--drivers/media/video/gspca/m5602/m5602_sensor.h14
-rw-r--r--drivers/media/video/gspca/mars.c4
-rw-r--r--drivers/media/video/gspca/ov519.c172
-rw-r--r--drivers/media/video/gspca/ov534.c601
-rw-r--r--drivers/media/video/gspca/pac207.c8
-rw-r--r--drivers/media/video/gspca/pac7311.c5
-rw-r--r--drivers/media/video/gspca/sonixb.c25
-rw-r--r--drivers/media/video/gspca/sonixj.c508
-rw-r--r--drivers/media/video/gspca/spca500.c8
-rw-r--r--drivers/media/video/gspca/spca501.c148
-rw-r--r--drivers/media/video/gspca/spca505.c2
-rw-r--r--drivers/media/video/gspca/spca506.c2
-rw-r--r--drivers/media/video/gspca/spca508.c2
-rw-r--r--drivers/media/video/gspca/spca561.c522
-rw-r--r--drivers/media/video/gspca/stk014.c8
-rw-r--r--drivers/media/video/gspca/stv06xx/Kconfig9
-rw-r--r--drivers/media/video/gspca/stv06xx/Makefile9
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c522
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h107
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c535
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h263
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c430
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h275
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_sensor.h92
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c251
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h315
-rw-r--r--drivers/media/video/gspca/sunplus.c6
-rw-r--r--drivers/media/video/gspca/t613.c4
-rw-r--r--drivers/media/video/gspca/tv8532.c142
-rw-r--r--drivers/media/video/gspca/vc032x.c819
-rw-r--r--drivers/media/video/gspca/zc3xx-reg.h8
-rw-r--r--drivers/media/video/gspca/zc3xx.c1012
-rw-r--r--drivers/media/video/ir-kbd-i2c.c6
-rw-r--r--drivers/media/video/ivtv/ivtv-cards.c16
-rw-r--r--drivers/media/video/ivtv/ivtv-controls.c16
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c214
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h52
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c44
-rw-r--r--drivers/media/video/ivtv/ivtv-gpio.c324
-rw-r--r--drivers/media/video/ivtv/ivtv-gpio.h3
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c314
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.h13
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c73
-rw-r--r--drivers/media/video/ivtv/ivtv-routing.c12
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c15
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c17
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c91
-rw-r--r--drivers/media/video/m52790.c176
-rw-r--r--drivers/media/video/msp3400-driver.c402
-rw-r--r--drivers/media/video/msp3400-driver.h7
-rw-r--r--drivers/media/video/msp3400-kthreads.c34
-rw-r--r--drivers/media/video/mt9m001.c60
-rw-r--r--drivers/media/video/mt9m111.c121
-rw-r--r--drivers/media/video/mt9t031.c736
-rw-r--r--drivers/media/video/mt9v022.c46
-rw-r--r--drivers/media/video/omap24xxcam-dma.c601
-rw-r--r--drivers/media/video/omap24xxcam.c1908
-rw-r--r--drivers/media/video/omap24xxcam.h593
-rw-r--r--drivers/media/video/ov511.c5
-rw-r--r--drivers/media/video/ov772x.c1012
-rw-r--r--drivers/media/video/pms.c9
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c13
-rw-r--r--drivers/media/video/pwc/pwc-if.c2
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c3
-rw-r--r--drivers/media/video/pwc/pwc.h3
-rw-r--r--drivers/media/video/pxa_camera.c545
-rw-r--r--drivers/media/video/pxa_camera.h95
-rw-r--r--drivers/media/video/saa5246a.c7
-rw-r--r--drivers/media/video/saa5249.c7
-rw-r--r--drivers/media/video/saa7115.c763
-rw-r--r--drivers/media/video/saa7127.c421
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c52
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c24
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c14
-rw-r--r--drivers/media/video/saa7134/saa7134-tvaudio.c2
-rw-r--r--drivers/media/video/saa7134/saa7134.h3
-rw-r--r--drivers/media/video/saa717x.c610
-rw-r--r--drivers/media/video/se401.c5
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c309
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c2
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h8
-rw-r--r--drivers/media/video/soc_camera.c389
-rw-r--r--drivers/media/video/soc_camera_platform.c17
-rw-r--r--drivers/media/video/stk-webcam.c27
-rw-r--r--drivers/media/video/stv680.c5
-rw-r--r--drivers/media/video/tda7432.c252
-rw-r--r--drivers/media/video/tda9840.c188
-rw-r--r--drivers/media/video/tda9875.c348
-rw-r--r--drivers/media/video/tea6415c.c49
-rw-r--r--drivers/media/video/tea6420.c49
-rw-r--r--drivers/media/video/tlv320aic23b.c141
-rw-r--r--drivers/media/video/tuner-core.c397
-rw-r--r--drivers/media/video/tvaudio.c707
-rw-r--r--drivers/media/video/tvp514x.c1569
-rw-r--r--drivers/media/video/tvp514x_regs.h297
-rw-r--r--drivers/media/video/tvp5150.c827
-rw-r--r--drivers/media/video/tw9910.c951
-rw-r--r--drivers/media/video/upd64031a.c193
-rw-r--r--drivers/media/video/upd64083.c166
-rw-r--r--drivers/media/video/usbvideo/ibmcam.c4
-rw-r--r--drivers/media/video/usbvideo/konicawc.c4
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c9
-rw-r--r--drivers/media/video/usbvideo/ultracam.c4
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c7
-rw-r--r--drivers/media/video/usbvideo/vicam.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c11
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c143
-rw-r--r--drivers/media/video/uvc/uvc_driver.c332
-rw-r--r--drivers/media/video/uvc/uvc_queue.c23
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c115
-rw-r--r--drivers/media/video/uvc/uvc_video.c214
-rw-r--r--drivers/media/video/uvc/uvcvideo.h34
-rw-r--r--drivers/media/video/v4l2-common.c203
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c (renamed from drivers/media/video/compat_ioctl32.c)835
-rw-r--r--drivers/media/video/v4l2-dev.c365
-rw-r--r--drivers/media/video/v4l2-device.c86
-rw-r--r--drivers/media/video/v4l2-ioctl.c98
-rw-r--r--drivers/media/video/v4l2-subdev.c110
-rw-r--r--drivers/media/video/vino.c5
-rw-r--r--drivers/media/video/vp27smpx.c126
-rw-r--r--drivers/media/video/w9966.c5
-rw-r--r--drivers/media/video/wm8739.c188
-rw-r--r--drivers/media/video/wm8775.c221
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c2
-rw-r--r--drivers/media/video/zoran/zoran_card.c6
-rw-r--r--drivers/media/video/zoran/zoran_driver.c8
-rw-r--r--drivers/message/fusion/mptbase.c15
-rw-r--r--drivers/mfd/asic3.c6
-rw-r--r--drivers/mfd/mcp-core.c2
-rw-r--r--drivers/mfd/mcp-sa11x0.c2
-rw-r--r--drivers/mfd/ucb1x00-assabet.c2
-rw-r--r--drivers/mfd/ucb1x00-core.c2
-rw-r--r--drivers/mfd/ucb1x00-ts.c2
-rw-r--r--drivers/mmc/host/imxmmc.c459
-rw-r--r--drivers/mmc/host/imxmmc.h37
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/omap.c7
-rw-r--r--drivers/mmc/host/pxamci.c7
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mtd/maps/dc21285.c7
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/ixp4xx.c2
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c4
-rw-r--r--drivers/mtd/nand/s3c2410.c8
-rw-r--r--drivers/mtd/onenand/omap2.c10
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/arm/ep93xx_eth.c2
-rw-r--r--drivers/net/arm/ixp4xx_eth.c2
-rw-r--r--drivers/net/arm/ks8695net.c1
-rw-r--r--drivers/net/cs89x0.c6
-rw-r--r--drivers/net/eexpress.h56
-rw-r--r--drivers/net/irda/pxaficp_ir.c46
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/mlx4/en_main.c11
-rw-r--r--drivers/net/mlx4/en_netdev.c8
-rw-r--r--drivers/net/mlx4/en_params.c87
-rw-r--r--drivers/net/mlx4/mlx4_en.h6
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/smc911x.h3
-rw-r--r--drivers/net/smc91x.h3
-rw-r--r--drivers/net/smsc911x.c4
-rw-r--r--drivers/net/spider_net.c1
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/wan/ixp4xx_hss.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c3
-rw-r--r--drivers/oprofile/buffer_sync.c117
-rw-r--r--drivers/oprofile/cpu_buffer.c197
-rw-r--r--drivers/oprofile/cpu_buffer.h69
-rw-r--r--drivers/oprofile/oprofile_files.c15
-rw-r--r--drivers/pci/intr_remapping.c77
-rw-r--r--drivers/pci/msi.c55
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/pxa2xx_e740.c176
-rw-r--r--drivers/rtc/rtc-at91sam9.c1
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c12
-rw-r--r--drivers/s390/kvm/kvm_virtio.c34
-rw-r--r--drivers/s390/scsi/zfcp_aux.c110
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c6
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c17
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c12
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h1
-rw-r--r--drivers/s390/scsi/zfcp_def.h32
-rw-r--r--drivers/s390/scsi/zfcp_erp.c10
-rw-r--r--drivers/s390/scsi/zfcp_fc.c88
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c77
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c4
-rw-r--r--drivers/scsi/Kconfig22
-rw-r--r--drivers/scsi/Makefile5
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aacraid/commctrl.c14
-rw-r--r--drivers/scsi/aacraid/comminit.c2
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aacraid/dpcsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/aacraid/rkt.c2
-rw-r--r--drivers/scsi/aacraid/rx.c2
-rw-r--r--drivers/scsi/aacraid/sa.c2
-rw-r--r--drivers/scsi/advansys.c3
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c8
-rw-r--r--drivers/scsi/atp870u.c4
-rw-r--r--drivers/scsi/ch.c2
-rw-r--r--drivers/scsi/cxgb3i/Kbuild4
-rw-r--r--drivers/scsi/cxgb3i/Kconfig7
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i.h139
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.c770
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.h306
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c107
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c951
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c1810
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h231
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c402
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.h59
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c18
-rw-r--r--drivers/scsi/eata.c15
-rw-r--r--drivers/scsi/eata_pio.c4
-rw-r--r--drivers/scsi/esp_scsi.c6
-rw-r--r--drivers/scsi/fcoe/Makefile8
-rw-r--r--drivers/scsi/fcoe/fc_transport_fcoe.c446
-rw-r--r--drivers/scsi/fcoe/fcoe_sw.c494
-rw-r--r--drivers/scsi/fcoe/libfcoe.c1510
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/ibmmca.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c293
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h32
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c4
-rw-r--r--drivers/scsi/ide-scsi.c32
-rw-r--r--drivers/scsi/in2000.c2
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/initio.h2
-rw-r--r--drivers/scsi/ipr.c6
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/iscsi_tcp.c1621
-rw-r--r--drivers/scsi/iscsi_tcp.h88
-rw-r--r--drivers/scsi/libfc/Makefile12
-rw-r--r--drivers/scsi/libfc/fc_disc.c845
-rw-r--r--drivers/scsi/libfc/fc_elsct.c71
-rw-r--r--drivers/scsi/libfc/fc_exch.c1970
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2131
-rw-r--r--drivers/scsi/libfc/fc_frame.c89
-rw-r--r--drivers/scsi/libfc/fc_lport.c1604
-rw-r--r--drivers/scsi/libfc/fc_rport.c1291
-rw-r--r--drivers/scsi/libiscsi.c236
-rw-r--r--drivers/scsi/libiscsi_tcp.c1163
-rw-r--r--drivers/scsi/lpfc/lpfc.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c169
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c164
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c160
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c69
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h249
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c685
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_nl.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c1235
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c159
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c28
-rw-r--r--drivers/scsi/mac_esp.c100
-rw-r--r--drivers/scsi/nsp32.c3
-rw-r--r--drivers/scsi/qla1280.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c328
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h584
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h56
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c481
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1251
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h45
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c358
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c840
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c886
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c516
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c1471
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c554
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qlogicfas408.c2
-rw-r--r--drivers/scsi/scsi_error.c6
-rw-r--r--drivers/scsi/scsi_ioctl.c6
-rw-r--r--drivers/scsi/scsi_lib.c149
-rw-r--r--drivers/scsi/scsi_scan.c17
-rw-r--r--drivers/scsi/scsi_transport_fc.c13
-rw-r--r--drivers/scsi/scsi_transport_spi.c5
-rw-r--r--drivers/scsi/sd.c11
-rw-r--r--drivers/scsi/ses.c9
-rw-r--r--drivers/scsi/sr.c4
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/scsi/st.c245
-rw-r--r--drivers/scsi/stex.c3
-rw-r--r--drivers/scsi/sym53c416.c2
-rw-r--r--drivers/scsi/tmscsim.c3
-rw-r--r--drivers/scsi/u14-34f.c3
-rw-r--r--drivers/scsi/wd7000.c4
-rw-r--r--drivers/serial/Kconfig25
-rw-r--r--drivers/serial/Makefile2
-rw-r--r--drivers/serial/amba-pl010.c2
-rw-r--r--drivers/serial/amba-pl011.c2
-rw-r--r--drivers/serial/imx.c8
-rw-r--r--drivers/serial/pxa.c3
-rw-r--r--drivers/serial/s3c24a0.c118
-rw-r--r--drivers/serial/s3c6400.c151
-rw-r--r--drivers/serial/samsung.c184
-rw-r--r--drivers/serial/samsung.h9
-rw-r--r--drivers/serial/serial_lh7a40x.c3
-rw-r--r--drivers/spi/pxa2xx_spi.c2
-rw-r--r--drivers/spi/spi_s3c24xx.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c4
-rw-r--r--drivers/usb/host/ehci-orion.c17
-rw-r--r--drivers/usb/host/ohci-omap.c8
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/video/Kconfig7
-rw-r--r--drivers/video/amba-clcd.c4
-rw-r--r--drivers/video/console/vgacon.c17
-rw-r--r--drivers/video/cyber2000fb.c2
-rw-r--r--drivers/video/imxfb.c468
-rw-r--r--drivers/video/imxfb.h73
-rw-r--r--drivers/video/pxafb.c981
-rw-r--r--drivers/video/pxafb.h82
-rw-r--r--drivers/video/sa1100fb.c2
-rw-r--r--drivers/virtio/virtio.c2
-rw-r--r--drivers/virtio/virtio_balloon.c13
-rw-r--r--drivers/virtio/virtio_pci.c43
-rw-r--r--drivers/virtio/virtio_ring.c3
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sa1100_wdt.c12
-rw-r--r--drivers/xen/events.c17
657 files changed, 82905 insertions, 19874 deletions
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a67b8e7c712d..656448c7fef9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1119,14 +1119,14 @@ static void ahci_start_port(struct ata_port *ap)
 
 	/* turn on LEDs */
 	if (ap->flags & ATA_FLAG_EM) {
-		ata_port_for_each_link(link, ap) {
+		ata_for_each_link(link, ap, EDGE) {
 			emp = &pp->em_priv[link->pmp];
 			ahci_transmit_led_message(ap, emp->led_state, 4);
 		}
 	}
 
 	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
-		ata_port_for_each_link(link, ap)
+		ata_for_each_link(link, ap, EDGE)
 			ahci_init_sw_activity(link);
 
 }
@@ -1361,7 +1361,7 @@ static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
 	struct ahci_em_priv *emp;
 	int rc = 0;
 
-	ata_port_for_each_link(link, ap) {
+	ata_for_each_link(link, ap, EDGE) {
 		emp = &pp->em_priv[link->pmp];
 		rc += sprintf(buf, "%lx\n", emp->led_state);
 	}
@@ -1941,7 +1941,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 	u32 serror;
 
 	/* determine active link */
-	ata_port_for_each_link(link, ap)
+	ata_for_each_link(link, ap, EDGE)
 		if (ata_link_active(link))
 			break;
 	if (!link)
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 5c33767e66de..dc48a6398abe 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -57,10 +57,7 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
 	if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
 		dma_enabled = 0xFF;
 
-	ata_link_for_each_dev(dev, link) {
-		if (!ata_dev_enabled(dev))
-			continue;
-
+	ata_for_each_dev(dev, link, ENABLED) {
 		/* We don't really care */
 		dev->pio_mode = XFER_PIO_0;
 		dev->dma_mode = XFER_MW_DMA_0;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index c11936e13dd3..5fdf1678d0cc 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1072,20 +1072,13 @@ static int piix_broken_suspend(void)
 	 * matching is necessary because dmi_system_id.matches is
 	 * limited to four entries.
 	 */
-	if (dmi_get_system_info(DMI_SYS_VENDOR) &&
-	    dmi_get_system_info(DMI_PRODUCT_NAME) &&
-	    dmi_get_system_info(DMI_PRODUCT_VERSION) &&
-	    dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
-	    dmi_get_system_info(DMI_BOARD_VENDOR) &&
-	    dmi_get_system_info(DMI_BOARD_NAME) &&
-	    dmi_get_system_info(DMI_BOARD_VERSION) &&
-	    !strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
-	    !strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
-	    !strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
-	    !strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
-	    !strcmp(dmi_get_system_info(DMI_BOARD_VENDOR), "TOSHIBA") &&
-	    !strcmp(dmi_get_system_info(DMI_BOARD_NAME), "Portable PC") &&
-	    !strcmp(dmi_get_system_info(DMI_BOARD_VERSION), "Version A0"))
+	if (dmi_match(DMI_SYS_VENDOR, "TOSHIBA") &&
+	    dmi_match(DMI_PRODUCT_NAME, "000000") &&
+	    dmi_match(DMI_PRODUCT_VERSION, "000000") &&
+	    dmi_match(DMI_PRODUCT_SERIAL, "000000") &&
+	    dmi_match(DMI_BOARD_VENDOR, "TOSHIBA") &&
+	    dmi_match(DMI_BOARD_NAME, "Portable PC") &&
+	    dmi_match(DMI_BOARD_VERSION, "Version A0"))
 		return 1;
 
 	return 0;
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index c012307d0ba6..ef02e488d468 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -89,7 +89,7 @@ void ata_acpi_associate_sata_port(struct ata_port *ap)
 
 	ap->link.device->acpi_handle = NULL;
 
-	ata_port_for_each_link(link, ap) {
+	ata_for_each_link(link, ap, EDGE) {
 		acpi_integer adr = SATA_ADR(ap->port_no, link->pmp);
 
 		link->device->acpi_handle =
@@ -129,8 +129,8 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
 	struct ata_link *tlink;
 	struct ata_device *tdev;
 
-	ata_port_for_each_link(tlink, ap)
-		ata_link_for_each_dev(tdev, tlink)
+	ata_for_each_link(tlink, ap, EDGE)
+		ata_for_each_dev(tdev, tlink, ALL)
 			tdev->flags |= ATA_DFLAG_DETACH;
 }
 
@@ -588,12 +588,9 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
 {
 	struct ata_device *dev;
 
-	ata_link_for_each_dev(dev, &ap->link) {
+	ata_for_each_dev(dev, &ap->link, ENABLED) {
 		unsigned long xfer_mask, udma_mask;
 
-		if (!ata_dev_enabled(dev))
-			continue;
-
 		xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
 		ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
 
@@ -893,7 +890,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
 		 * use values set by _STM. Cache _GTF result and
 		 * schedule _GTF.
 		 */
-		ata_link_for_each_dev(dev, &ap->link) {
+		ata_for_each_dev(dev, &ap->link, ALL) {
 			ata_acpi_clear_gtf(dev);
 			if (ata_dev_enabled(dev) &&
 			    ata_dev_get_GTF(dev, NULL) >= 0)
@@ -904,7 +901,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
 		 * there's no reason to evaluate IDE _GTF early
 		 * without _STM. Clear cache and schedule _GTF.
 		 */
-		ata_link_for_each_dev(dev, &ap->link) {
+		ata_for_each_dev(dev, &ap->link, ALL) {
 			ata_acpi_clear_gtf(dev);
 			if (ata_dev_enabled(dev))
 				dev->flags |= ATA_DFLAG_ACPI_PENDING;
@@ -932,8 +929,8 @@ void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
 	if (state.event == PM_EVENT_ON)
 		acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D0);
 
-	ata_link_for_each_dev(dev, &ap->link) {
-		if (dev->acpi_handle && ata_dev_enabled(dev))
+	ata_for_each_dev(dev, &ap->link, ENABLED) {
+		if (dev->acpi_handle)
 			acpi_bus_set_power(dev->acpi_handle,
 				state.event == PM_EVENT_ON ?
 					ACPI_STATE_D0 : ACPI_STATE_D3);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index bc6695e3c848..fecca4223f8e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -163,43 +163,119 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 
-/*
- * Iterator helpers. Don't use directly.
+/**
+ * ata_link_next - link iteration helper
+ * @link: the previous link, NULL to start
+ * @ap: ATA port containing links to iterate
+ * @mode: iteration mode, one of ATA_LITER_*
+ *
+ * LOCKING:
+ * Host lock or EH context.
  *
- * LOCKING:
- * Host lock or EH context.
+ * RETURNS:
+ * Pointer to the next link.
  */
-struct ata_link *__ata_port_next_link(struct ata_port *ap,
-				      struct ata_link *link, bool dev_only)
+struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
+			       enum ata_link_iter_mode mode)
 {
+	BUG_ON(mode != ATA_LITER_EDGE &&
+	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
+
 	/* NULL link indicates start of iteration */
-	if (!link) {
-		if (dev_only && sata_pmp_attached(ap))
-			return ap->pmp_link;
-		return &ap->link;
-	}
+	if (!link)
+		switch (mode) {
+		case ATA_LITER_EDGE:
+		case ATA_LITER_PMP_FIRST:
+			if (sata_pmp_attached(ap))
+				return ap->pmp_link;
+			/* fall through */
+		case ATA_LITER_HOST_FIRST:
+			return &ap->link;
+		}
 
-	/* we just iterated over the host master link, what's next? */
-	if (link == &ap->link) {
-		if (!sata_pmp_attached(ap)) {
-			if (unlikely(ap->slave_link) && !dev_only)
+	/* we just iterated over the host link, what's next? */
+	if (link == &ap->link)
+		switch (mode) {
+		case ATA_LITER_HOST_FIRST:
+			if (sata_pmp_attached(ap))
+				return ap->pmp_link;
+			/* fall through */
+		case ATA_LITER_PMP_FIRST:
+			if (unlikely(ap->slave_link))
 				return ap->slave_link;
+			/* fall through */
+		case ATA_LITER_EDGE:
 			return NULL;
 		}
-		return ap->pmp_link;
-	}
 
 	/* slave_link excludes PMP */
 	if (unlikely(link == ap->slave_link))
 		return NULL;
 
-	/* iterate to the next PMP link */
+	/* we were over a PMP link */
 	if (++link < ap->pmp_link + ap->nr_pmp_links)
 		return link;
+
+	if (mode == ATA_LITER_PMP_FIRST)
+		return &ap->link;
+
 	return NULL;
 }
 
 /**
+ * ata_dev_next - device iteration helper
+ * @dev: the previous device, NULL to start
+ * @link: ATA link containing devices to iterate
+ * @mode: iteration mode, one of ATA_DITER_*
+ *
+ * LOCKING:
+ * Host lock or EH context.
+ *
+ * RETURNS:
+ * Pointer to the next device.
+ */
+struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
+				enum ata_dev_iter_mode mode)
+{
+	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
+	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
+
+	/* NULL dev indicates start of iteration */
+	if (!dev)
+		switch (mode) {
+		case ATA_DITER_ENABLED:
+		case ATA_DITER_ALL:
+			dev = link->device;
+			goto check;
+		case ATA_DITER_ENABLED_REVERSE:
+		case ATA_DITER_ALL_REVERSE:
+			dev = link->device + ata_link_max_devices(link) - 1;
+			goto check;
+		}
+
+ next:
+	/* move to the next one */
+	switch (mode) {
+	case ATA_DITER_ENABLED:
+	case ATA_DITER_ALL:
+		if (++dev < link->device + ata_link_max_devices(link))
+			goto check;
+		return NULL;
+	case ATA_DITER_ENABLED_REVERSE:
+	case ATA_DITER_ALL_REVERSE:
+		if (--dev >= link->device)
+			goto check;
+		return NULL;
+	}
+
+ check:
+	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
+	    !ata_dev_enabled(dev))
+		goto next;
+	return dev;
+}
+
+/**
  * ata_dev_phys_link - find physical link for a device
  * @dev: ATA device to look up physical link for
  *
@@ -1107,8 +1183,8 @@ static void ata_lpm_enable(struct ata_host *host)
 
 	for (i = 0; i < host->n_ports; i++) {
 		ap = host->ports[i];
-		ata_port_for_each_link(link, ap) {
-			ata_link_for_each_dev(dev, link)
+		ata_for_each_link(link, ap, EDGE) {
+			ata_for_each_dev(dev, link, ALL)
 				ata_dev_disable_pm(dev);
 		}
 	}
@@ -2594,11 +2670,11 @@ int ata_bus_probe(struct ata_port *ap)
 
 	ata_port_probe(ap);
 
-	ata_link_for_each_dev(dev, &ap->link)
+	ata_for_each_dev(dev, &ap->link, ALL)
 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
 
  retry:
-	ata_link_for_each_dev(dev, &ap->link) {
+	ata_for_each_dev(dev, &ap->link, ALL) {
 		/* If we issue an SRST then an ATA drive (not ATAPI)
 		 * may change configuration and be in PIO0 timing. If
 		 * we do a hard reset (or are coming from power on)
@@ -2620,7 +2696,7 @@ int ata_bus_probe(struct ata_port *ap)
 	/* reset and determine device classes */
 	ap->ops->phy_reset(ap);
 
-	ata_link_for_each_dev(dev, &ap->link) {
+	ata_for_each_dev(dev, &ap->link, ALL) {
 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
 		    dev->class != ATA_DEV_UNKNOWN)
 			classes[dev->devno] = dev->class;
@@ -2636,7 +2712,7 @@ int ata_bus_probe(struct ata_port *ap)
 	   specific sequence bass-ackwards so that PDIAG- is released by
 	   the slave device */
 
-	ata_link_for_each_dev_reverse(dev, &ap->link) {
+	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
 		if (tries[dev->devno])
 			dev->class = classes[dev->devno];
 
@@ -2653,24 +2729,19 @@ int ata_bus_probe(struct ata_port *ap)
 	if (ap->ops->cable_detect)
 		ap->cbl = ap->ops->cable_detect(ap);
 
-	/* We may have SATA bridge glue hiding here irrespective of the
-	   reported cable types and sensed types */
-	ata_link_for_each_dev(dev, &ap->link) {
-		if (!ata_dev_enabled(dev))
-			continue;
-		/* SATA drives indicate we have a bridge. We don't know which
-		   end of the link the bridge is which is a problem */
+	/* We may have SATA bridge glue hiding here irrespective of
+	 * the reported cable types and sensed types. When SATA
+	 * drives indicate we have a bridge, we don't know which end
+	 * of the link the bridge is which is a problem.
+	 */
+	ata_for_each_dev(dev, &ap->link, ENABLED)
 		if (ata_id_is_sata(dev->id))
 			ap->cbl = ATA_CBL_SATA;
-	}
 
 	/* After the identify sequence we can now set up the devices. We do
 	   this in the normal order so that the user doesn't get confused */
 
-	ata_link_for_each_dev(dev, &ap->link) {
-		if (!ata_dev_enabled(dev))
-			continue;
-
+	ata_for_each_dev(dev, &ap->link, ENABLED) {
 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
 		rc = ata_dev_configure(dev);
 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
@@ -2683,9 +2754,8 @@ int ata_bus_probe(struct ata_port *ap)
 	if (rc)
 		goto fail;
 
-	ata_link_for_each_dev(dev, &ap->link)
-		if (ata_dev_enabled(dev))
-			return 0;
+	ata_for_each_dev(dev, &ap->link, ENABLED)
+		return 0;
 
 	/* no device present, disable port */
 	ata_port_disable(ap);
@@ -3331,13 +3401,10 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 	int rc = 0, used_dma = 0, found = 0;
 
 	/* step 1: calculate xfer_mask */
-	ata_link_for_each_dev(dev, link) {
+	ata_for_each_dev(dev, link, ENABLED) {
 		unsigned long pio_mask, dma_mask;
 		unsigned int mode_mask;
 
-		if (!ata_dev_enabled(dev))
-			continue;
-
 		mode_mask = ATA_DMA_MASK_ATA;
 		if (dev->class == ATA_DEV_ATAPI)
 			mode_mask = ATA_DMA_MASK_ATAPI;
@@ -3366,10 +3433,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 			goto out;
 
 	/* step 2: always set host PIO timings */
-	ata_link_for_each_dev(dev, link) {
-		if (!ata_dev_enabled(dev))
-			continue;
-
+	ata_for_each_dev(dev, link, ENABLED) {
 		if (dev->pio_mode == 0xff) {
 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
 			rc = -EINVAL;
@@ -3383,8 +3447,8 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 	}
 
 	/* step 3: set host DMA timings */
-	ata_link_for_each_dev(dev, link) {
-		if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
+	ata_for_each_dev(dev, link, ENABLED) {
+		if (!ata_dma_enabled(dev))
 			continue;
 
 		dev->xfer_mode = dev->dma_mode;
@@ -3394,11 +3458,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 	}
 
 	/* step 4: update devices' xfer mode */
-	ata_link_for_each_dev(dev, link) {
-		/* don't update suspended devices' xfer mode */
-		if (!ata_dev_enabled(dev))
-			continue;
-
+	ata_for_each_dev(dev, link, ENABLED) {
 		rc = ata_dev_set_mode(dev);
 		if (rc)
 			goto out;
@@ -4048,6 +4108,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
+	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
 
 	/* Seagate NCQ + FLUSH CACHE firmware bug */
 	{ "ST31500341AS",	"SD15",		ATA_HORKAGE_NONCQ |
@@ -4263,9 +4324,9 @@ static int cable_is_40wire(struct ata_port *ap)
 	 * - if you have a non detect capable drive you don't want it
 	 *   to colour the choice
 	 */
-	ata_port_for_each_link(link, ap) {
-		ata_link_for_each_dev(dev, link) {
-			if (ata_dev_enabled(dev) && !ata_is_40wire(dev))
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, link, ENABLED) {
+			if (!ata_is_40wire(dev))
 				return 0;
 		}
 	}
@@ -4672,7 +4733,6 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 /**
  * ata_qc_new_init - Request an available ATA command, and initialize it
  * @dev: Device from whom we request an available command structure
- * @tag: command tag
  *
  * LOCKING:
  * None.
@@ -5218,7 +5278,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
 	}
 
 	ap->pflags |= ATA_PFLAG_PM_PENDING;
-	__ata_port_for_each_link(link, ap) {
+	ata_for_each_link(link, ap, HOST_FIRST) {
 		link->eh_info.action |= action;
 		link->eh_info.flags |= ehi_flags;
 	}
@@ -6047,8 +6107,6 @@ int ata_host_activate(struct ata_host *host, int irq,
 static void ata_port_detach(struct ata_port *ap)
 {
 	unsigned long flags;
-	struct ata_link *link;
-	struct ata_device *dev;
 
 	if (!ap->ops->error_handler)
 		goto skip_eh;
@@ -6056,28 +6114,15 @@ static void ata_port_detach(struct ata_port *ap)
 	/* tell EH we're leaving & flush EH */
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags |= ATA_PFLAG_UNLOADING;
+	ata_port_schedule_eh(ap);
 	spin_unlock_irqrestore(ap->lock, flags);
 
+	/* wait till EH commits suicide */
 	ata_port_wait_eh(ap);
 
-	/* EH is now guaranteed to see UNLOADING - EH context belongs
-	 * to us. Restore SControl and disable all existing devices.
-	 */
-	__ata_port_for_each_link(link, ap) {
-		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
-		ata_link_for_each_dev(dev, link)
-			ata_dev_disable(dev);
-	}
+	/* it better be dead now */
+	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
 
-	/* Final freeze & EH. All in-flight commands are aborted. EH
-	 * will be skipped and retrials will be terminated with bad
-	 * target.
-	 */
-	spin_lock_irqsave(ap->lock, flags);
-	ata_port_freeze(ap);	/* won't be thawed */
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	ata_port_wait_eh(ap);
 	cancel_rearming_delayed_work(&ap->hotplug_task);
 
  skip_eh:
@@ -6528,7 +6573,8 @@ EXPORT_SYMBOL_GPL(ata_base_port_ops);
 EXPORT_SYMBOL_GPL(sata_port_ops);
 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-EXPORT_SYMBOL_GPL(__ata_port_next_link);
+EXPORT_SYMBOL_GPL(ata_link_next);
+EXPORT_SYMBOL_GPL(ata_dev_next);
 EXPORT_SYMBOL_GPL(ata_std_bios_param);
 EXPORT_SYMBOL_GPL(ata_host_init);
 EXPORT_SYMBOL_GPL(ata_host_alloc);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 32da9a93ce44..8147a8386370 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -422,7 +422,7 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
 
 	if (!dev) {
 		ehi->action &= ~action;
-		ata_link_for_each_dev(tdev, link)
+		ata_for_each_dev(tdev, link, ALL)
 			ehi->dev_action[tdev->devno] &= ~action;
 	} else {
 		/* doesn't make sense for port-wide EH actions */
@@ -430,7 +430,7 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
 
 		/* break ehi->action into ehi->dev_action */
 		if (ehi->action & action) {
-			ata_link_for_each_dev(tdev, link)
+			ata_for_each_dev(tdev, link, ALL)
 				ehi->dev_action[tdev->devno] |=
 					ehi->action & action;
 			ehi->action &= ~action;
@@ -491,6 +491,31 @@ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 	return ret;
 }
 
+static void ata_eh_unload(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/* Restore SControl IPM and SPD for the next driver and
+	 * disable attached devices.
+	 */
+	ata_for_each_link(link, ap, PMP_FIRST) {
+		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
+		ata_for_each_dev(dev, link, ALL)
+			ata_dev_disable(dev);
+	}
+
+	/* freeze and set UNLOADED */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_port_freeze(ap);			/* won't be thawed */
+	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
+	ap->pflags |= ATA_PFLAG_UNLOADED;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
 /**
  * ata_scsi_error - SCSI layer error handler callback
  * @host: SCSI host on which error occurred
@@ -592,7 +617,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 		/* fetch & clear EH info */
 		spin_lock_irqsave(ap->lock, flags);
 
-		__ata_port_for_each_link(link, ap) {
+		ata_for_each_link(link, ap, HOST_FIRST) {
 			struct ata_eh_context *ehc = &link->eh_context;
 			struct ata_device *dev;
 
@@ -600,12 +625,9 @@ void ata_scsi_error(struct Scsi_Host *host)
 			link->eh_context.i = link->eh_info;
 			memset(&link->eh_info, 0, sizeof(link->eh_info));
 
-			ata_link_for_each_dev(dev, link) {
+			ata_for_each_dev(dev, link, ENABLED) {
 				int devno = dev->devno;
 
-				if (!ata_dev_enabled(dev))
-					continue;
-
 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
 				if (ata_ncq_enabled(dev))
 					ehc->saved_ncq_enabled |= 1 << devno;
@@ -621,8 +643,13 @@ void ata_scsi_error(struct Scsi_Host *host)
 	/* invoke EH, skip if unloading or suspended */
 	if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
 		ap->ops->error_handler(ap);
-	else
+	else {
+		/* if unloading, commence suicide */
+		if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
+		    !(ap->pflags & ATA_PFLAG_UNLOADED))
+			ata_eh_unload(ap);
 		ata_eh_finish(ap);
+	}
 
 	/* process port suspend request */
 	ata_eh_handle_port_suspend(ap);
@@ -644,7 +671,7 @@ void ata_scsi_error(struct Scsi_Host *host)
644 } 671 }
645 672
646 /* this run is complete, make sure EH info is clear */ 673 /* this run is complete, make sure EH info is clear */
647 __ata_port_for_each_link(link, ap) 674 ata_for_each_link(link, ap, HOST_FIRST)
648 memset(&link->eh_info, 0, sizeof(link->eh_info)); 675 memset(&link->eh_info, 0, sizeof(link->eh_info));
649 676
650 /* Clear host_eh_scheduled while holding ap->lock such 677 /* Clear host_eh_scheduled while holding ap->lock such
@@ -1025,7 +1052,7 @@ int sata_async_notification(struct ata_port *ap)
1025 struct ata_link *link; 1052 struct ata_link *link;
1026 1053
1027 /* check and notify ATAPI AN */ 1054 /* check and notify ATAPI AN */
1028 ata_port_for_each_link(link, ap) { 1055 ata_for_each_link(link, ap, EDGE) {
1029 if (!(sntf & (1 << link->pmp))) 1056 if (!(sntf & (1 << link->pmp)))
1030 continue; 1057 continue;
1031 1058
@@ -2005,7 +2032,7 @@ void ata_eh_autopsy(struct ata_port *ap)
2005{ 2032{
2006 struct ata_link *link; 2033 struct ata_link *link;
2007 2034
2008 ata_port_for_each_link(link, ap) 2035 ata_for_each_link(link, ap, EDGE)
2009 ata_eh_link_autopsy(link); 2036 ata_eh_link_autopsy(link);
2010 2037
2011 /* Handle the frigging slave link. Autopsy is done similarly 2038 /* Handle the frigging slave link. Autopsy is done similarly
@@ -2219,7 +2246,7 @@ void ata_eh_report(struct ata_port *ap)
2219{ 2246{
2220 struct ata_link *link; 2247 struct ata_link *link;
2221 2248
2222 __ata_port_for_each_link(link, ap) 2249 ata_for_each_link(link, ap, HOST_FIRST)
2223 ata_eh_link_report(link); 2250 ata_eh_link_report(link);
2224} 2251}
2225 2252
@@ -2230,7 +2257,7 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2230 struct ata_device *dev; 2257 struct ata_device *dev;
2231 2258
2232 if (clear_classes) 2259 if (clear_classes)
2233 ata_link_for_each_dev(dev, link) 2260 ata_for_each_dev(dev, link, ALL)
2234 classes[dev->devno] = ATA_DEV_UNKNOWN; 2261 classes[dev->devno] = ATA_DEV_UNKNOWN;
2235 2262
2236 return reset(link, classes, deadline); 2263 return reset(link, classes, deadline);
@@ -2294,7 +2321,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2294 2321
2295 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2322 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2296 2323
2297 ata_link_for_each_dev(dev, link) { 2324 ata_for_each_dev(dev, link, ALL) {
2298 /* If we issue an SRST then an ATA drive (not ATAPI) 2325 /* If we issue an SRST then an ATA drive (not ATAPI)
2299 * may change configuration and be in PIO0 timing. If 2326 * may change configuration and be in PIO0 timing. If
2300 * we do a hard reset (or are coming from power on) 2327 * we do a hard reset (or are coming from power on)
@@ -2355,7 +2382,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2355 "port disabled. ignoring.\n"); 2382 "port disabled. ignoring.\n");
2356 ehc->i.action &= ~ATA_EH_RESET; 2383 ehc->i.action &= ~ATA_EH_RESET;
2357 2384
2358 ata_link_for_each_dev(dev, link) 2385 ata_for_each_dev(dev, link, ALL)
2359 classes[dev->devno] = ATA_DEV_NONE; 2386 classes[dev->devno] = ATA_DEV_NONE;
2360 2387
2361 rc = 0; 2388 rc = 0;
@@ -2369,7 +2396,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2369 * bang classes and return. 2396 * bang classes and return.
2370 */ 2397 */
2371 if (reset && !(ehc->i.action & ATA_EH_RESET)) { 2398 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2372 ata_link_for_each_dev(dev, link) 2399 ata_for_each_dev(dev, link, ALL)
2373 classes[dev->devno] = ATA_DEV_NONE; 2400 classes[dev->devno] = ATA_DEV_NONE;
2374 rc = 0; 2401 rc = 0;
2375 goto out; 2402 goto out;
@@ -2454,7 +2481,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2454 /* 2481 /*
2455 * Post-reset processing 2482 * Post-reset processing
2456 */ 2483 */
2457 ata_link_for_each_dev(dev, link) { 2484 ata_for_each_dev(dev, link, ALL) {
2458 /* After the reset, the device state is PIO 0 and the 2485 /* After the reset, the device state is PIO 0 and the
2459 * controller state is undefined. Reset also wakes up 2486 * controller state is undefined. Reset also wakes up
2460 * drives from sleeping mode. 2487 * drives from sleeping mode.
@@ -2510,7 +2537,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2510 * can be reliably detected and retried. 2537 * can be reliably detected and retried.
2511 */ 2538 */
2512 nr_unknown = 0; 2539 nr_unknown = 0;
2513 ata_link_for_each_dev(dev, link) { 2540 ata_for_each_dev(dev, link, ALL) {
2514 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ 2541 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
2515 if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2542 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2516 classes[dev->devno] = ATA_DEV_NONE; 2543 classes[dev->devno] = ATA_DEV_NONE;
@@ -2619,8 +2646,8 @@ static inline void ata_eh_pull_park_action(struct ata_port *ap)
2619 2646
2620 spin_lock_irqsave(ap->lock, flags); 2647 spin_lock_irqsave(ap->lock, flags);
2621 INIT_COMPLETION(ap->park_req_pending); 2648 INIT_COMPLETION(ap->park_req_pending);
2622 ata_port_for_each_link(link, ap) { 2649 ata_for_each_link(link, ap, EDGE) {
2623 ata_link_for_each_dev(dev, link) { 2650 ata_for_each_dev(dev, link, ALL) {
2624 struct ata_eh_info *ehi = &link->eh_info; 2651 struct ata_eh_info *ehi = &link->eh_info;
2625 2652
2626 link->eh_context.i.dev_action[dev->devno] |= 2653 link->eh_context.i.dev_action[dev->devno] |=
@@ -2675,7 +2702,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2675 * be done backwards such that PDIAG- is released by the slave 2702 * be done backwards such that PDIAG- is released by the slave
2676 * device before the master device is identified. 2703 * device before the master device is identified.
2677 */ 2704 */
2678 ata_link_for_each_dev_reverse(dev, link) { 2705 ata_for_each_dev(dev, link, ALL_REVERSE) {
2679 unsigned int action = ata_eh_dev_action(dev); 2706 unsigned int action = ata_eh_dev_action(dev);
2680 unsigned int readid_flags = 0; 2707 unsigned int readid_flags = 0;
2681 2708
@@ -2744,7 +2771,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2744 /* Configure new devices forward such that user doesn't see 2771 /* Configure new devices forward such that user doesn't see
2745 * device detection messages backwards. 2772 * device detection messages backwards.
2746 */ 2773 */
2747 ata_link_for_each_dev(dev, link) { 2774 ata_for_each_dev(dev, link, ALL) {
2748 if (!(new_mask & (1 << dev->devno)) || 2775 if (!(new_mask & (1 << dev->devno)) ||
2749 dev->class == ATA_DEV_PMP) 2776 dev->class == ATA_DEV_PMP)
2750 continue; 2777 continue;
@@ -2793,10 +2820,7 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2793 int rc; 2820 int rc;
2794 2821
2795 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 2822 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
2796 ata_link_for_each_dev(dev, link) { 2823 ata_for_each_dev(dev, link, ENABLED) {
2797 if (!ata_dev_enabled(dev))
2798 continue;
2799
2800 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 2824 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
2801 struct ata_ering_entry *ent; 2825 struct ata_ering_entry *ent;
2802 2826
@@ -2813,14 +2837,11 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2813 rc = ata_do_set_mode(link, r_failed_dev); 2837 rc = ata_do_set_mode(link, r_failed_dev);
2814 2838
2815 /* if transfer mode has changed, set DUBIOUS_XFER on device */ 2839 /* if transfer mode has changed, set DUBIOUS_XFER on device */
2816 ata_link_for_each_dev(dev, link) { 2840 ata_for_each_dev(dev, link, ENABLED) {
2817 struct ata_eh_context *ehc = &link->eh_context; 2841 struct ata_eh_context *ehc = &link->eh_context;
2818 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 2842 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
2819 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 2843 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
2820 2844
2821 if (!ata_dev_enabled(dev))
2822 continue;
2823
2824 if (dev->xfer_mode != saved_xfer_mode || 2845 if (dev->xfer_mode != saved_xfer_mode ||
2825 ata_ncq_enabled(dev) != saved_ncq) 2846 ata_ncq_enabled(dev) != saved_ncq)
2826 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 2847 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
@@ -2881,9 +2902,8 @@ static int ata_link_nr_enabled(struct ata_link *link)
2881 struct ata_device *dev; 2902 struct ata_device *dev;
2882 int cnt = 0; 2903 int cnt = 0;
2883 2904
2884 ata_link_for_each_dev(dev, link) 2905 ata_for_each_dev(dev, link, ENABLED)
2885 if (ata_dev_enabled(dev)) 2906 cnt++;
2886 cnt++;
2887 return cnt; 2907 return cnt;
2888} 2908}
2889 2909
@@ -2892,7 +2912,7 @@ static int ata_link_nr_vacant(struct ata_link *link)
2892 struct ata_device *dev; 2912 struct ata_device *dev;
2893 int cnt = 0; 2913 int cnt = 0;
2894 2914
2895 ata_link_for_each_dev(dev, link) 2915 ata_for_each_dev(dev, link, ALL)
2896 if (dev->class == ATA_DEV_UNKNOWN) 2916 if (dev->class == ATA_DEV_UNKNOWN)
2897 cnt++; 2917 cnt++;
2898 return cnt; 2918 return cnt;
@@ -2918,7 +2938,7 @@ static int ata_eh_skip_recovery(struct ata_link *link)
2918 return 0; 2938 return 0;
2919 2939
2920 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 2940 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
2921 ata_link_for_each_dev(dev, link) { 2941 ata_for_each_dev(dev, link, ALL) {
2922 if (dev->class == ATA_DEV_UNKNOWN && 2942 if (dev->class == ATA_DEV_UNKNOWN &&
2923 ehc->classes[dev->devno] != ATA_DEV_NONE) 2943 ehc->classes[dev->devno] != ATA_DEV_NONE)
2924 return 0; 2944 return 0;
@@ -3026,7 +3046,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3026 DPRINTK("ENTER\n"); 3046 DPRINTK("ENTER\n");
3027 3047
3028 /* prep for recovery */ 3048 /* prep for recovery */
3029 ata_port_for_each_link(link, ap) { 3049 ata_for_each_link(link, ap, EDGE) {
3030 struct ata_eh_context *ehc = &link->eh_context; 3050 struct ata_eh_context *ehc = &link->eh_context;
3031 3051
3032 /* re-enable link? */ 3052 /* re-enable link? */
@@ -3038,7 +3058,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3038 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3058 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3039 } 3059 }
3040 3060
3041 ata_link_for_each_dev(dev, link) { 3061 ata_for_each_dev(dev, link, ALL) {
3042 if (link->flags & ATA_LFLAG_NO_RETRY) 3062 if (link->flags & ATA_LFLAG_NO_RETRY)
3043 ehc->tries[dev->devno] = 1; 3063 ehc->tries[dev->devno] = 1;
3044 else 3064 else
@@ -3068,19 +3088,19 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3068 goto out; 3088 goto out;
3069 3089
3070 /* prep for EH */ 3090 /* prep for EH */
3071 ata_port_for_each_link(link, ap) { 3091 ata_for_each_link(link, ap, EDGE) {
3072 struct ata_eh_context *ehc = &link->eh_context; 3092 struct ata_eh_context *ehc = &link->eh_context;
3073 3093
3074 /* skip EH if possible. */ 3094 /* skip EH if possible. */
3075 if (ata_eh_skip_recovery(link)) 3095 if (ata_eh_skip_recovery(link))
3076 ehc->i.action = 0; 3096 ehc->i.action = 0;
3077 3097
3078 ata_link_for_each_dev(dev, link) 3098 ata_for_each_dev(dev, link, ALL)
3079 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 3099 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3080 } 3100 }
3081 3101
3082 /* reset */ 3102 /* reset */
3083 ata_port_for_each_link(link, ap) { 3103 ata_for_each_link(link, ap, EDGE) {
3084 struct ata_eh_context *ehc = &link->eh_context; 3104 struct ata_eh_context *ehc = &link->eh_context;
3085 3105
3086 if (!(ehc->i.action & ATA_EH_RESET)) 3106 if (!(ehc->i.action & ATA_EH_RESET))
@@ -3105,8 +3125,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3105 ata_eh_pull_park_action(ap); 3125 ata_eh_pull_park_action(ap);
3106 3126
3107 deadline = jiffies; 3127 deadline = jiffies;
3108 ata_port_for_each_link(link, ap) { 3128 ata_for_each_link(link, ap, EDGE) {
3109 ata_link_for_each_dev(dev, link) { 3129 ata_for_each_dev(dev, link, ALL) {
3110 struct ata_eh_context *ehc = &link->eh_context; 3130 struct ata_eh_context *ehc = &link->eh_context;
3111 unsigned long tmp; 3131 unsigned long tmp;
3112 3132
@@ -3134,8 +3154,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3134 deadline = wait_for_completion_timeout(&ap->park_req_pending, 3154 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3135 deadline - now); 3155 deadline - now);
3136 } while (deadline); 3156 } while (deadline);
3137 ata_port_for_each_link(link, ap) { 3157 ata_for_each_link(link, ap, EDGE) {
3138 ata_link_for_each_dev(dev, link) { 3158 ata_for_each_dev(dev, link, ALL) {
3139 if (!(link->eh_context.unloaded_mask & 3159 if (!(link->eh_context.unloaded_mask &
3140 (1 << dev->devno))) 3160 (1 << dev->devno)))
3141 continue; 3161 continue;
@@ -3146,7 +3166,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3146 } 3166 }
3147 3167
3148 /* the rest */ 3168 /* the rest */
3149 ata_port_for_each_link(link, ap) { 3169 ata_for_each_link(link, ap, EDGE) {
3150 struct ata_eh_context *ehc = &link->eh_context; 3170 struct ata_eh_context *ehc = &link->eh_context;
3151 3171
3152 /* revalidate existing devices and attach new ones */ 3172 /* revalidate existing devices and attach new ones */
@@ -3172,7 +3192,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3172 * disrupting the current users of the device. 3192 * disrupting the current users of the device.
3173 */ 3193 */
3174 if (ehc->i.flags & ATA_EHI_DID_RESET) { 3194 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3175 ata_link_for_each_dev(dev, link) { 3195 ata_for_each_dev(dev, link, ALL) {
3176 if (dev->class != ATA_DEV_ATAPI) 3196 if (dev->class != ATA_DEV_ATAPI)
3177 continue; 3197 continue;
3178 rc = atapi_eh_clear_ua(dev); 3198 rc = atapi_eh_clear_ua(dev);
@@ -3183,7 +3203,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3183 3203
3184 /* configure link power saving */ 3204 /* configure link power saving */
3185 if (ehc->i.action & ATA_EH_LPM) 3205 if (ehc->i.action & ATA_EH_LPM)
3186 ata_link_for_each_dev(dev, link) 3206 ata_for_each_dev(dev, link, ALL)
3187 ata_dev_enable_pm(dev, ap->pm_policy); 3207 ata_dev_enable_pm(dev, ap->pm_policy);
3188 3208
3189 /* this link is okay now */ 3209 /* this link is okay now */
@@ -3288,7 +3308,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3288 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 3308 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3289 NULL); 3309 NULL);
3290 if (rc) { 3310 if (rc) {
3291 ata_link_for_each_dev(dev, &ap->link) 3311 ata_for_each_dev(dev, &ap->link, ALL)
3292 ata_dev_disable(dev); 3312 ata_dev_disable(dev);
3293 } 3313 }
3294 3314
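
The bulk of the libata-eh.c churn above is mechanical: the old ata_link_for_each_dev() / ata_port_for_each_link() walkers are replaced by ata_for_each_dev(dev, link, MODE) and ata_for_each_link(link, ap, MODE), where the mode token (ALL, ALL_REVERSE, ENABLED, EDGE, HOST_FIRST, PMP_FIRST) tells the iterator which devices or links to yield, so the open-coded "if (!ata_dev_enabled(dev)) continue;" checks can simply be dropped. A rough sketch of how such mode-filtering iterators fit together (simplified from include/linux/libata.h in this cycle, quoted from memory rather than verbatim):

	#define ata_for_each_link(link, ap, mode) \
		for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \
		     (link) = ata_link_next((link), (ap), ATA_LITER_##mode))

	#define ata_for_each_dev(dev, link, mode) \
		for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \
		     (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))

	/* old pattern (do_something() is a stand-in for the loop body): */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;
		do_something(dev);
	}

	/* new equivalent: */
	ata_for_each_dev(dev, link, ENABLED)
		do_something(dev);

The filtering lives in ata_link_next()/ata_dev_next(), which skip entries that do not match the requested mode; EDGE walks the PMP fan-out links when a port multiplier is attached and just the host link otherwise, while HOST_FIRST/PMP_FIRST fix the traversal order when both are wanted.
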
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index b65db309c181..98ca07a2db87 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -321,7 +321,7 @@ static void sata_pmp_quirks(struct ata_port *ap)
321 321
322 if (vendor == 0x1095 && devid == 0x3726) { 322 if (vendor == 0x1095 && devid == 0x3726) {
323 /* sil3726 quirks */ 323 /* sil3726 quirks */
324 ata_port_for_each_link(link, ap) { 324 ata_for_each_link(link, ap, EDGE) {
325 /* Class code report is unreliable and SRST 325 /* Class code report is unreliable and SRST
326 * times out under certain configurations. 326 * times out under certain configurations.
327 */ 327 */
@@ -336,7 +336,7 @@ static void sata_pmp_quirks(struct ata_port *ap)
336 } 336 }
337 } else if (vendor == 0x1095 && devid == 0x4723) { 337 } else if (vendor == 0x1095 && devid == 0x4723) {
338 /* sil4723 quirks */ 338 /* sil4723 quirks */
339 ata_port_for_each_link(link, ap) { 339 ata_for_each_link(link, ap, EDGE) {
340 /* class code report is unreliable */ 340 /* class code report is unreliable */
341 if (link->pmp < 2) 341 if (link->pmp < 2)
342 link->flags |= ATA_LFLAG_ASSUME_ATA; 342 link->flags |= ATA_LFLAG_ASSUME_ATA;
@@ -348,7 +348,7 @@ static void sata_pmp_quirks(struct ata_port *ap)
348 } 348 }
349 } else if (vendor == 0x1095 && devid == 0x4726) { 349 } else if (vendor == 0x1095 && devid == 0x4726) {
350 /* sil4726 quirks */ 350 /* sil4726 quirks */
351 ata_port_for_each_link(link, ap) { 351 ata_for_each_link(link, ap, EDGE) {
352 /* Class code report is unreliable and SRST 352 /* Class code report is unreliable and SRST
353 * times out under certain configurations. 353 * times out under certain configurations.
354 * Config device can be at port 0 or 5 and 354 * Config device can be at port 0 or 5 and
@@ -450,7 +450,7 @@ int sata_pmp_attach(struct ata_device *dev)
450 if (ap->ops->pmp_attach) 450 if (ap->ops->pmp_attach)
451 ap->ops->pmp_attach(ap); 451 ap->ops->pmp_attach(ap);
452 452
453 ata_port_for_each_link(tlink, ap) 453 ata_for_each_link(tlink, ap, EDGE)
454 sata_link_init_spd(tlink); 454 sata_link_init_spd(tlink);
455 455
456 ata_acpi_associate_sata_port(ap); 456 ata_acpi_associate_sata_port(ap);
@@ -487,7 +487,7 @@ static void sata_pmp_detach(struct ata_device *dev)
487 if (ap->ops->pmp_detach) 487 if (ap->ops->pmp_detach)
488 ap->ops->pmp_detach(ap); 488 ap->ops->pmp_detach(ap);
489 489
490 ata_port_for_each_link(tlink, ap) 490 ata_for_each_link(tlink, ap, EDGE)
491 ata_eh_detach_dev(tlink->device); 491 ata_eh_detach_dev(tlink->device);
492 492
493 spin_lock_irqsave(ap->lock, flags); 493 spin_lock_irqsave(ap->lock, flags);
@@ -700,7 +700,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
700 } 700 }
701 701
702 /* PMP is reset, SErrors cannot be trusted, scan all */ 702 /* PMP is reset, SErrors cannot be trusted, scan all */
703 ata_port_for_each_link(tlink, ap) { 703 ata_for_each_link(tlink, ap, EDGE) {
704 struct ata_eh_context *ehc = &tlink->eh_context; 704 struct ata_eh_context *ehc = &tlink->eh_context;
705 705
706 ehc->i.probe_mask |= ATA_ALL_DEVICES; 706 ehc->i.probe_mask |= ATA_ALL_DEVICES;
@@ -768,7 +768,7 @@ static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
768 768
769 spin_lock_irqsave(ap->lock, flags); 769 spin_lock_irqsave(ap->lock, flags);
770 770
771 ata_port_for_each_link(link, ap) { 771 ata_for_each_link(link, ap, EDGE) {
772 if (!(link->flags & ATA_LFLAG_DISABLED)) 772 if (!(link->flags & ATA_LFLAG_DISABLED))
773 continue; 773 continue;
774 774
@@ -852,7 +852,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
852 int cnt, rc; 852 int cnt, rc;
853 853
854 pmp_tries = ATA_EH_PMP_TRIES; 854 pmp_tries = ATA_EH_PMP_TRIES;
855 ata_port_for_each_link(link, ap) 855 ata_for_each_link(link, ap, EDGE)
856 link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES; 856 link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
857 857
858 retry: 858 retry:
@@ -861,7 +861,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
861 rc = ata_eh_recover(ap, ops->prereset, ops->softreset, 861 rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
862 ops->hardreset, ops->postreset, NULL); 862 ops->hardreset, ops->postreset, NULL);
863 if (rc) { 863 if (rc) {
864 ata_link_for_each_dev(dev, &ap->link) 864 ata_for_each_dev(dev, &ap->link, ALL)
865 ata_dev_disable(dev); 865 ata_dev_disable(dev);
866 return rc; 866 return rc;
867 } 867 }
@@ -870,7 +870,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
870 return 0; 870 return 0;
871 871
872 /* new PMP online */ 872 /* new PMP online */
873 ata_port_for_each_link(link, ap) 873 ata_for_each_link(link, ap, EDGE)
874 link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES; 874 link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
875 875
876 /* fall through */ 876 /* fall through */
@@ -942,7 +942,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
942 } 942 }
943 943
944 cnt = 0; 944 cnt = 0;
945 ata_port_for_each_link(link, ap) { 945 ata_for_each_link(link, ap, EDGE) {
946 if (!(gscr_error & (1 << link->pmp))) 946 if (!(gscr_error & (1 << link->pmp)))
947 continue; 947 continue;
948 948
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 47c7afcb36f2..4040d8b53216 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -517,7 +517,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
517 /* Good values for timeout and retries? Values below 517 /* Good values for timeout and retries? Values below
518 from scsi_ioctl_send_command() for default case... */ 518 from scsi_ioctl_send_command() for default case... */
519 cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, 519 cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
520 sensebuf, (10*HZ), 5, 0); 520 sensebuf, (10*HZ), 5, 0, NULL);
521 521
522 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ 522 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
523 u8 *desc = sensebuf + 8; 523 u8 *desc = sensebuf + 8;
@@ -603,7 +603,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
603 /* Good values for timeout and retries? Values below 603 /* Good values for timeout and retries? Values below
604 from scsi_ioctl_send_command() for default case... */ 604 from scsi_ioctl_send_command() for default case... */
605 cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0, 605 cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
606 sensebuf, (10*HZ), 5, 0); 606 sensebuf, (10*HZ), 5, 0, NULL);
607 607
608 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ 608 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
609 u8 *desc = sensebuf + 8; 609 u8 *desc = sensebuf + 8;
@@ -3229,12 +3229,12 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3229 return; 3229 return;
3230 3230
3231 repeat: 3231 repeat:
3232 ata_port_for_each_link(link, ap) { 3232 ata_for_each_link(link, ap, EDGE) {
3233 ata_link_for_each_dev(dev, link) { 3233 ata_for_each_dev(dev, link, ENABLED) {
3234 struct scsi_device *sdev; 3234 struct scsi_device *sdev;
3235 int channel = 0, id = 0; 3235 int channel = 0, id = 0;
3236 3236
3237 if (!ata_dev_enabled(dev) || dev->sdev) 3237 if (dev->sdev)
3238 continue; 3238 continue;
3239 3239
3240 if (ata_is_host_link(link)) 3240 if (ata_is_host_link(link))
@@ -3255,9 +3255,9 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3255 * failure occurred, scan would have failed silently. Check 3255 * failure occurred, scan would have failed silently. Check
3256 * whether all devices are attached. 3256 * whether all devices are attached.
3257 */ 3257 */
3258 ata_port_for_each_link(link, ap) { 3258 ata_for_each_link(link, ap, EDGE) {
3259 ata_link_for_each_dev(dev, link) { 3259 ata_for_each_dev(dev, link, ENABLED) {
3260 if (ata_dev_enabled(dev) && !dev->sdev) 3260 if (!dev->sdev)
3261 goto exit_loop; 3261 goto exit_loop;
3262 } 3262 }
3263 } 3263 }
@@ -3381,7 +3381,7 @@ static void ata_scsi_handle_link_detach(struct ata_link *link)
3381 struct ata_port *ap = link->ap; 3381 struct ata_port *ap = link->ap;
3382 struct ata_device *dev; 3382 struct ata_device *dev;
3383 3383
3384 ata_link_for_each_dev(dev, link) { 3384 ata_for_each_dev(dev, link, ALL) {
3385 unsigned long flags; 3385 unsigned long flags;
3386 3386
3387 if (!(dev->flags & ATA_DFLAG_DETACHED)) 3387 if (!(dev->flags & ATA_DFLAG_DETACHED))
@@ -3496,7 +3496,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3496 if (devno == SCAN_WILD_CARD) { 3496 if (devno == SCAN_WILD_CARD) {
3497 struct ata_link *link; 3497 struct ata_link *link;
3498 3498
3499 ata_port_for_each_link(link, ap) { 3499 ata_for_each_link(link, ap, EDGE) {
3500 struct ata_eh_info *ehi = &link->eh_info; 3500 struct ata_eh_info *ehi = &link->eh_info;
3501 ehi->probe_mask |= ATA_ALL_DEVICES; 3501 ehi->probe_mask |= ATA_ALL_DEVICES;
3502 ehi->action |= ATA_EH_RESET; 3502 ehi->action |= ATA_EH_RESET;
@@ -3544,11 +3544,11 @@ void ata_scsi_dev_rescan(struct work_struct *work)
3544 3544
3545 spin_lock_irqsave(ap->lock, flags); 3545 spin_lock_irqsave(ap->lock, flags);
3546 3546
3547 ata_port_for_each_link(link, ap) { 3547 ata_for_each_link(link, ap, EDGE) {
3548 ata_link_for_each_dev(dev, link) { 3548 ata_for_each_dev(dev, link, ENABLED) {
3549 struct scsi_device *sdev = dev->sdev; 3549 struct scsi_device *sdev = dev->sdev;
3550 3550
3551 if (!ata_dev_enabled(dev) || !sdev) 3551 if (!sdev)
3552 continue; 3552 continue;
3553 if (scsi_device_get(sdev)) 3553 if (scsi_device_get(sdev))
3554 continue; 3554 continue;
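
Aside from the iterator conversions, the only substantive libata-scsi.c change above is the extra trailing NULL in the two scsi_execute() calls: scsi_execute() gained an int *resid out-parameter in this cycle so callers can retrieve the residual (untransferred) byte count, and callers that do not care pass NULL. Roughly (prototype reproduced from memory; see include/scsi/scsi_device.h for the authoritative one):

	int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
			 int data_direction, void *buffer, unsigned bufflen,
			 unsigned char *sense, int timeout, int retries,
			 int flags, int *resid);

	/* hypothetical caller that does want the residual: */
	int resid;
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, buf, buflen,
			      sensebuf, 10 * HZ, 5, 0, &resid);
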
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 1266924c11f9..1050fed96b2b 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -356,7 +356,6 @@ static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
356 * bfin_set_dmamode - Initialize host controller PATA DMA timings 356 * bfin_set_dmamode - Initialize host controller PATA DMA timings
357 * @ap: Port whose timings we are configuring 357 * @ap: Port whose timings we are configuring
358 * @adev: um 358 * @adev: um
359 * @udma: udma mode, 0 - 6
360 * 359 *
361 * Set UDMA mode for device. 360 * Set UDMA mode for device.
362 * 361 *
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 860ede526282..f828a29d7756 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -465,24 +465,22 @@ static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unus
465{ 465{
466 struct ata_device *dev; 466 struct ata_device *dev;
467 467
468 ata_link_for_each_dev(dev, link) { 468 ata_for_each_dev(dev, link, ENABLED) {
469 if (ata_dev_enabled(dev)) { 469 /* We don't really care */
470 /* We don't really care */ 470 dev->pio_mode = XFER_PIO_0;
471 dev->pio_mode = XFER_PIO_0; 471 dev->dma_mode = XFER_MW_DMA_0;
472 dev->dma_mode = XFER_MW_DMA_0; 472 /* We do need the right mode information for DMA or PIO
473 /* We do need the right mode information for DMA or PIO 473 and this comes from the current configuration flags */
474 and this comes from the current configuration flags */ 474 if (ata_id_has_dma(dev->id)) {
475 if (ata_id_has_dma(dev->id)) { 475 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
476 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n"); 476 dev->xfer_mode = XFER_MW_DMA_0;
477 dev->xfer_mode = XFER_MW_DMA_0; 477 dev->xfer_shift = ATA_SHIFT_MWDMA;
478 dev->xfer_shift = ATA_SHIFT_MWDMA; 478 dev->flags &= ~ATA_DFLAG_PIO;
479 dev->flags &= ~ATA_DFLAG_PIO; 479 } else {
480 } else { 480 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
481 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n"); 481 dev->xfer_mode = XFER_PIO_0;
482 dev->xfer_mode = XFER_PIO_0; 482 dev->xfer_shift = ATA_SHIFT_PIO;
483 dev->xfer_shift = ATA_SHIFT_PIO; 483 dev->flags |= ATA_DFLAG_PIO;
484 dev->flags |= ATA_DFLAG_PIO;
485 }
486 } 484 }
487 } 485 }
488 return 0; 486 return 0;
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 2014253f6c88..b173c157ab00 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -30,14 +30,12 @@ static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
30{ 30{
31 struct ata_device *dev; 31 struct ata_device *dev;
32 32
33 ata_link_for_each_dev(dev, link) { 33 ata_for_each_dev(dev, link, ENABLED) {
34 if (ata_dev_enabled(dev)) { 34 ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
35 ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n"); 35 dev->pio_mode = XFER_PIO_0;
36 dev->pio_mode = XFER_PIO_0; 36 dev->xfer_mode = XFER_PIO_0;
37 dev->xfer_mode = XFER_PIO_0; 37 dev->xfer_shift = ATA_SHIFT_PIO;
38 dev->xfer_shift = ATA_SHIFT_PIO; 38 dev->flags |= ATA_DFLAG_PIO;
39 dev->flags |= ATA_DFLAG_PIO;
40 }
41 } 39 }
42 return 0; 40 return 0;
43} 41}
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 930c2208640b..6c1d778b63a9 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -194,15 +194,12 @@ static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
194{ 194{
195 struct ata_device *dev; 195 struct ata_device *dev;
196 196
197 ata_link_for_each_dev(dev, link) { 197 ata_for_each_dev(dev, link, ENABLED) {
198 if (ata_dev_enabled(dev)) { 198 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
199 ata_dev_printk(dev, KERN_INFO, 199 dev->pio_mode = XFER_PIO_0;
200 "configured for PIO\n"); 200 dev->xfer_mode = XFER_PIO_0;
201 dev->pio_mode = XFER_PIO_0; 201 dev->xfer_shift = ATA_SHIFT_PIO;
202 dev->xfer_mode = XFER_PIO_0; 202 dev->flags |= ATA_DFLAG_PIO;
203 dev->xfer_shift = ATA_SHIFT_PIO;
204 dev->flags |= ATA_DFLAG_PIO;
205 }
206 } 203 }
207 return 0; 204 return 0;
208} 205}
@@ -641,7 +638,6 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
641 * qdi6580dp_set_piomode - PIO setup for dual channel 638 * qdi6580dp_set_piomode - PIO setup for dual channel
642 * @ap: Port 639 * @ap: Port
643 * @adev: Device 640 * @adev: Device
644 * @irq: interrupt line
645 * 641 *
646 * In dual channel mode the 6580 has one clock per channel and we have 642 * In dual channel mode the 6580 has one clock per channel and we have
647 * to software clockswitch in qc_issue. 643 * to software clockswitch in qc_issue.
@@ -1028,7 +1024,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
1028 /* Nothing found means we drop the port as its probably not there */ 1024 /* Nothing found means we drop the port as its probably not there */
1029 1025
1030 ret = -ENODEV; 1026 ret = -ENODEV;
1031 ata_link_for_each_dev(dev, &ap->link) { 1027 ata_for_each_dev(dev, &ap->link, ALL) {
1032 if (!ata_dev_absent(dev)) { 1028 if (!ata_dev_absent(dev)) {
1033 legacy_host[probe->slot] = host; 1029 legacy_host[probe->slot] = host;
1034 ld->platform_dev = pdev; 1030 ld->platform_dev = pdev;
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index c0dbc46a348e..2c1a91c40c1a 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -116,7 +116,6 @@ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
116 * oldpiix_set_dmamode - Initialize host controller PATA DMA timings 116 * oldpiix_set_dmamode - Initialize host controller PATA DMA timings
117 * @ap: Port whose timings we are configuring 117 * @ap: Port whose timings we are configuring
118 * @adev: Device to program 118 * @adev: Device to program
119 * @isich: True if the device is an ICH and has IOCFG registers
120 * 119 *
121 * Set MWDMA mode for device, in host controller PCI config space. 120 * Set MWDMA mode for device, in host controller PCI config space.
122 * 121 *
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 0e1c2c1134d3..e94efccaa482 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -281,7 +281,6 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
281 * pdc2027x_set_piomode - Initialize host controller PATA PIO timings 281 * pdc2027x_set_piomode - Initialize host controller PATA PIO timings
282 * @ap: Port to configure 282 * @ap: Port to configure
283 * @adev: um 283 * @adev: um
284 * @pio: PIO mode, 0 - 4
285 * 284 *
286 * Set PIO mode for device. 285 * Set PIO mode for device.
287 * 286 *
@@ -326,7 +325,6 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
326 * pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings 325 * pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
327 * @ap: Port to configure 326 * @ap: Port to configure
328 * @adev: um 327 * @adev: um
329 * @udma: udma mode, XFER_UDMA_0 to XFER_UDMA_6
330 * 328 *
331 * Set UDMA mode for device. 329 * Set UDMA mode for device.
332 * 330 *
@@ -406,23 +404,20 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
406 if (rc < 0) 404 if (rc < 0)
407 return rc; 405 return rc;
408 406
409 ata_link_for_each_dev(dev, link) { 407 ata_for_each_dev(dev, link, ENABLED) {
410 if (ata_dev_enabled(dev)) { 408 pdc2027x_set_piomode(ap, dev);
411 409
412 pdc2027x_set_piomode(ap, dev); 410 /*
 411 * Enable prefetch if the device supports PIO only.
412 */
413 if (dev->xfer_shift == ATA_SHIFT_PIO) {
414 u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1));
415 ctcr1 |= (1 << 25);
416 iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
413 417
414 /* 418 PDPRINTK("Turn on prefetch\n");
415 * Enable prefetch if the device support PIO only. 419 } else {
416 */ 420 pdc2027x_set_dmamode(ap, dev);
417 if (dev->xfer_shift == ATA_SHIFT_PIO) {
418 u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1));
419 ctcr1 |= (1 << 25);
420 iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
421
422 PDPRINTK("Turn on prefetch\n");
423 } else {
424 pdc2027x_set_dmamode(ap, dev);
425 }
426 } 421 }
427 } 422 }
428 return 0; 423 return 0;
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 77e4e3b17f54..6afa07a37648 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -34,14 +34,12 @@ static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unu
34{ 34{
35 struct ata_device *dev; 35 struct ata_device *dev;
36 36
37 ata_link_for_each_dev(dev, link) { 37 ata_for_each_dev(dev, link, ENABLED) {
38 if (ata_dev_enabled(dev)) { 38 /* We don't really care */
39 /* We don't really care */ 39 dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
40 dev->pio_mode = dev->xfer_mode = XFER_PIO_0; 40 dev->xfer_shift = ATA_SHIFT_PIO;
41 dev->xfer_shift = ATA_SHIFT_PIO; 41 dev->flags |= ATA_DFLAG_PIO;
42 dev->flags |= ATA_DFLAG_PIO; 42 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
43 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
44 }
45 } 43 }
46 return 0; 44 return 0;
47} 45}
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 0b0aa452de14..695d44ae52c6 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -81,7 +81,6 @@ static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
81 * radisys_set_dmamode - Initialize host controller PATA DMA timings 81 * radisys_set_dmamode - Initialize host controller PATA DMA timings
82 * @ap: Port whose timings we are configuring 82 * @ap: Port whose timings we are configuring
83 * @adev: Device to program 83 * @adev: Device to program
84 * @isich: True if the device is an ICH and has IOCFG registers
85 * 84 *
86 * Set MWDMA mode for device, in host controller PCI config space. 85 * Set MWDMA mode for device, in host controller PCI config space.
87 * 86 *
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 7dfd1f3f6f3a..46d6bc1bf1e9 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -38,15 +38,13 @@ static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
38{ 38{
39 struct ata_device *dev; 39 struct ata_device *dev;
40 40
41 ata_link_for_each_dev(dev, link) { 41 ata_for_each_dev(dev, link, ENABLED) {
42 if (ata_dev_enabled(dev)) { 42 /* We don't really care */
43 /* We don't really care */ 43 dev->pio_mode = XFER_PIO_0;
44 dev->pio_mode = XFER_PIO_0; 44 dev->xfer_mode = XFER_PIO_0;
45 dev->xfer_mode = XFER_PIO_0; 45 dev->xfer_shift = ATA_SHIFT_PIO;
46 dev->xfer_shift = ATA_SHIFT_PIO; 46 dev->flags |= ATA_DFLAG_PIO;
47 dev->flags |= ATA_DFLAG_PIO; 47 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
48 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
49 }
50 } 48 }
51 return 0; 49 return 0;
52} 50}
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index cf3707e516a2..d447f1cb46ec 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -210,7 +210,6 @@ static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
210 * scc_set_dmamode - Initialize host controller PATA DMA timings 210 * scc_set_dmamode - Initialize host controller PATA DMA timings
211 * @ap: Port whose timings we are configuring 211 * @ap: Port whose timings we are configuring
212 * @adev: um 212 * @adev: um
213 * @udma: udma mode, 0 - 6
214 * 213 *
215 * Set UDMA mode for device. 214 * Set UDMA mode for device.
216 * 215 *
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 72e41c9f969b..8d2fd9dd40c7 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -138,7 +138,6 @@ static struct sv_cable_table cable_detect[] = {
138/** 138/**
139 * serverworks_cable_detect - cable detection 139 * serverworks_cable_detect - cable detection
140 * @ap: ATA port 140 * @ap: ATA port
141 * @deadline: deadline jiffies for the operation
142 * 141 *
143 * Perform cable detection according to the device and subvendor 142 * Perform cable detection according to the device and subvendor
144 * identifications 143 * identifications
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index e4be55e047f6..27ceb42a774b 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -112,7 +112,6 @@ static int sis_133_cable_detect(struct ata_port *ap)
112/** 112/**
113 * sis_66_cable_detect - check for 40/80 pin 113 * sis_66_cable_detect - check for 40/80 pin
114 * @ap: Port 114 * @ap: Port
115 * @deadline: deadline jiffies for the operation
116 * 115 *
117 * Perform cable detection on the UDMA66, UDMA100 and early UDMA133 116 * Perform cable detection on the UDMA66, UDMA100 and early UDMA133
118 * SiS IDE controllers. 117 * SiS IDE controllers.
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 2b24ae58b52e..86918634a4c5 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1836,7 +1836,6 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
1836/** 1836/**
1837 * mv_err_intr - Handle error interrupts on the port 1837 * mv_err_intr - Handle error interrupts on the port
1838 * @ap: ATA channel to manipulate 1838 * @ap: ATA channel to manipulate
1839 * @qc: affected command (non-NCQ), or NULL
1840 * 1839 *
1841 * Most cases require a full reset of the chip's state machine, 1840 * Most cases require a full reset of the chip's state machine,
1842 * which also performs a COMRESET. 1841 * which also performs a COMRESET.
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 031d7b7dee34..564c142b03b0 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -46,7 +46,9 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "sata_sil" 48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "2.3" 49#define DRV_VERSION "2.4"
50
51#define SIL_DMA_BOUNDARY 0x7fffffffUL
50 52
51enum { 53enum {
52 SIL_MMIO_BAR = 5, 54 SIL_MMIO_BAR = 5,
@@ -118,6 +120,10 @@ static void sil_dev_config(struct ata_device *dev);
118static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 120static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
119static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 121static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
120static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); 122static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
123static void sil_qc_prep(struct ata_queued_cmd *qc);
124static void sil_bmdma_setup(struct ata_queued_cmd *qc);
125static void sil_bmdma_start(struct ata_queued_cmd *qc);
126static void sil_bmdma_stop(struct ata_queued_cmd *qc);
121static void sil_freeze(struct ata_port *ap); 127static void sil_freeze(struct ata_port *ap);
122static void sil_thaw(struct ata_port *ap); 128static void sil_thaw(struct ata_port *ap);
123 129
@@ -167,13 +173,22 @@ static struct pci_driver sil_pci_driver = {
167}; 173};
168 174
169static struct scsi_host_template sil_sht = { 175static struct scsi_host_template sil_sht = {
170 ATA_BMDMA_SHT(DRV_NAME), 176 ATA_BASE_SHT(DRV_NAME),
177 /** These controllers support Large Block Transfer which allows
178 transfer chunks up to 2GB and which cross 64KB boundaries,
179 therefore the DMA limits are more relaxed than standard ATA SFF. */
180 .dma_boundary = SIL_DMA_BOUNDARY,
181 .sg_tablesize = ATA_MAX_PRD
171}; 182};
172 183
173static struct ata_port_operations sil_ops = { 184static struct ata_port_operations sil_ops = {
174 .inherits = &ata_bmdma_port_ops, 185 .inherits = &ata_bmdma_port_ops,
175 .dev_config = sil_dev_config, 186 .dev_config = sil_dev_config,
176 .set_mode = sil_set_mode, 187 .set_mode = sil_set_mode,
188 .bmdma_setup = sil_bmdma_setup,
189 .bmdma_start = sil_bmdma_start,
190 .bmdma_stop = sil_bmdma_stop,
191 .qc_prep = sil_qc_prep,
177 .freeze = sil_freeze, 192 .freeze = sil_freeze,
178 .thaw = sil_thaw, 193 .thaw = sil_thaw,
179 .scr_read = sil_scr_read, 194 .scr_read = sil_scr_read,
@@ -249,6 +264,83 @@ module_param(slow_down, int, 0444);
249MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)"); 264MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
250 265
251 266
267static void sil_bmdma_stop(struct ata_queued_cmd *qc)
268{
269 struct ata_port *ap = qc->ap;
270 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
271 void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
272
273 /* clear start/stop bit - can safely always write 0 */
274 iowrite8(0, bmdma2);
275
276 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
277 ata_sff_dma_pause(ap);
278}
279
280static void sil_bmdma_setup(struct ata_queued_cmd *qc)
281{
282 struct ata_port *ap = qc->ap;
283 void __iomem *bmdma = ap->ioaddr.bmdma_addr;
284
285 /* load PRD table addr. */
286 iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS);
287
288 /* issue r/w command */
289 ap->ops->sff_exec_command(ap, &qc->tf);
290}
291
292static void sil_bmdma_start(struct ata_queued_cmd *qc)
293{
294 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
295 struct ata_port *ap = qc->ap;
296 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
297 void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
298 u8 dmactl = ATA_DMA_START;
299
300 /* set transfer direction, start host DMA transaction
301 Note: For Large Block Transfer to work, the DMA must be started
302 using the bmdma2 register. */
303 if (!rw)
304 dmactl |= ATA_DMA_WR;
305 iowrite8(dmactl, bmdma2);
306}
307
308/* The way God intended PCI IDE scatter/gather lists to look and behave... */
309static void sil_fill_sg(struct ata_queued_cmd *qc)
310{
311 struct scatterlist *sg;
312 struct ata_port *ap = qc->ap;
313 struct ata_prd *prd, *last_prd = NULL;
314 unsigned int si;
315
316 prd = &ap->prd[0];
317 for_each_sg(qc->sg, sg, qc->n_elem, si) {
318 /* Note h/w doesn't support 64-bit, so we unconditionally
319 * truncate dma_addr_t to u32.
320 */
321 u32 addr = (u32) sg_dma_address(sg);
322 u32 sg_len = sg_dma_len(sg);
323
324 prd->addr = cpu_to_le32(addr);
325 prd->flags_len = cpu_to_le32(sg_len);
 326 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
327
328 last_prd = prd;
329 prd++;
330 }
331
332 if (likely(last_prd))
333 last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
334}
335
336static void sil_qc_prep(struct ata_queued_cmd *qc)
337{
338 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
339 return;
340
341 sil_fill_sg(qc);
342}
343
252static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) 344static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
253{ 345{
254 u8 cache_line = 0; 346 u8 cache_line = 0;
@@ -278,7 +370,7 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
278 if (rc) 370 if (rc)
279 return rc; 371 return rc;
280 372
281 ata_link_for_each_dev(dev, link) { 373 ata_for_each_dev(dev, link, ALL) {
282 if (!ata_dev_enabled(dev)) 374 if (!ata_dev_enabled(dev))
283 dev_mode[dev->devno] = 0; /* PIO0/1/2 */ 375 dev_mode[dev->devno] = 0; /* PIO0/1/2 */
284 else if (dev->flags & ATA_DFLAG_PIO) 376 else if (dev->flags & ATA_DFLAG_PIO)
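
For context on the new sata_sil DMA limits: the stock ATA_BMDMA_SHT carries the SFF-era constraints, under which a DMA segment may not cross a 64 KB boundary and the PRD table is capped at LIBATA_MAX_PRD entries, so the block layer splits large requests accordingly. With Large Block Transfer these controllers accept segments of up to roughly 2 GB that may straddle 64 KB lines, hence the wider boundary mask and the full ATA_MAX_PRD table, and hence also the private qc_prep/bmdma hooks, since DMA must be started through the bmdma2 register for LBT to apply. A rough contrast of the two host-template fragments (the SFF constants are quoted from memory of include/linux/libata.h):

	/* standard SFF BMDMA limits (ATA_BMDMA_SHT) */
	.sg_tablesize	= LIBATA_MAX_PRD,	/* half of ATA_MAX_PRD */
	.dma_boundary	= ATA_DMA_BOUNDARY,	/* 0xffff: segments never cross 64 KB */

	/* sata_sil with Large Block Transfer */
	.sg_tablesize	= ATA_MAX_PRD,
	.dma_boundary	= SIL_DMA_BOUNDARY,	/* 0x7fffffff: ~2 GB */
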
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9f7c543cc04b..01e69383d9c0 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -164,7 +164,7 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
164 164
165static int cciss_revalidate(struct gendisk *disk); 165static int cciss_revalidate(struct gendisk *disk);
166static int rebuild_lun_table(ctlr_info_t *h, int first_time); 166static int rebuild_lun_table(ctlr_info_t *h, int first_time);
167static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, 167static int deregister_disk(ctlr_info_t *h, int drv_index,
168 int clear_all); 168 int clear_all);
169 169
170static void cciss_read_capacity(int ctlr, int logvol, int withirq, 170static void cciss_read_capacity(int ctlr, int logvol, int withirq,
@@ -215,31 +215,17 @@ static struct block_device_operations cciss_fops = {
215/* 215/*
216 * Enqueuing and dequeuing functions for cmdlists. 216 * Enqueuing and dequeuing functions for cmdlists.
217 */ 217 */
218static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c) 218static inline void addQ(struct hlist_head *list, CommandList_struct *c)
219{ 219{
220 if (*Qptr == NULL) { 220 hlist_add_head(&c->list, list);
221 *Qptr = c;
222 c->next = c->prev = c;
223 } else {
224 c->prev = (*Qptr)->prev;
225 c->next = (*Qptr);
226 (*Qptr)->prev->next = c;
227 (*Qptr)->prev = c;
228 }
229} 221}
230 222
231static inline CommandList_struct *removeQ(CommandList_struct **Qptr, 223static inline void removeQ(CommandList_struct *c)
232 CommandList_struct *c)
233{ 224{
234 if (c && c->next != c) { 225 if (WARN_ON(hlist_unhashed(&c->list)))
235 if (*Qptr == c) 226 return;
236 *Qptr = c->next; 227
237 c->prev->next = c->next; 228 hlist_del_init(&c->list);
238 c->next->prev = c->prev;
239 } else {
240 *Qptr = NULL;
241 }
242 return c;
243} 229}
244 230
245#include "cciss_scsi.c" /* For SCSI tape support */ 231#include "cciss_scsi.c" /* For SCSI tape support */
@@ -506,6 +492,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
506 c->cmdindex = i; 492 c->cmdindex = i;
507 } 493 }
508 494
495 INIT_HLIST_NODE(&c->list);
509 c->busaddr = (__u32) cmd_dma_handle; 496 c->busaddr = (__u32) cmd_dma_handle;
510 temp64.val = (__u64) err_dma_handle; 497 temp64.val = (__u64) err_dma_handle;
511 c->ErrDesc.Addr.lower = temp64.val32.lower; 498 c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -1492,8 +1479,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1492 * which keeps the interrupt handler from starting 1479 * which keeps the interrupt handler from starting
1493 * the queue. 1480 * the queue.
1494 */ 1481 */
1495 ret = deregister_disk(h->gendisk[drv_index], 1482 ret = deregister_disk(h, drv_index, 0);
1496 &h->drv[drv_index], 0);
1497 h->drv[drv_index].busy_configuring = 0; 1483 h->drv[drv_index].busy_configuring = 0;
1498 } 1484 }
1499 1485
@@ -1711,8 +1697,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1711 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1697 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1712 h->drv[i].busy_configuring = 1; 1698 h->drv[i].busy_configuring = 1;
1713 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1699 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1714 return_code = deregister_disk(h->gendisk[i], 1700 return_code = deregister_disk(h, i, 1);
1715 &h->drv[i], 1);
1716 h->drv[i].busy_configuring = 0; 1701 h->drv[i].busy_configuring = 0;
1717 } 1702 }
1718 } 1703 }
@@ -1782,15 +1767,19 @@ mem_msg:
1782 * the highest_lun should be left unchanged and the LunID 1767 * the highest_lun should be left unchanged and the LunID
1783 * should not be cleared. 1768 * should not be cleared.
1784*/ 1769*/
1785static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, 1770static int deregister_disk(ctlr_info_t *h, int drv_index,
1786 int clear_all) 1771 int clear_all)
1787{ 1772{
1788 int i; 1773 int i;
1789 ctlr_info_t *h = get_host(disk); 1774 struct gendisk *disk;
1775 drive_info_struct *drv;
1790 1776
1791 if (!capable(CAP_SYS_RAWIO)) 1777 if (!capable(CAP_SYS_RAWIO))
1792 return -EPERM; 1778 return -EPERM;
1793 1779
1780 drv = &h->drv[drv_index];
1781 disk = h->gendisk[drv_index];
1782
 1794 /* make sure logical volume is NOT in use */ 1783 /* make sure logical volume is NOT in use */
1795 if (clear_all || (h->gendisk[0] == disk)) { 1784 if (clear_all || (h->gendisk[0] == disk)) {
1796 if (drv->usage_count > 1) 1785 if (drv->usage_count > 1)
@@ -2548,7 +2537,8 @@ static void start_io(ctlr_info_t *h)
2548{ 2537{
2549 CommandList_struct *c; 2538 CommandList_struct *c;
2550 2539
2551 while ((c = h->reqQ) != NULL) { 2540 while (!hlist_empty(&h->reqQ)) {
2541 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
2552 /* can't do anything if fifo is full */ 2542 /* can't do anything if fifo is full */
2553 if ((h->access.fifo_full(h))) { 2543 if ((h->access.fifo_full(h))) {
2554 printk(KERN_WARNING "cciss: fifo full\n"); 2544 printk(KERN_WARNING "cciss: fifo full\n");
@@ -2556,14 +2546,14 @@ static void start_io(ctlr_info_t *h)
2556 } 2546 }
2557 2547
2558 /* Get the first entry from the Request Q */ 2548 /* Get the first entry from the Request Q */
2559 removeQ(&(h->reqQ), c); 2549 removeQ(c);
2560 h->Qdepth--; 2550 h->Qdepth--;
2561 2551
2562 /* Tell the controller execute command */ 2552 /* Tell the controller execute command */
2563 h->access.submit_command(h, c); 2553 h->access.submit_command(h, c);
2564 2554
2565 /* Put job onto the completed Q */ 2555 /* Put job onto the completed Q */
2566 addQ(&(h->cmpQ), c); 2556 addQ(&h->cmpQ, c);
2567 } 2557 }
2568} 2558}
2569 2559
@@ -2576,7 +2566,7 @@ static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2576 memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 2566 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2577 2567
2578 /* add it to software queue and then send it to the controller */ 2568 /* add it to software queue and then send it to the controller */
2579 addQ(&(h->reqQ), c); 2569 addQ(&h->reqQ, c);
2580 h->Qdepth++; 2570 h->Qdepth++;
2581 if (h->Qdepth > h->maxQsinceinit) 2571 if (h->Qdepth > h->maxQsinceinit)
2582 h->maxQsinceinit = h->Qdepth; 2572 h->maxQsinceinit = h->Qdepth;
@@ -2897,7 +2887,7 @@ static void do_cciss_request(struct request_queue *q)
2897 2887
2898 spin_lock_irq(q->queue_lock); 2888 spin_lock_irq(q->queue_lock);
2899 2889
2900 addQ(&(h->reqQ), c); 2890 addQ(&h->reqQ, c);
2901 h->Qdepth++; 2891 h->Qdepth++;
2902 if (h->Qdepth > h->maxQsinceinit) 2892 if (h->Qdepth > h->maxQsinceinit)
2903 h->maxQsinceinit = h->Qdepth; 2893 h->maxQsinceinit = h->Qdepth;
@@ -2985,16 +2975,12 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2985 a = c->busaddr; 2975 a = c->busaddr;
2986 2976
2987 } else { 2977 } else {
2978 struct hlist_node *tmp;
2979
2988 a &= ~3; 2980 a &= ~3;
2989 if ((c = h->cmpQ) == NULL) { 2981 c = NULL;
2990 printk(KERN_WARNING 2982 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2991 "cciss: Completion of %08x ignored\n", 2983 if (c->busaddr == a)
2992 a1);
2993 continue;
2994 }
2995 while (c->busaddr != a) {
2996 c = c->next;
2997 if (c == h->cmpQ)
2998 break; 2984 break;
2999 } 2985 }
3000 } 2986 }
@@ -3002,8 +2988,8 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
3002 * If we've found the command, take it off the 2988 * If we've found the command, take it off the
3003 * completion Q and free it 2989 * completion Q and free it
3004 */ 2990 */
3005 if (c->busaddr == a) { 2991 if (c && c->busaddr == a) {
3006 removeQ(&h->cmpQ, c); 2992 removeQ(c);
3007 if (c->cmd_type == CMD_RWREQ) { 2993 if (c->cmd_type == CMD_RWREQ) {
3008 complete_command(h, c, 0); 2994 complete_command(h, c, 0);
3009 } else if (c->cmd_type == CMD_IOCTL_PEND) { 2995 } else if (c->cmd_type == CMD_IOCTL_PEND) {
@@ -3423,6 +3409,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3423 return -1; 3409 return -1;
3424 3410
3425 hba[i]->busy_initializing = 1; 3411 hba[i]->busy_initializing = 1;
3412 INIT_HLIST_HEAD(&hba[i]->cmpQ);
3413 INIT_HLIST_HEAD(&hba[i]->reqQ);
3426 3414
3427 if (cciss_pci_init(hba[i], pdev) != 0) 3415 if (cciss_pci_init(hba[i], pdev) != 0)
3428 goto clean1; 3416 goto clean1;
@@ -3730,15 +3718,17 @@ static void fail_all_cmds(unsigned long ctlr)
3730 pci_disable_device(h->pdev); /* Make sure it is really dead. */ 3718 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3731 3719
3732 /* move everything off the request queue onto the completed queue */ 3720 /* move everything off the request queue onto the completed queue */
3733 while ((c = h->reqQ) != NULL) { 3721 while (!hlist_empty(&h->reqQ)) {
3734 removeQ(&(h->reqQ), c); 3722 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
3723 removeQ(c);
3735 h->Qdepth--; 3724 h->Qdepth--;
3736 addQ(&(h->cmpQ), c); 3725 addQ(&h->cmpQ, c);
3737 } 3726 }
3738 3727
3739 /* Now, fail everything on the completed queue with a HW error */ 3728 /* Now, fail everything on the completed queue with a HW error */
3740 while ((c = h->cmpQ) != NULL) { 3729 while (!hlist_empty(&h->cmpQ)) {
3741 removeQ(&h->cmpQ, c); 3730 c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
3731 removeQ(c);
3742 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 3732 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3743 if (c->cmd_type == CMD_RWREQ) { 3733 if (c->cmd_type == CMD_RWREQ) {
3744 complete_command(h, c, 0); 3734 complete_command(h, c, 0);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 24a7efa993ab..15e2b84734e3 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -89,8 +89,8 @@ struct ctlr_info
89 struct access_method access; 89 struct access_method access;
90 90
91 /* queue and queue Info */ 91 /* queue and queue Info */
92 CommandList_struct *reqQ; 92 struct hlist_head reqQ;
93 CommandList_struct *cmpQ; 93 struct hlist_head cmpQ;
94 unsigned int Qdepth; 94 unsigned int Qdepth;
95 unsigned int maxQsinceinit; 95 unsigned int maxQsinceinit;
96 unsigned int maxSG; 96 unsigned int maxSG;
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 43bf5593b59b..24e22dea1a99 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -265,8 +265,7 @@ typedef struct _CommandList_struct {
265 int ctlr; 265 int ctlr;
266 int cmd_type; 266 int cmd_type;
267 long cmdindex; 267 long cmdindex;
268 struct _CommandList_struct *prev; 268 struct hlist_node list;
269 struct _CommandList_struct *next;
270 struct request * rq; 269 struct request * rq;
271 struct completion *waiting; 270 struct completion *waiting;
272 int retry_count; 271 int retry_count;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fb06ed659212..edbaac6c0573 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -624,20 +624,38 @@ static int loop_switch(struct loop_device *lo, struct file *file)
624} 624}
625 625
626/* 626/*
627 * Helper to flush the IOs in loop, but keeping loop thread running
628 */
629static int loop_flush(struct loop_device *lo)
630{
631 /* loop not yet configured, no running thread, nothing to flush */
632 if (!lo->lo_thread)
633 return 0;
634
635 return loop_switch(lo, NULL);
636}
637
638/*
627 * Do the actual switch; called from the BIO completion routine 639 * Do the actual switch; called from the BIO completion routine
628 */ 640 */
629static void do_loop_switch(struct loop_device *lo, struct switch_request *p) 641static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
630{ 642{
631 struct file *file = p->file; 643 struct file *file = p->file;
632 struct file *old_file = lo->lo_backing_file; 644 struct file *old_file = lo->lo_backing_file;
633 struct address_space *mapping = file->f_mapping; 645 struct address_space *mapping;
646
647 /* if no new file, only flush of queued bios requested */
648 if (!file)
649 goto out;
634 650
651 mapping = file->f_mapping;
635 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); 652 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
636 lo->lo_backing_file = file; 653 lo->lo_backing_file = file;
637 lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ? 654 lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
638 mapping->host->i_bdev->bd_block_size : PAGE_SIZE; 655 mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
639 lo->old_gfp_mask = mapping_gfp_mask(mapping); 656 lo->old_gfp_mask = mapping_gfp_mask(mapping);
640 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 657 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
658out:
641 complete(&p->wait); 659 complete(&p->wait);
642} 660}
643 661
@@ -901,6 +919,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
901 919
902 kthread_stop(lo->lo_thread); 920 kthread_stop(lo->lo_thread);
903 921
922 lo->lo_queue->unplug_fn = NULL;
904 lo->lo_backing_file = NULL; 923 lo->lo_backing_file = NULL;
905 924
906 loop_release_xfer(lo); 925 loop_release_xfer(lo);
@@ -1345,11 +1364,25 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
1345 struct loop_device *lo = disk->private_data; 1364 struct loop_device *lo = disk->private_data;
1346 1365
1347 mutex_lock(&lo->lo_ctl_mutex); 1366 mutex_lock(&lo->lo_ctl_mutex);
1348 --lo->lo_refcnt;
1349 1367
1350 if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt) 1368 if (--lo->lo_refcnt)
1369 goto out;
1370
1371 if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1372 /*
1373 * In autoclear mode, stop the loop thread
1374 * and remove configuration after last close.
1375 */
1351 loop_clr_fd(lo, NULL); 1376 loop_clr_fd(lo, NULL);
1377 } else {
1378 /*
1379 * Otherwise keep thread (if running) and config,
1380 * but flush possible ongoing bios in thread.
1381 */
1382 loop_flush(lo);
1383 }
1352 1384
1385out:
1353 mutex_unlock(&lo->lo_ctl_mutex); 1386 mutex_unlock(&lo->lo_ctl_mutex);
1354 1387
1355 return 0; 1388 return 0;
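The loop.c change above makes only the last close act: when the refcount drops to zero, the device is either torn down (LO_FLAGS_AUTOCLEAR) or just flushed, and earlier closes return immediately. A minimal userspace sketch of that last-reference decision; release_dev(), flush_dev() and teardown_dev() are invented stand-ins for lo_release(), loop_flush() and loop_clr_fd().

#include <stdbool.h>
#include <stdio.h>

struct dev {
        int refcnt;
        bool autoclear;
};

static void teardown_dev(struct dev *d)
{
        (void)d;
        puts("teardown: stop thread, drop configuration");
}

static void flush_dev(struct dev *d)
{
        (void)d;
        puts("flush: drain queued I/O, keep configuration");
}

/* Mirrors the lo_release() flow: only the final close acts, and the
 * action depends on the autoclear flag. */
static void release_dev(struct dev *d)
{
        if (--d->refcnt)
                return;                 /* not the last opener */

        if (d->autoclear)
                teardown_dev(d);
        else
                flush_dev(d);
}

int main(void)
{
        struct dev d = { .refcnt = 2, .autoclear = false };

        release_dev(&d);        /* first close: nothing happens */
        release_dev(&d);        /* last close: flush only */
        return 0;
}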
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index d3a91cacee8c..7bcc1d8bc967 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -722,7 +722,6 @@ static int __init nbd_init(void)
722 722
723 for (i = 0; i < nbds_max; i++) { 723 for (i = 0; i < nbds_max; i++) {
724 struct gendisk *disk = alloc_disk(1 << part_shift); 724 struct gendisk *disk = alloc_disk(1 << part_shift);
725 elevator_t *old_e;
726 if (!disk) 725 if (!disk)
727 goto out; 726 goto out;
728 nbd_dev[i].disk = disk; 727 nbd_dev[i].disk = disk;
@@ -736,11 +735,10 @@ static int __init nbd_init(void)
736 put_disk(disk); 735 put_disk(disk);
737 goto out; 736 goto out;
738 } 737 }
739 old_e = disk->queue->elevator; 738 /*
740 if (elevator_init(disk->queue, "deadline") == 0 || 739 * Tell the block layer that we are not a rotational device
741 elevator_init(disk->queue, "noop") == 0) { 740 */
742 elevator_exit(old_e); 741 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
743 }
744 } 742 }
745 743
746 if (register_blkdev(NBD_MAJOR, "nbd")) { 744 if (register_blkdev(NBD_MAJOR, "nbd")) {
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 85d79a02d487..5d34764c8a87 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,7 +6,6 @@
6#include <linux/virtio_blk.h> 6#include <linux/virtio_blk.h>
7#include <linux/scatterlist.h> 7#include <linux/scatterlist.h>
8 8
9#define VIRTIO_MAX_SG (3+MAX_PHYS_SEGMENTS)
10#define PART_BITS 4 9#define PART_BITS 4
11 10
12static int major, index; 11static int major, index;
@@ -26,8 +25,11 @@ struct virtio_blk
26 25
27 mempool_t *pool; 26 mempool_t *pool;
28 27
28 /* What host tells us, plus 2 for header & tailer. */
29 unsigned int sg_elems;
30
29 /* Scatterlist: can be too big for stack. */ 31 /* Scatterlist: can be too big for stack. */
30 struct scatterlist sg[VIRTIO_MAX_SG]; 32 struct scatterlist sg[/*sg_elems*/];
31}; 33};
32 34
33struct virtblk_req 35struct virtblk_req
@@ -97,8 +99,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
97 if (blk_barrier_rq(vbr->req)) 99 if (blk_barrier_rq(vbr->req))
98 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; 100 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
99 101
100 /* This init could be done at vblk creation time */
101 sg_init_table(vblk->sg, VIRTIO_MAX_SG);
102 sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); 102 sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
103 num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); 103 num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
104 sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status)); 104 sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
@@ -130,7 +130,7 @@ static void do_virtblk_request(struct request_queue *q)
130 130
131 while ((req = elv_next_request(q)) != NULL) { 131 while ((req = elv_next_request(q)) != NULL) {
132 vblk = req->rq_disk->private_data; 132 vblk = req->rq_disk->private_data;
133 BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg)); 133 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
134 134
135 /* If this request fails, stop queue and wait for something to 135 /* If this request fails, stop queue and wait for something to
136 finish to restart it. */ 136 finish to restart it. */
@@ -196,12 +196,22 @@ static int virtblk_probe(struct virtio_device *vdev)
196 int err; 196 int err;
197 u64 cap; 197 u64 cap;
198 u32 v; 198 u32 v;
199 u32 blk_size; 199 u32 blk_size, sg_elems;
200 200
201 if (index_to_minor(index) >= 1 << MINORBITS) 201 if (index_to_minor(index) >= 1 << MINORBITS)
202 return -ENOSPC; 202 return -ENOSPC;
203 203
204 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); 204 /* We need to know how many segments before we allocate. */
205 err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
206 offsetof(struct virtio_blk_config, seg_max),
207 &sg_elems);
208 if (err)
209 sg_elems = 1;
210
211 /* We need an extra sg elements at head and tail. */
212 sg_elems += 2;
213 vdev->priv = vblk = kmalloc(sizeof(*vblk) +
214 sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
205 if (!vblk) { 215 if (!vblk) {
206 err = -ENOMEM; 216 err = -ENOMEM;
207 goto out; 217 goto out;
@@ -210,6 +220,8 @@ static int virtblk_probe(struct virtio_device *vdev)
210 INIT_LIST_HEAD(&vblk->reqs); 220 INIT_LIST_HEAD(&vblk->reqs);
211 spin_lock_init(&vblk->lock); 221 spin_lock_init(&vblk->lock);
212 vblk->vdev = vdev; 222 vblk->vdev = vdev;
223 vblk->sg_elems = sg_elems;
224 sg_init_table(vblk->sg, vblk->sg_elems);
213 225
214 /* We expect one virtqueue, for output. */ 226 /* We expect one virtqueue, for output. */
215 vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); 227 vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
@@ -237,6 +249,8 @@ static int virtblk_probe(struct virtio_device *vdev)
237 goto out_put_disk; 249 goto out_put_disk;
238 } 250 }
239 251
252 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
253
240 if (index < 26) { 254 if (index < 26) {
241 sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); 255 sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
242 } else if (index < (26 + 1) * 26) { 256 } else if (index < (26 + 1) * 26) {
@@ -277,6 +291,13 @@ static int virtblk_probe(struct virtio_device *vdev)
277 } 291 }
278 set_capacity(vblk->disk, cap); 292 set_capacity(vblk->disk, cap);
279 293
294 /* We can handle whatever the host told us to handle. */
295 blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
296 blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
297
298 /* No real sector limit. */
299 blk_queue_max_sectors(vblk->disk->queue, -1U);
300
280 /* Host can optionally specify maximum segment size and number of 301 /* Host can optionally specify maximum segment size and number of
281 * segments. */ 302 * segments. */
282 err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, 303 err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
@@ -284,12 +305,8 @@ static int virtblk_probe(struct virtio_device *vdev)
284 &v); 305 &v);
285 if (!err) 306 if (!err)
286 blk_queue_max_segment_size(vblk->disk->queue, v); 307 blk_queue_max_segment_size(vblk->disk->queue, v);
287 308 else
288 err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, 309 blk_queue_max_segment_size(vblk->disk->queue, -1U);
289 offsetof(struct virtio_blk_config, seg_max),
290 &v);
291 if (!err)
292 blk_queue_max_hw_segments(vblk->disk->queue, v);
293 310
294 /* Host can optionally specify the block size of the device */ 311 /* Host can optionally specify the block size of the device */
295 err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, 312 err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
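virtio_blk above drops the fixed VIRTIO_MAX_SG array, reads seg_max from the host at probe time, and allocates the device structure together with its scatterlist in one kmalloc() through a C flexible array member. A small userspace sketch of that allocation pattern, using plain malloc() and invented struct names:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a scatterlist entry. */
struct sg_entry {
        void   *addr;
        size_t  len;
};

/* Trailing flexible array member, sized per device at allocation time,
 * like struct virtio_blk's sg[] in the hunk above. */
struct blk_dev {
        unsigned int sg_elems;
        struct sg_entry sg[];
};

static struct blk_dev *blk_dev_alloc(unsigned int seg_max)
{
        unsigned int sg_elems = seg_max + 2;    /* + header and status slots */
        struct blk_dev *d;

        d = malloc(sizeof(*d) + sg_elems * sizeof(d->sg[0]));
        if (!d)
                return NULL;
        d->sg_elems = sg_elems;
        return d;
}

int main(void)
{
        struct blk_dev *d = blk_dev_alloc(126);

        if (!d)
                return 1;
        printf("allocated %u sg slots in one block\n", d->sg_elems);
        free(d);
        return 0;
}

Sizing the array at run time is also why the later BUG_ON() can compare against vblk->sg_elems instead of a compile-time constant.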
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2d19f0cc47f2..918ef725de41 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -338,18 +338,12 @@ wait:
338static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 338static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
339{ 339{
340 struct request_queue *rq; 340 struct request_queue *rq;
341 elevator_t *old_e;
342 341
343 rq = blk_init_queue(do_blkif_request, &blkif_io_lock); 342 rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
344 if (rq == NULL) 343 if (rq == NULL)
345 return -1; 344 return -1;
346 345
347 old_e = rq->elevator; 346 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
348 if (IS_ERR_VALUE(elevator_init(rq, "noop")))
349 printk(KERN_WARNING
350 "blkfront: Switch elevator failed, use default\n");
351 else
352 elevator_exit(old_e);
353 347
354 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 348 /* Hard sector size and max sectors impersonate the equiv. hardware. */
355 blk_queue_hardsect_size(rq, sector_size); 349 blk_queue_hardsect_size(rq, sector_size);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 7d2e91cccb13..cceace61ef28 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1712,29 +1712,30 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
1712 return 0; 1712 return 0;
1713} 1713}
1714 1714
1715static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s) 1715static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
1716 struct packet_command *cgc)
1716{ 1717{
1717 unsigned char buf[21], *base; 1718 unsigned char buf[21], *base;
1718 struct dvd_layer *layer; 1719 struct dvd_layer *layer;
1719 struct packet_command cgc;
1720 struct cdrom_device_ops *cdo = cdi->ops; 1720 struct cdrom_device_ops *cdo = cdi->ops;
1721 int ret, layer_num = s->physical.layer_num; 1721 int ret, layer_num = s->physical.layer_num;
1722 1722
1723 if (layer_num >= DVD_LAYERS) 1723 if (layer_num >= DVD_LAYERS)
1724 return -EINVAL; 1724 return -EINVAL;
1725 1725
1726 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); 1726 init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
1727 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1727 cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1728 cgc.cmd[6] = layer_num; 1728 cgc->cmd[6] = layer_num;
1729 cgc.cmd[7] = s->type; 1729 cgc->cmd[7] = s->type;
1730 cgc.cmd[9] = cgc.buflen & 0xff; 1730 cgc->cmd[9] = cgc->buflen & 0xff;
1731 1731
1732 /* 1732 /*
1733 * refrain from reporting errors on non-existing layers (mainly) 1733 * refrain from reporting errors on non-existing layers (mainly)
1734 */ 1734 */
1735 cgc.quiet = 1; 1735 cgc->quiet = 1;
1736 1736
1737 if ((ret = cdo->generic_packet(cdi, &cgc))) 1737 ret = cdo->generic_packet(cdi, cgc);
1738 if (ret)
1738 return ret; 1739 return ret;
1739 1740
1740 base = &buf[4]; 1741 base = &buf[4];
@@ -1762,21 +1763,22 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
1762 return 0; 1763 return 0;
1763} 1764}
1764 1765
1765static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s) 1766static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
1767 struct packet_command *cgc)
1766{ 1768{
1767 int ret; 1769 int ret;
1768 u_char buf[8]; 1770 u_char buf[8];
1769 struct packet_command cgc;
1770 struct cdrom_device_ops *cdo = cdi->ops; 1771 struct cdrom_device_ops *cdo = cdi->ops;
1771 1772
1772 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); 1773 init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
1773 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1774 cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1774 cgc.cmd[6] = s->copyright.layer_num; 1775 cgc->cmd[6] = s->copyright.layer_num;
1775 cgc.cmd[7] = s->type; 1776 cgc->cmd[7] = s->type;
1776 cgc.cmd[8] = cgc.buflen >> 8; 1777 cgc->cmd[8] = cgc->buflen >> 8;
1777 cgc.cmd[9] = cgc.buflen & 0xff; 1778 cgc->cmd[9] = cgc->buflen & 0xff;
1778 1779
1779 if ((ret = cdo->generic_packet(cdi, &cgc))) 1780 ret = cdo->generic_packet(cdi, cgc);
1781 if (ret)
1780 return ret; 1782 return ret;
1781 1783
1782 s->copyright.cpst = buf[4]; 1784 s->copyright.cpst = buf[4];
@@ -1785,79 +1787,89 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
1785 return 0; 1787 return 0;
1786} 1788}
1787 1789
1788static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s) 1790static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
1791 struct packet_command *cgc)
1789{ 1792{
1790 int ret, size; 1793 int ret, size;
1791 u_char *buf; 1794 u_char *buf;
1792 struct packet_command cgc;
1793 struct cdrom_device_ops *cdo = cdi->ops; 1795 struct cdrom_device_ops *cdo = cdi->ops;
1794 1796
1795 size = sizeof(s->disckey.value) + 4; 1797 size = sizeof(s->disckey.value) + 4;
1796 1798
1797 if ((buf = kmalloc(size, GFP_KERNEL)) == NULL) 1799 buf = kmalloc(size, GFP_KERNEL);
1800 if (!buf)
1798 return -ENOMEM; 1801 return -ENOMEM;
1799 1802
1800 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); 1803 init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
1801 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1804 cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1802 cgc.cmd[7] = s->type; 1805 cgc->cmd[7] = s->type;
1803 cgc.cmd[8] = size >> 8; 1806 cgc->cmd[8] = size >> 8;
1804 cgc.cmd[9] = size & 0xff; 1807 cgc->cmd[9] = size & 0xff;
1805 cgc.cmd[10] = s->disckey.agid << 6; 1808 cgc->cmd[10] = s->disckey.agid << 6;
1806 1809
1807 if (!(ret = cdo->generic_packet(cdi, &cgc))) 1810 ret = cdo->generic_packet(cdi, cgc);
1811 if (!ret)
1808 memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value)); 1812 memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value));
1809 1813
1810 kfree(buf); 1814 kfree(buf);
1811 return ret; 1815 return ret;
1812} 1816}
1813 1817
1814static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s) 1818static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
1819 struct packet_command *cgc)
1815{ 1820{
1816 int ret; 1821 int ret, size = 4 + 188;
1817 u_char buf[4 + 188]; 1822 u_char *buf;
1818 struct packet_command cgc;
1819 struct cdrom_device_ops *cdo = cdi->ops; 1823 struct cdrom_device_ops *cdo = cdi->ops;
1820 1824
1821 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); 1825 buf = kmalloc(size, GFP_KERNEL);
1822 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1826 if (!buf)
1823 cgc.cmd[7] = s->type; 1827 return -ENOMEM;
1824 cgc.cmd[9] = cgc.buflen & 0xff;
1825 1828
1826 if ((ret = cdo->generic_packet(cdi, &cgc))) 1829 init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
1827 return ret; 1830 cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1831 cgc->cmd[7] = s->type;
1832 cgc->cmd[9] = cgc->buflen & 0xff;
1833
1834 ret = cdo->generic_packet(cdi, cgc);
1835 if (ret)
1836 goto out;
1828 1837
1829 s->bca.len = buf[0] << 8 | buf[1]; 1838 s->bca.len = buf[0] << 8 | buf[1];
1830 if (s->bca.len < 12 || s->bca.len > 188) { 1839 if (s->bca.len < 12 || s->bca.len > 188) {
1831 cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len); 1840 cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
1832 return -EIO; 1841 ret = -EIO;
1842 goto out;
1833 } 1843 }
1834 memcpy(s->bca.value, &buf[4], s->bca.len); 1844 memcpy(s->bca.value, &buf[4], s->bca.len);
1835 1845 ret = 0;
1836 return 0; 1846out:
1847 kfree(buf);
1848 return ret;
1837} 1849}
1838 1850
1839static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s) 1851static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
1852 struct packet_command *cgc)
1840{ 1853{
1841 int ret = 0, size; 1854 int ret = 0, size;
1842 u_char *buf; 1855 u_char *buf;
1843 struct packet_command cgc;
1844 struct cdrom_device_ops *cdo = cdi->ops; 1856 struct cdrom_device_ops *cdo = cdi->ops;
1845 1857
1846 size = sizeof(s->manufact.value) + 4; 1858 size = sizeof(s->manufact.value) + 4;
1847 1859
1848 if ((buf = kmalloc(size, GFP_KERNEL)) == NULL) 1860 buf = kmalloc(size, GFP_KERNEL);
1861 if (!buf)
1849 return -ENOMEM; 1862 return -ENOMEM;
1850 1863
1851 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); 1864 init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
1852 cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE; 1865 cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
1853 cgc.cmd[7] = s->type; 1866 cgc->cmd[7] = s->type;
1854 cgc.cmd[8] = size >> 8; 1867 cgc->cmd[8] = size >> 8;
1855 cgc.cmd[9] = size & 0xff; 1868 cgc->cmd[9] = size & 0xff;
1856 1869
1857 if ((ret = cdo->generic_packet(cdi, &cgc))) { 1870 ret = cdo->generic_packet(cdi, cgc);
1858 kfree(buf); 1871 if (ret)
1859 return ret; 1872 goto out;
1860 }
1861 1873
1862 s->manufact.len = buf[0] << 8 | buf[1]; 1874 s->manufact.len = buf[0] << 8 | buf[1];
1863 if (s->manufact.len < 0 || s->manufact.len > 2048) { 1875 if (s->manufact.len < 0 || s->manufact.len > 2048) {
@@ -1868,27 +1880,29 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
1868 memcpy(s->manufact.value, &buf[4], s->manufact.len); 1880 memcpy(s->manufact.value, &buf[4], s->manufact.len);
1869 } 1881 }
1870 1882
1883out:
1871 kfree(buf); 1884 kfree(buf);
1872 return ret; 1885 return ret;
1873} 1886}
1874 1887
1875static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s) 1888static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s,
1889 struct packet_command *cgc)
1876{ 1890{
1877 switch (s->type) { 1891 switch (s->type) {
1878 case DVD_STRUCT_PHYSICAL: 1892 case DVD_STRUCT_PHYSICAL:
1879 return dvd_read_physical(cdi, s); 1893 return dvd_read_physical(cdi, s, cgc);
1880 1894
1881 case DVD_STRUCT_COPYRIGHT: 1895 case DVD_STRUCT_COPYRIGHT:
1882 return dvd_read_copyright(cdi, s); 1896 return dvd_read_copyright(cdi, s, cgc);
1883 1897
1884 case DVD_STRUCT_DISCKEY: 1898 case DVD_STRUCT_DISCKEY:
1885 return dvd_read_disckey(cdi, s); 1899 return dvd_read_disckey(cdi, s, cgc);
1886 1900
1887 case DVD_STRUCT_BCA: 1901 case DVD_STRUCT_BCA:
1888 return dvd_read_bca(cdi, s); 1902 return dvd_read_bca(cdi, s, cgc);
1889 1903
1890 case DVD_STRUCT_MANUFACT: 1904 case DVD_STRUCT_MANUFACT:
1891 return dvd_read_manufact(cdi, s); 1905 return dvd_read_manufact(cdi, s, cgc);
1892 1906
1893 default: 1907 default:
1894 cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n", 1908 cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
@@ -2787,271 +2801,360 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
2787 return cdo->generic_packet(cdi, &cgc); 2801 return cdo->generic_packet(cdi, &cgc);
2788} 2802}
2789 2803
2790static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, 2804static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
2791 unsigned long arg) 2805 void __user *arg,
2792{ 2806 struct packet_command *cgc,
2793 struct cdrom_device_ops *cdo = cdi->ops; 2807 int cmd)
2794 struct packet_command cgc; 2808{
2795 struct request_sense sense; 2809 struct request_sense sense;
2796 unsigned char buffer[32]; 2810 struct cdrom_msf msf;
2797 int ret = 0; 2811 int blocksize = 0, format = 0, lba;
2798 2812 int ret;
2799 memset(&cgc, 0, sizeof(cgc));
2800 2813
2801 /* build a unified command and queue it through
2802 cdo->generic_packet() */
2803 switch (cmd) { 2814 switch (cmd) {
2804 case CDROMREADRAW: 2815 case CDROMREADRAW:
2816 blocksize = CD_FRAMESIZE_RAW;
2817 break;
2805 case CDROMREADMODE1: 2818 case CDROMREADMODE1:
2806 case CDROMREADMODE2: { 2819 blocksize = CD_FRAMESIZE;
2807 struct cdrom_msf msf; 2820 format = 2;
2808 int blocksize = 0, format = 0, lba; 2821 break;
2809 2822 case CDROMREADMODE2:
2810 switch (cmd) { 2823 blocksize = CD_FRAMESIZE_RAW0;
2811 case CDROMREADRAW: 2824 break;
2812 blocksize = CD_FRAMESIZE_RAW; 2825 }
2813 break; 2826 IOCTL_IN(arg, struct cdrom_msf, msf);
2814 case CDROMREADMODE1: 2827 lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
2815 blocksize = CD_FRAMESIZE; 2828 /* FIXME: we need upper bound checking, too!! */
2816 format = 2; 2829 if (lba < 0)
2817 break; 2830 return -EINVAL;
2818 case CDROMREADMODE2: 2831
2819 blocksize = CD_FRAMESIZE_RAW0; 2832 cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
2820 break; 2833 if (cgc->buffer == NULL)
2821 } 2834 return -ENOMEM;
2822 IOCTL_IN(arg, struct cdrom_msf, msf); 2835
2823 lba = msf_to_lba(msf.cdmsf_min0,msf.cdmsf_sec0,msf.cdmsf_frame0); 2836 memset(&sense, 0, sizeof(sense));
2824 /* FIXME: we need upper bound checking, too!! */ 2837 cgc->sense = &sense;
2825 if (lba < 0) 2838 cgc->data_direction = CGC_DATA_READ;
2826 return -EINVAL; 2839 ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
2827 cgc.buffer = kmalloc(blocksize, GFP_KERNEL); 2840 if (ret && sense.sense_key == 0x05 &&
2828 if (cgc.buffer == NULL) 2841 sense.asc == 0x20 &&
2829 return -ENOMEM; 2842 sense.ascq == 0x00) {
2830 memset(&sense, 0, sizeof(sense)); 2843 /*
2831 cgc.sense = &sense; 2844 * SCSI-II devices are not required to support
2832 cgc.data_direction = CGC_DATA_READ; 2845 * READ_CD, so let's try switching block size
2833 ret = cdrom_read_block(cdi, &cgc, lba, 1, format, blocksize); 2846 */
2834 if (ret && sense.sense_key==0x05 && sense.asc==0x20 && sense.ascq==0x00) { 2847 /* FIXME: switch back again... */
2835 /* 2848 ret = cdrom_switch_blocksize(cdi, blocksize);
2836 * SCSI-II devices are not required to support 2849 if (ret)
2837 * READ_CD, so let's try switching block size 2850 goto out;
2838 */ 2851 cgc->sense = NULL;
2839 /* FIXME: switch back again... */ 2852 ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
2840 if ((ret = cdrom_switch_blocksize(cdi, blocksize))) { 2853 ret |= cdrom_switch_blocksize(cdi, blocksize);
2841 kfree(cgc.buffer); 2854 }
2842 return ret; 2855 if (!ret && copy_to_user(arg, cgc->buffer, blocksize))
2843 } 2856 ret = -EFAULT;
2844 cgc.sense = NULL; 2857out:
2845 ret = cdrom_read_cd(cdi, &cgc, lba, blocksize, 1); 2858 kfree(cgc->buffer);
2846 ret |= cdrom_switch_blocksize(cdi, blocksize); 2859 return ret;
2847 } 2860}
2848 if (!ret && copy_to_user((char __user *)arg, cgc.buffer, blocksize)) 2861
2849 ret = -EFAULT; 2862static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
2850 kfree(cgc.buffer); 2863 void __user *arg)
2864{
2865 struct cdrom_read_audio ra;
2866 int lba;
2867
2868 IOCTL_IN(arg, struct cdrom_read_audio, ra);
2869
2870 if (ra.addr_format == CDROM_MSF)
2871 lba = msf_to_lba(ra.addr.msf.minute,
2872 ra.addr.msf.second,
2873 ra.addr.msf.frame);
2874 else if (ra.addr_format == CDROM_LBA)
2875 lba = ra.addr.lba;
2876 else
2877 return -EINVAL;
2878
2879 /* FIXME: we need upper bound checking, too!! */
2880 if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
2881 return -EINVAL;
2882
2883 return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
2884}
2885
2886static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
2887 void __user *arg)
2888{
2889 int ret;
2890 struct cdrom_subchnl q;
2891 u_char requested, back;
2892 IOCTL_IN(arg, struct cdrom_subchnl, q);
2893 requested = q.cdsc_format;
2894 if (!((requested == CDROM_MSF) ||
2895 (requested == CDROM_LBA)))
2896 return -EINVAL;
2897 q.cdsc_format = CDROM_MSF;
2898 ret = cdrom_read_subchannel(cdi, &q, 0);
2899 if (ret)
2851 return ret; 2900 return ret;
2852 } 2901 back = q.cdsc_format; /* local copy */
2853 case CDROMREADAUDIO: { 2902 sanitize_format(&q.cdsc_absaddr, &back, requested);
2854 struct cdrom_read_audio ra; 2903 sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
2855 int lba; 2904 IOCTL_OUT(arg, struct cdrom_subchnl, q);
2856 2905 /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
2857 IOCTL_IN(arg, struct cdrom_read_audio, ra); 2906 return 0;
2858 2907}
2859 if (ra.addr_format == CDROM_MSF)
2860 lba = msf_to_lba(ra.addr.msf.minute,
2861 ra.addr.msf.second,
2862 ra.addr.msf.frame);
2863 else if (ra.addr_format == CDROM_LBA)
2864 lba = ra.addr.lba;
2865 else
2866 return -EINVAL;
2867 2908
2868 /* FIXME: we need upper bound checking, too!! */ 2909static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
2869 if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES) 2910 void __user *arg,
2870 return -EINVAL; 2911 struct packet_command *cgc)
2912{
2913 struct cdrom_device_ops *cdo = cdi->ops;
2914 struct cdrom_msf msf;
2915 cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
2916 IOCTL_IN(arg, struct cdrom_msf, msf);
2917 cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF;
2918 cgc->cmd[3] = msf.cdmsf_min0;
2919 cgc->cmd[4] = msf.cdmsf_sec0;
2920 cgc->cmd[5] = msf.cdmsf_frame0;
2921 cgc->cmd[6] = msf.cdmsf_min1;
2922 cgc->cmd[7] = msf.cdmsf_sec1;
2923 cgc->cmd[8] = msf.cdmsf_frame1;
2924 cgc->data_direction = CGC_DATA_NONE;
2925 return cdo->generic_packet(cdi, cgc);
2926}
2871 2927
2872 return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes); 2928static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
2873 } 2929 void __user *arg,
2874 case CDROMSUBCHNL: { 2930 struct packet_command *cgc)
2875 struct cdrom_subchnl q; 2931{
2876 u_char requested, back; 2932 struct cdrom_device_ops *cdo = cdi->ops;
2877 IOCTL_IN(arg, struct cdrom_subchnl, q); 2933 struct cdrom_blk blk;
2878 requested = q.cdsc_format; 2934 cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
2879 if (!((requested == CDROM_MSF) || 2935 IOCTL_IN(arg, struct cdrom_blk, blk);
2880 (requested == CDROM_LBA))) 2936 cgc->cmd[0] = GPCMD_PLAY_AUDIO_10;
2881 return -EINVAL; 2937 cgc->cmd[2] = (blk.from >> 24) & 0xff;
2882 q.cdsc_format = CDROM_MSF; 2938 cgc->cmd[3] = (blk.from >> 16) & 0xff;
2883 if ((ret = cdrom_read_subchannel(cdi, &q, 0))) 2939 cgc->cmd[4] = (blk.from >> 8) & 0xff;
2884 return ret; 2940 cgc->cmd[5] = blk.from & 0xff;
2885 back = q.cdsc_format; /* local copy */ 2941 cgc->cmd[7] = (blk.len >> 8) & 0xff;
2886 sanitize_format(&q.cdsc_absaddr, &back, requested); 2942 cgc->cmd[8] = blk.len & 0xff;
2887 sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); 2943 cgc->data_direction = CGC_DATA_NONE;
2888 IOCTL_OUT(arg, struct cdrom_subchnl, q); 2944 return cdo->generic_packet(cdi, cgc);
2889 /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ 2945}
2890 return 0; 2946
2891 } 2947static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
2892 case CDROMPLAYMSF: { 2948 void __user *arg,
2893 struct cdrom_msf msf; 2949 struct packet_command *cgc,
2894 cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); 2950 unsigned int cmd)
2895 IOCTL_IN(arg, struct cdrom_msf, msf); 2951{
2896 cgc.cmd[0] = GPCMD_PLAY_AUDIO_MSF; 2952 struct cdrom_volctrl volctrl;
2897 cgc.cmd[3] = msf.cdmsf_min0; 2953 unsigned char buffer[32];
2898 cgc.cmd[4] = msf.cdmsf_sec0; 2954 char mask[sizeof(buffer)];
2899 cgc.cmd[5] = msf.cdmsf_frame0; 2955 unsigned short offset;
2900 cgc.cmd[6] = msf.cdmsf_min1; 2956 int ret;
2901 cgc.cmd[7] = msf.cdmsf_sec1;
2902 cgc.cmd[8] = msf.cdmsf_frame1;
2903 cgc.data_direction = CGC_DATA_NONE;
2904 return cdo->generic_packet(cdi, &cgc);
2905 }
2906 case CDROMPLAYBLK: {
2907 struct cdrom_blk blk;
2908 cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
2909 IOCTL_IN(arg, struct cdrom_blk, blk);
2910 cgc.cmd[0] = GPCMD_PLAY_AUDIO_10;
2911 cgc.cmd[2] = (blk.from >> 24) & 0xff;
2912 cgc.cmd[3] = (blk.from >> 16) & 0xff;
2913 cgc.cmd[4] = (blk.from >> 8) & 0xff;
2914 cgc.cmd[5] = blk.from & 0xff;
2915 cgc.cmd[7] = (blk.len >> 8) & 0xff;
2916 cgc.cmd[8] = blk.len & 0xff;
2917 cgc.data_direction = CGC_DATA_NONE;
2918 return cdo->generic_packet(cdi, &cgc);
2919 }
2920 case CDROMVOLCTRL:
2921 case CDROMVOLREAD: {
2922 struct cdrom_volctrl volctrl;
2923 char mask[sizeof(buffer)];
2924 unsigned short offset;
2925 2957
2926 cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n"); 2958 cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
2927 2959
2928 IOCTL_IN(arg, struct cdrom_volctrl, volctrl); 2960 IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
2929 2961
2930 cgc.buffer = buffer; 2962 cgc->buffer = buffer;
2931 cgc.buflen = 24; 2963 cgc->buflen = 24;
2932 if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_AUDIO_CTL_PAGE, 0))) 2964 ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
2933 return ret; 2965 if (ret)
2966 return ret;
2934 2967
2935 /* originally the code depended on buffer[1] to determine 2968 /* originally the code depended on buffer[1] to determine
2936 how much data is available for transfer. buffer[1] is 2969 how much data is available for transfer. buffer[1] is
2937 unfortunately ambigious and the only reliable way seem 2970 unfortunately ambigious and the only reliable way seem
2938 to be to simply skip over the block descriptor... */ 2971 to be to simply skip over the block descriptor... */
2939 offset = 8 + be16_to_cpu(*(__be16 *)(buffer+6)); 2972 offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
2940 2973
2941 if (offset + 16 > sizeof(buffer)) 2974 if (offset + 16 > sizeof(buffer))
2942 return -E2BIG; 2975 return -E2BIG;
2943 2976
2944 if (offset + 16 > cgc.buflen) { 2977 if (offset + 16 > cgc->buflen) {
2945 cgc.buflen = offset+16; 2978 cgc->buflen = offset + 16;
2946 ret = cdrom_mode_sense(cdi, &cgc, 2979 ret = cdrom_mode_sense(cdi, cgc,
2947 GPMODE_AUDIO_CTL_PAGE, 0); 2980 GPMODE_AUDIO_CTL_PAGE, 0);
2948 if (ret) 2981 if (ret)
2949 return ret; 2982 return ret;
2950 } 2983 }
2951 2984
2952 /* sanity check */ 2985 /* sanity check */
2953 if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE || 2986 if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
2954 buffer[offset+1] < 14) 2987 buffer[offset + 1] < 14)
2955 return -EINVAL; 2988 return -EINVAL;
2956 2989
2957 /* now we have the current volume settings. if it was only 2990 /* now we have the current volume settings. if it was only
2958 a CDROMVOLREAD, return these values */ 2991 a CDROMVOLREAD, return these values */
2959 if (cmd == CDROMVOLREAD) { 2992 if (cmd == CDROMVOLREAD) {
2960 volctrl.channel0 = buffer[offset+9]; 2993 volctrl.channel0 = buffer[offset+9];
2961 volctrl.channel1 = buffer[offset+11]; 2994 volctrl.channel1 = buffer[offset+11];
2962 volctrl.channel2 = buffer[offset+13]; 2995 volctrl.channel2 = buffer[offset+13];
2963 volctrl.channel3 = buffer[offset+15]; 2996 volctrl.channel3 = buffer[offset+15];
2964 IOCTL_OUT(arg, struct cdrom_volctrl, volctrl); 2997 IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
2965 return 0; 2998 return 0;
2966 } 2999 }
2967 3000
2968 /* get the volume mask */ 3001 /* get the volume mask */
2969 cgc.buffer = mask; 3002 cgc->buffer = mask;
2970 if ((ret = cdrom_mode_sense(cdi, &cgc, 3003 ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
2971 GPMODE_AUDIO_CTL_PAGE, 1))) 3004 if (ret)
2972 return ret; 3005 return ret;
2973 3006
2974 buffer[offset+9] = volctrl.channel0 & mask[offset+9]; 3007 buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
2975 buffer[offset+11] = volctrl.channel1 & mask[offset+11]; 3008 buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
2976 buffer[offset+13] = volctrl.channel2 & mask[offset+13]; 3009 buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
2977 buffer[offset+15] = volctrl.channel3 & mask[offset+15]; 3010 buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
2978 3011
2979 /* set volume */ 3012 /* set volume */
2980 cgc.buffer = buffer + offset - 8; 3013 cgc->buffer = buffer + offset - 8;
2981 memset(cgc.buffer, 0, 8); 3014 memset(cgc->buffer, 0, 8);
2982 return cdrom_mode_select(cdi, &cgc); 3015 return cdrom_mode_select(cdi, cgc);
2983 } 3016}
2984 3017
2985 case CDROMSTART: 3018static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
2986 case CDROMSTOP: { 3019 struct packet_command *cgc,
2987 cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n"); 3020 int cmd)
2988 cgc.cmd[0] = GPCMD_START_STOP_UNIT; 3021{
2989 cgc.cmd[1] = 1; 3022 struct cdrom_device_ops *cdo = cdi->ops;
2990 cgc.cmd[4] = (cmd == CDROMSTART) ? 1 : 0; 3023 cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
2991 cgc.data_direction = CGC_DATA_NONE; 3024 cgc->cmd[0] = GPCMD_START_STOP_UNIT;
2992 return cdo->generic_packet(cdi, &cgc); 3025 cgc->cmd[1] = 1;
2993 } 3026 cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
3027 cgc->data_direction = CGC_DATA_NONE;
3028 return cdo->generic_packet(cdi, cgc);
3029}
2994 3030
2995 case CDROMPAUSE: 3031static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
2996 case CDROMRESUME: { 3032 struct packet_command *cgc,
2997 cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n"); 3033 int cmd)
2998 cgc.cmd[0] = GPCMD_PAUSE_RESUME; 3034{
2999 cgc.cmd[8] = (cmd == CDROMRESUME) ? 1 : 0; 3035 struct cdrom_device_ops *cdo = cdi->ops;
3000 cgc.data_direction = CGC_DATA_NONE; 3036 cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
3001 return cdo->generic_packet(cdi, &cgc); 3037 cgc->cmd[0] = GPCMD_PAUSE_RESUME;
3002 } 3038 cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
3039 cgc->data_direction = CGC_DATA_NONE;
3040 return cdo->generic_packet(cdi, cgc);
3041}
3003 3042
3004 case DVD_READ_STRUCT: { 3043static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
3005 dvd_struct *s; 3044 void __user *arg,
3006 int size = sizeof(dvd_struct); 3045 struct packet_command *cgc)
3007 if (!CDROM_CAN(CDC_DVD)) 3046{
3008 return -ENOSYS; 3047 int ret;
3009 if ((s = kmalloc(size, GFP_KERNEL)) == NULL) 3048 dvd_struct *s;
3010 return -ENOMEM; 3049 int size = sizeof(dvd_struct);
3011 cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); 3050
3012 if (copy_from_user(s, (dvd_struct __user *)arg, size)) { 3051 if (!CDROM_CAN(CDC_DVD))
3013 kfree(s); 3052 return -ENOSYS;
3014 return -EFAULT; 3053
3015 } 3054 s = kmalloc(size, GFP_KERNEL);
3016 if ((ret = dvd_read_struct(cdi, s))) { 3055 if (!s)
3017 kfree(s); 3056 return -ENOMEM;
3018 return ret; 3057
3019 } 3058 cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
3020 if (copy_to_user((dvd_struct __user *)arg, s, size)) 3059 if (copy_from_user(s, arg, size)) {
3021 ret = -EFAULT;
3022 kfree(s); 3060 kfree(s);
3061 return -EFAULT;
3062 }
3063
3064 ret = dvd_read_struct(cdi, s, cgc);
3065 if (ret)
3066 goto out;
3067
3068 if (copy_to_user(arg, s, size))
3069 ret = -EFAULT;
3070out:
3071 kfree(s);
3072 return ret;
3073}
3074
3075static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi,
3076 void __user *arg)
3077{
3078 int ret;
3079 dvd_authinfo ai;
3080 if (!CDROM_CAN(CDC_DVD))
3081 return -ENOSYS;
3082 cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
3083 IOCTL_IN(arg, dvd_authinfo, ai);
3084 ret = dvd_do_auth(cdi, &ai);
3085 if (ret)
3023 return ret; 3086 return ret;
3024 } 3087 IOCTL_OUT(arg, dvd_authinfo, ai);
3088 return 0;
3089}
3025 3090
3026 case DVD_AUTH: { 3091static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi,
3027 dvd_authinfo ai; 3092 void __user *arg)
3028 if (!CDROM_CAN(CDC_DVD)) 3093{
3029 return -ENOSYS; 3094 int ret;
3030 cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n"); 3095 long next = 0;
3031 IOCTL_IN(arg, dvd_authinfo, ai); 3096 cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
3032 if ((ret = dvd_do_auth (cdi, &ai))) 3097 ret = cdrom_get_next_writable(cdi, &next);
3033 return ret; 3098 if (ret)
3034 IOCTL_OUT(arg, dvd_authinfo, ai); 3099 return ret;
3035 return 0; 3100 IOCTL_OUT(arg, long, next);
3036 } 3101 return 0;
3102}
3037 3103
3038 case CDROM_NEXT_WRITABLE: { 3104static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
3039 long next = 0; 3105 void __user *arg)
3040 cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n"); 3106{
3041 if ((ret = cdrom_get_next_writable(cdi, &next))) 3107 int ret;
3042 return ret; 3108 long last = 0;
3043 IOCTL_OUT(arg, long, next); 3109 cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
3044 return 0; 3110 ret = cdrom_get_last_written(cdi, &last);
3045 } 3111 if (ret)
3046 case CDROM_LAST_WRITTEN: { 3112 return ret;
3047 long last = 0; 3113 IOCTL_OUT(arg, long, last);
3048 cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n"); 3114 return 0;
3049 if ((ret = cdrom_get_last_written(cdi, &last))) 3115}
3050 return ret; 3116
3051 IOCTL_OUT(arg, long, last); 3117static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
3052 return 0; 3118 unsigned long arg)
3053 } 3119{
3054 } /* switch */ 3120 struct packet_command cgc;
3121 void __user *userptr = (void __user *)arg;
3122
3123 memset(&cgc, 0, sizeof(cgc));
3124
3125 /* build a unified command and queue it through
3126 cdo->generic_packet() */
3127 switch (cmd) {
3128 case CDROMREADRAW:
3129 case CDROMREADMODE1:
3130 case CDROMREADMODE2:
3131 return mmc_ioctl_cdrom_read_data(cdi, userptr, &cgc, cmd);
3132 case CDROMREADAUDIO:
3133 return mmc_ioctl_cdrom_read_audio(cdi, userptr);
3134 case CDROMSUBCHNL:
3135 return mmc_ioctl_cdrom_subchannel(cdi, userptr);
3136 case CDROMPLAYMSF:
3137 return mmc_ioctl_cdrom_play_msf(cdi, userptr, &cgc);
3138 case CDROMPLAYBLK:
3139 return mmc_ioctl_cdrom_play_blk(cdi, userptr, &cgc);
3140 case CDROMVOLCTRL:
3141 case CDROMVOLREAD:
3142 return mmc_ioctl_cdrom_volume(cdi, userptr, &cgc, cmd);
3143 case CDROMSTART:
3144 case CDROMSTOP:
3145 return mmc_ioctl_cdrom_start_stop(cdi, &cgc, cmd);
3146 case CDROMPAUSE:
3147 case CDROMRESUME:
3148 return mmc_ioctl_cdrom_pause_resume(cdi, &cgc, cmd);
3149 case DVD_READ_STRUCT:
3150 return mmc_ioctl_dvd_read_struct(cdi, userptr, &cgc);
3151 case DVD_AUTH:
3152 return mmc_ioctl_dvd_auth(cdi, userptr);
3153 case CDROM_NEXT_WRITABLE:
3154 return mmc_ioctl_cdrom_next_writable(cdi, userptr);
3155 case CDROM_LAST_WRITTEN:
3156 return mmc_ioctl_cdrom_last_written(cdi, userptr);
3157 }
3055 3158
3056 return -ENOTTY; 3159 return -ENOTTY;
3057} 3160}
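The cdrom.c rework above splits the very large mmc_ioctl() switch into one helper per command; the helpers share a caller-owned struct packet_command and release temporary buffers through a single out: label on every path. A compact userspace sketch of that dispatcher-plus-goto-cleanup shape; the command numbers and handler names are invented.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { CMD_READ = 1, CMD_INFO = 2 };

/* One helper per command; the helper owns its temporary buffer and
 * frees it on every exit path via a single out: label. */
static int do_cmd_read(size_t size)
{
        unsigned char *buf;
        int ret = 0;

        buf = malloc(size);
        if (!buf)
                return -ENOMEM;

        memset(buf, 0, size);
        if (size < 12) {                /* pretend validation failed */
                ret = -EIO;
                goto out;
        }
        printf("read %zu bytes\n", size);
out:
        free(buf);
        return ret;
}

static int do_cmd_info(void)
{
        puts("info");
        return 0;
}

/* Thin dispatcher: the switch only routes, the helpers do the work. */
static int dispatch(int cmd, size_t arg)
{
        switch (cmd) {
        case CMD_READ:
                return do_cmd_read(arg);
        case CMD_INFO:
                return do_cmd_info();
        }
        return -ENOTTY;
}

int main(void)
{
        printf("CMD_READ(64) -> %d\n", dispatch(CMD_READ, 64));
        printf("CMD_READ(4)  -> %d\n", dispatch(CMD_READ, 4));
        printf("CMD_INFO     -> %d\n", dispatch(CMD_INFO, 0));
        printf("unknown      -> %d\n", dispatch(99, 0));
        return 0;
}

In the kernel hunk the helpers are additionally marked noinline, presumably so each command's on-stack buffers are not folded back into one oversized mmc_ioctl() frame.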
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 9cf6e9bb017e..c7714185f831 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -40,6 +40,8 @@
40#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 40#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
41#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 41#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
42#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 42#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
43#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
44#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
43 45
44/* cover 915 and 945 variants */ 46/* cover 915 and 945 variants */
45#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ 47#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -63,7 +65,8 @@
63#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \ 65#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
64 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ 66 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
65 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ 67 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
66 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB) 68 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
69 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB)
67 70
68extern int agp_memory_reserved; 71extern int agp_memory_reserved;
69 72
@@ -1196,6 +1199,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1196 case PCI_DEVICE_ID_INTEL_IGD_E_HB: 1199 case PCI_DEVICE_ID_INTEL_IGD_E_HB:
1197 case PCI_DEVICE_ID_INTEL_Q45_HB: 1200 case PCI_DEVICE_ID_INTEL_Q45_HB:
1198 case PCI_DEVICE_ID_INTEL_G45_HB: 1201 case PCI_DEVICE_ID_INTEL_G45_HB:
1202 case PCI_DEVICE_ID_INTEL_G41_HB:
1199 *gtt_offset = *gtt_size = MB(2); 1203 *gtt_offset = *gtt_size = MB(2);
1200 break; 1204 break;
1201 default: 1205 default:
@@ -2156,13 +2160,15 @@ static const struct intel_driver_description {
2156 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", 2160 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
2157 NULL, &intel_g33_driver }, 2161 NULL, &intel_g33_driver },
2158 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, 2162 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
2159 "Mobile Intel? GM45 Express", NULL, &intel_i965_driver }, 2163 "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
2160 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, 2164 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
2161 "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, 2165 "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
2162 { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, 2166 { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
2163 "Q45/Q43", NULL, &intel_i965_driver }, 2167 "Q45/Q43", NULL, &intel_i965_driver },
2164 { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, 2168 { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
2165 "G45/G43", NULL, &intel_i965_driver }, 2169 "G45/G43", NULL, &intel_i965_driver },
2170 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
2171 "G41", NULL, &intel_i965_driver },
2166 { 0, 0, 0, NULL, NULL, NULL } 2172 { 0, 0, 0, NULL, NULL, NULL }
2167}; 2173};
2168 2174
@@ -2360,6 +2366,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
2360 ID(PCI_DEVICE_ID_INTEL_IGD_E_HB), 2366 ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
2361 ID(PCI_DEVICE_ID_INTEL_Q45_HB), 2367 ID(PCI_DEVICE_ID_INTEL_Q45_HB),
2362 ID(PCI_DEVICE_ID_INTEL_G45_HB), 2368 ID(PCI_DEVICE_ID_INTEL_G45_HB),
2369 ID(PCI_DEVICE_ID_INTEL_G41_HB),
2363 { } 2370 { }
2364}; 2371};
2365 2372
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 74e9cd81b5b2..61f0146e215d 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -43,52 +43,51 @@ static const char *fan_state[] = { "off", "on", "on (hardwired)" };
43 * chance that the WaveArtist driver could touch these bits to 43 * chance that the WaveArtist driver could touch these bits to
44 * enable or disable the speaker. 44 * enable or disable the speaker.
45 */ 45 */
46extern spinlock_t gpio_lock;
47extern unsigned int system_rev; 46extern unsigned int system_rev;
48 47
49static inline void netwinder_ds1620_set_clk(int clk) 48static inline void netwinder_ds1620_set_clk(int clk)
50{ 49{
51 gpio_modify_op(GPIO_DSCLK, clk ? GPIO_DSCLK : 0); 50 nw_gpio_modify_op(GPIO_DSCLK, clk ? GPIO_DSCLK : 0);
52} 51}
53 52
54static inline void netwinder_ds1620_set_data(int dat) 53static inline void netwinder_ds1620_set_data(int dat)
55{ 54{
56 gpio_modify_op(GPIO_DATA, dat ? GPIO_DATA : 0); 55 nw_gpio_modify_op(GPIO_DATA, dat ? GPIO_DATA : 0);
57} 56}
58 57
59static inline int netwinder_ds1620_get_data(void) 58static inline int netwinder_ds1620_get_data(void)
60{ 59{
61 return gpio_read() & GPIO_DATA; 60 return nw_gpio_read() & GPIO_DATA;
62} 61}
63 62
64static inline void netwinder_ds1620_set_data_dir(int dir) 63static inline void netwinder_ds1620_set_data_dir(int dir)
65{ 64{
66 gpio_modify_io(GPIO_DATA, dir ? GPIO_DATA : 0); 65 nw_gpio_modify_io(GPIO_DATA, dir ? GPIO_DATA : 0);
67} 66}
68 67
69static inline void netwinder_ds1620_reset(void) 68static inline void netwinder_ds1620_reset(void)
70{ 69{
71 cpld_modify(CPLD_DS_ENABLE, 0); 70 nw_cpld_modify(CPLD_DS_ENABLE, 0);
72 cpld_modify(CPLD_DS_ENABLE, CPLD_DS_ENABLE); 71 nw_cpld_modify(CPLD_DS_ENABLE, CPLD_DS_ENABLE);
73} 72}
74 73
75static inline void netwinder_lock(unsigned long *flags) 74static inline void netwinder_lock(unsigned long *flags)
76{ 75{
77 spin_lock_irqsave(&gpio_lock, *flags); 76 spin_lock_irqsave(&nw_gpio_lock, *flags);
78} 77}
79 78
80static inline void netwinder_unlock(unsigned long *flags) 79static inline void netwinder_unlock(unsigned long *flags)
81{ 80{
82 spin_unlock_irqrestore(&gpio_lock, *flags); 81 spin_unlock_irqrestore(&nw_gpio_lock, *flags);
83} 82}
84 83
85static inline void netwinder_set_fan(int i) 84static inline void netwinder_set_fan(int i)
86{ 85{
87 unsigned long flags; 86 unsigned long flags;
88 87
89 spin_lock_irqsave(&gpio_lock, flags); 88 spin_lock_irqsave(&nw_gpio_lock, flags);
90 gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0); 89 nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0);
91 spin_unlock_irqrestore(&gpio_lock, flags); 90 spin_unlock_irqrestore(&nw_gpio_lock, flags);
92} 91}
93 92
94static inline int netwinder_get_fan(void) 93static inline int netwinder_get_fan(void)
@@ -96,7 +95,7 @@ static inline int netwinder_get_fan(void)
96 if ((system_rev & 0xf000) == 0x4000) 95 if ((system_rev & 0xf000) == 0x4000)
97 return FAN_ALWAYS_ON; 96 return FAN_ALWAYS_ON;
98 97
99 return (gpio_read() & GPIO_FAN) ? FAN_ON : FAN_OFF; 98 return (nw_gpio_read() & GPIO_FAN) ? FAN_ON : FAN_OFF;
100} 99}
101 100
102/* 101/*
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 53fdc7ff3870..32b8bbf5003e 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -46,7 +46,7 @@
46/* 46/*
47 * The High Precision Event Timer driver. 47 * The High Precision Event Timer driver.
48 * This driver is closely modelled after the rtc.c driver. 48 * This driver is closely modelled after the rtc.c driver.
49 * http://www.intel.com/hardwaredesign/hpetspec.htm 49 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
50 */ 50 */
51#define HPET_USER_FREQ (64) 51#define HPET_USER_FREQ (64)
52#define HPET_DRIFT (500) 52#define HPET_DRIFT (500)
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index fb57f67bb427..0587b66d6fc7 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -695,6 +695,7 @@ void hvc_resize(struct hvc_struct *hp, struct winsize ws)
695 hp->ws = ws; 695 hp->ws = ws;
696 schedule_work(&hp->tty_resize); 696 schedule_work(&hp->tty_resize);
697} 697}
698EXPORT_SYMBOL_GPL(hvc_resize);
698 699
699/* 700/*
700 * This kthread is either polling or interrupt driven. This is determined by 701 * This kthread is either polling or interrupt driven. This is determined by
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index 006be92ee3f3..8c7df5ba088f 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -58,8 +58,6 @@ static volatile unsigned char *FLASH_BASE;
58static int gbFlashSize = KFLASH_SIZE; 58static int gbFlashSize = KFLASH_SIZE;
59static DEFINE_MUTEX(nwflash_mutex); 59static DEFINE_MUTEX(nwflash_mutex);
60 60
61extern spinlock_t gpio_lock;
62
63static int get_flash_id(void) 61static int get_flash_id(void)
64{ 62{
65 volatile unsigned int c1, c2; 63 volatile unsigned int c1, c2;
@@ -616,9 +614,9 @@ static void kick_open(void)
616 * we want to write a bit pattern XXX1 to Xilinx to enable 614 * we want to write a bit pattern XXX1 to Xilinx to enable
617 * the write gate, which will be open for about the next 2ms. 615 * the write gate, which will be open for about the next 2ms.
618 */ 616 */
619 spin_lock_irqsave(&gpio_lock, flags); 617 spin_lock_irqsave(&nw_gpio_lock, flags);
620 cpld_modify(1, 1); 618 nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
621 spin_unlock_irqrestore(&gpio_lock, flags); 619 spin_unlock_irqrestore(&nw_gpio_lock, flags);
622 620
623 /* 621 /*
624 * let the ISA bus to catch on... 622 * let the ISA bus to catch on...
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 675076f5fca8..d26891bfcd41 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -558,23 +558,9 @@ struct timer_rand_state {
558 unsigned dont_count_entropy:1; 558 unsigned dont_count_entropy:1;
559}; 559};
560 560
561static struct timer_rand_state *irq_timer_state[NR_IRQS]; 561#ifndef CONFIG_SPARSE_IRQ
562 562struct timer_rand_state *irq_timer_state[NR_IRQS];
563static struct timer_rand_state *get_timer_rand_state(unsigned int irq) 563#endif
564{
565 if (irq >= nr_irqs)
566 return NULL;
567
568 return irq_timer_state[irq];
569}
570
571static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
572{
573 if (irq >= nr_irqs)
574 return;
575
576 irq_timer_state[irq] = state;
577}
578 564
579static struct timer_rand_state input_timer_state; 565static struct timer_rand_state input_timer_state;
580 566
@@ -933,8 +919,10 @@ void rand_initialize_irq(int irq)
933{ 919{
934 struct timer_rand_state *state; 920 struct timer_rand_state *state;
935 921
922#ifndef CONFIG_SPARSE_IRQ
936 if (irq >= nr_irqs) 923 if (irq >= nr_irqs)
937 return; 924 return;
925#endif
938 926
939 state = get_timer_rand_state(irq); 927 state = get_timer_rand_state(irq);
940 928
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 3fb0d2c88ba5..ff6f5a4b58fb 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -138,12 +138,33 @@ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
138} 138}
139 139
140/* 140/*
141 * virtio console configuration. This supports:
142 * - console resize
143 */
144static void virtcons_apply_config(struct virtio_device *dev)
145{
146 struct winsize ws;
147
148 if (virtio_has_feature(dev, VIRTIO_CONSOLE_F_SIZE)) {
149 dev->config->get(dev,
150 offsetof(struct virtio_console_config, cols),
151 &ws.ws_col, sizeof(u16));
152 dev->config->get(dev,
153 offsetof(struct virtio_console_config, rows),
154 &ws.ws_row, sizeof(u16));
155 hvc_resize(hvc, ws);
156 }
157}
158
159/*
141 * we support only one console, the hvc struct is a global var 160 * we support only one console, the hvc struct is a global var
142 * There is no need to do anything 161 * We set the configuration at this point, since we now have a tty
143 */ 162 */
144static int notifier_add_vio(struct hvc_struct *hp, int data) 163static int notifier_add_vio(struct hvc_struct *hp, int data)
145{ 164{
146 hp->irq_requested = 1; 165 hp->irq_requested = 1;
166 virtcons_apply_config(vdev);
167
147 return 0; 168 return 0;
148} 169}
149 170
@@ -234,11 +255,18 @@ static struct virtio_device_id id_table[] = {
234 { 0 }, 255 { 0 },
235}; 256};
236 257
258static unsigned int features[] = {
259 VIRTIO_CONSOLE_F_SIZE,
260};
261
237static struct virtio_driver virtio_console = { 262static struct virtio_driver virtio_console = {
263 .feature_table = features,
264 .feature_table_size = ARRAY_SIZE(features),
238 .driver.name = KBUILD_MODNAME, 265 .driver.name = KBUILD_MODNAME,
239 .driver.owner = THIS_MODULE, 266 .driver.owner = THIS_MODULE,
240 .id_table = id_table, 267 .id_table = id_table,
241 .probe = virtcons_probe, 268 .probe = virtcons_probe,
269 .config_changed = virtcons_apply_config,
242}; 270};
243 271
244static int __init init(void) 272static int __init init(void)
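The virtio_console hunk above reads the host-supplied cols/rows out of the device config space by field offset (offsetof into struct virtio_console_config) and passes them to hvc_resize(). A tiny userspace sketch of that offset-based config read; config_get() and struct console_config are stand-ins for the virtio config ops, and byte-order handling is ignored here.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct virtio_console_config. */
struct console_config {
        uint16_t cols;
        uint16_t rows;
};

/* Stand-in for dev->config->get(): copy len bytes at offset from the
 * device's config space into buf. */
static void config_get(const void *cfg_space, size_t offset,
                       void *buf, size_t len)
{
        memcpy(buf, (const char *)cfg_space + offset, len);
}

int main(void)
{
        struct console_config cfg = { .cols = 80, .rows = 25 };
        uint16_t cols, rows;

        config_get(&cfg, offsetof(struct console_config, cols),
                   &cols, sizeof(cols));
        config_get(&cfg, offsetof(struct console_config, rows),
                   &rows, sizeof(rows));
        printf("console size: %u x %u\n", cols, rows);
        return 0;
}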
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index c20171078d1d..e1129fad96dd 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -57,11 +57,6 @@ u32 acpi_pm_read_verified(void)
57 return v2; 57 return v2;
58} 58}
59 59
60static cycle_t acpi_pm_read_slow(void)
61{
62 return (cycle_t)acpi_pm_read_verified();
63}
64
65static cycle_t acpi_pm_read(void) 60static cycle_t acpi_pm_read(void)
66{ 61{
67 return (cycle_t)read_pmtmr(); 62 return (cycle_t)read_pmtmr();
@@ -88,6 +83,11 @@ static int __init acpi_pm_good_setup(char *__str)
88} 83}
89__setup("acpi_pm_good", acpi_pm_good_setup); 84__setup("acpi_pm_good", acpi_pm_good_setup);
90 85
86static cycle_t acpi_pm_read_slow(void)
87{
88 return (cycle_t)acpi_pm_read_verified();
89}
90
91static inline void acpi_pm_need_workaround(void) 91static inline void acpi_pm_need_workaround(void)
92{ 92{
93 clocksource_acpi_pm.read = acpi_pm_read_slow; 93 clocksource_acpi_pm.read = acpi_pm_read_slow;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 4a597d8c2f70..78b989d202a3 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -582,3 +582,19 @@ int dmi_walk(void (*decode)(const struct dmi_header *))
582 return 0; 582 return 0;
583} 583}
584EXPORT_SYMBOL_GPL(dmi_walk); 584EXPORT_SYMBOL_GPL(dmi_walk);
585
586/**
587 * dmi_match - compare a string to the dmi field (if exists)
588 *
589 * Returns true if the requested field equals to the str (including NULL).
590 */
591bool dmi_match(enum dmi_field f, const char *str)
592{
593 const char *info = dmi_get_system_info(f);
594
595 if (info == NULL || str == NULL)
596 return info == str;
597
598 return !strcmp(info, str);
599}
600EXPORT_SYMBOL_GPL(dmi_match);
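The new dmi_match() above compares a DMI field against a string and treats NULL specially: if either side is NULL, it matches only when both are. A quick userspace sketch of the same comparison rule, with a stub table standing in for dmi_get_system_info():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stub DMI data: field 0 is present, field 1 is missing. */
static const char *fake_dmi[] = { "ExampleVendor", NULL };

static bool match_field(int field, const char *str)
{
        const char *info = fake_dmi[field];

        /* Same rule as dmi_match(): with a NULL on either side,
         * succeed only when both sides are NULL. */
        if (info == NULL || str == NULL)
                return info == str;

        return !strcmp(info, str);
}

int main(void)
{
        printf("%d\n", match_field(0, "ExampleVendor"));       /* 1 */
        printf("%d\n", match_field(0, "Other"));                /* 0 */
        printf("%d\n", match_field(1, NULL));                   /* 1 */
        printf("%d\n", match_field(1, "Other"));                /* 0 */
        return 0;
}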
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a8b33c2ec8d2..5130b72d593c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,8 @@
7menuconfig DRM 7menuconfig DRM
8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
9 depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU 9 depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
10 select I2C
11 select I2C_ALGOBIT
10 help 12 help
11 Kernel-level support for the Direct Rendering Infrastructure (DRI) 13 Kernel-level support for the Direct Rendering Infrastructure (DRI)
12 introduced in XFree86 4.0. If you say Y here, you need to select 14 introduced in XFree86 4.0. If you say Y here, you need to select
@@ -65,6 +67,10 @@ config DRM_I830
65 will load the correct one. 67 will load the correct one.
66 68
67config DRM_I915 69config DRM_I915
70 select FB_CFB_FILLRECT
71 select FB_CFB_COPYAREA
72 select FB_CFB_IMAGEBLIT
73 depends on FB
68 tristate "i915 driver" 74 tristate "i915 driver"
69 help 75 help
70 Choose this option if you have a system that has Intel 830M, 845G, 76 Choose this option if you have a system that has Intel 830M, 845G,
@@ -76,6 +82,17 @@ config DRM_I915
76 82
77endchoice 83endchoice
78 84
85config DRM_I915_KMS
86 bool "Enable modesetting on intel by default"
87 depends on DRM_I915
88 help
89 Choose this option if you want kernel modesetting enabled by default,
90 and you have a new enough userspace to support this. Running old
91 userspaces with this enabled will cause pain. Note that this causes
92 the driver to bind to PCI devices, which precludes loading things
93 like intelfb.
94
95
79config DRM_MGA 96config DRM_MGA
80 tristate "Matrox g200/g400" 97 tristate "Matrox g200/g400"
81 depends on DRM 98 depends on DRM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 74da99495e21..30022c4a5c12 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o 12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
13 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
13 14
14drm-$(CONFIG_COMPAT) += drm_ioc32.o 15drm-$(CONFIG_COMPAT) += drm_ioc32.o
15 16
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index a73462723d2d..ca7a9ef5007b 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -45,14 +45,15 @@
  * the one with matching magic number, while holding the drm_device::struct_mutex
  * lock.
  */
-static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
 {
 	struct drm_file *retval = NULL;
 	struct drm_magic_entry *pt;
 	struct drm_hash_item *hash;
+	struct drm_device *dev = master->minor->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+	if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
 		pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
 		retval = pt->priv;
 	}
@@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic
  * associated the magic number hash key in drm_device::magiclist, while holding
  * the drm_device::struct_mutex lock.
  */
-static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
 			 drm_magic_t magic)
 {
 	struct drm_magic_entry *entry;
-
+	struct drm_device *dev = master->minor->dev;
 	DRM_DEBUG("%d\n", magic);
 
 	entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
@@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
 		return -ENOMEM;
 	memset(entry, 0, sizeof(*entry));
 	entry->priv = priv;
-
 	entry->hash_item.key = (unsigned long)magic;
 	mutex_lock(&dev->struct_mutex);
-	drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
-	list_add_tail(&entry->head, &dev->magicfree);
+	drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+	list_add_tail(&entry->head, &master->magicfree);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
  * Searches and unlinks the entry in drm_device::magiclist with the magic
  * number hash key, while holding the drm_device::struct_mutex lock.
  */
-static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
+static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
 {
 	struct drm_magic_entry *pt;
 	struct drm_hash_item *hash;
+	struct drm_device *dev = master->minor->dev;
 
 	DRM_DEBUG("%d\n", magic);
 
 	mutex_lock(&dev->struct_mutex);
-	if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+	if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 	pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
-	drm_ht_remove_item(&dev->magiclist, hash);
+	drm_ht_remove_item(&master->magiclist, hash);
 	list_del(&pt->head);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 			++sequence;	/* reserve 0 */
 			auth->magic = sequence++;
 			spin_unlock(&lock);
-		} while (drm_find_file(dev, auth->magic));
+		} while (drm_find_file(file_priv->master, auth->magic));
 		file_priv->magic = auth->magic;
-		drm_add_magic(dev, file_priv, auth->magic);
+		drm_add_magic(file_priv->master, file_priv, auth->magic);
 	}
 
 	DRM_DEBUG("%u\n", auth->magic);
@@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data,
 	struct drm_file *file;
 
 	DRM_DEBUG("%u\n", auth->magic);
-	if ((file = drm_find_file(dev, auth->magic))) {
+	if ((file = drm_find_file(file_priv->master, auth->magic))) {
 		file->authenticated = 1;
-		drm_remove_magic(dev, auth->magic);
+		drm_remove_magic(file_priv->master, auth->magic);
 		return 0;
 	}
 	return -EINVAL;
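
The drm_auth.c hunks above move the magic-cookie state from the drm_device into the per-master drm_master, so each master (for example each X server) keeps its own magic list. A minimal sketch of the resulting lookup pattern, using only fields and helpers that the hunks themselves touch (master->minor->dev, master->magiclist, drm_ht_find_item); the function name is made up for illustration:

static bool example_magic_exists(struct drm_master *master, drm_magic_t magic)
{
	struct drm_device *dev = master->minor->dev;	/* same derivation as drm_find_file() */
	struct drm_hash_item *hash;
	bool found;

	mutex_lock(&dev->struct_mutex);
	found = !drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash);
	mutex_unlock(&dev->struct_mutex);
	return found;
}
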
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bde64b84166e..72c667f9bee1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -54,9 +54,9 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 {
 	struct drm_map_list *entry;
 	list_for_each_entry(entry, &dev->maplist, head) {
-		if (entry->map && map->type == entry->map->type &&
+		if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
 		    ((entry->map->offset == map->offset) ||
-		     (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
+		     ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
 			return entry;
 		}
 	}
@@ -210,12 +210,12 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 		map->offset = (unsigned long)map->handle;
 		if (map->flags & _DRM_CONTAINS_LOCK) {
 			/* Prevent a 2nd X Server from creating a 2nd lock */
-			if (dev->lock.hw_lock != NULL) {
+			if (dev->primary->master->lock.hw_lock != NULL) {
 				vfree(map->handle);
 				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 				return -EBUSY;
 			}
-			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
+			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
 		}
 		break;
 	case _DRM_AGP: {
@@ -262,6 +262,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
 
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to addmap GEM object\n");
+		break;
 	}
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
@@ -319,6 +322,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 	list->user_token = list->hash.key << PAGE_SHIFT;
 	mutex_unlock(&dev->struct_mutex);
 
+	list->master = dev->primary->master;
 	*maplist = list;
 	return 0;
 }
@@ -345,7 +349,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_map_list *maplist;
 	int err;
 
-	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
+	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 		return -EPERM;
 
 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -380,10 +384,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 	struct drm_map_list *r_list = NULL, *list_t;
 	drm_dma_handle_t dmah;
 	int found = 0;
+	struct drm_master *master;
 
 	/* Find the list entry for the map and remove it */
 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
 		if (r_list->map == map) {
+			master = r_list->master;
 			list_del(&r_list->head);
 			drm_ht_remove_key(&dev->map_hash,
 					  r_list->user_token >> PAGE_SHIFT);
@@ -409,6 +415,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		break;
 	case _DRM_SHM:
 		vfree(map->handle);
+		if (master) {
+			if (dev->sigdata.lock == master->lock.hw_lock)
+				dev->sigdata.lock = NULL;
+			master->lock.hw_lock = NULL;   /* SHM removed */
+			master->lock.file_priv = NULL;
+			wake_up_interruptible(&master->lock.lock_queue);
+		}
 		break;
 	case _DRM_AGP:
 	case _DRM_SCATTER_GATHER:
@@ -419,11 +432,15 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_rmmap_locked);
 
 int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
 {
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index d505f695421f..809ec0f03452 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -256,12 +256,13 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
  * hardware lock is held, clears the drm_device::context_flag and wakes up
  * drm_device::context_wait.
  */
-static int drm_context_switch_complete(struct drm_device * dev, int new)
+static int drm_context_switch_complete(struct drm_device *dev,
+				       struct drm_file *file_priv, int new)
 {
 	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
 	dev->last_switch = jiffies;
 
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
 		DRM_ERROR("Lock isn't held after context switch\n");
 	}
 
@@ -420,7 +421,7 @@ int drm_newctx(struct drm_device *dev, void *data,
 	struct drm_ctx *ctx = data;
 
 	DRM_DEBUG("%d\n", ctx->handle);
-	drm_context_switch_complete(dev, ctx->handle);
+	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
 	return 0;
 }
@@ -442,9 +443,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 	struct drm_ctx *ctx = data;
 
 	DRM_DEBUG("%d\n", ctx->handle);
-	if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
-		file_priv->remove_auth_on_close = 1;
-	}
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_dtor)
 			dev->driver->context_dtor(dev, ctx->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
new file mode 100644
index 000000000000..53c87254be4c
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -0,0 +1,2446 @@
1/*
2 * Copyright (c) 2006-2008 Intel Corporation
3 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
4 * Copyright (c) 2008 Red Hat Inc.
5 *
6 * DRM core CRTC related functions
7 *
8 * Permission to use, copy, modify, distribute, and sell this software and its
9 * documentation for any purpose is hereby granted without fee, provided that
10 * the above copyright notice appear in all copies and that both that copyright
11 * notice and this permission notice appear in supporting documentation, and
12 * that the name of the copyright holders not be used in advertising or
13 * publicity pertaining to distribution of the software without specific,
14 * written prior permission. The copyright holders make no representations
15 * about the suitability of this software for any purpose. It is provided "as
16 * is" without express or implied warranty.
17 *
18 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
19 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
20 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
21 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
22 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
23 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
24 * OF THIS SOFTWARE.
25 *
26 * Authors:
27 * Keith Packard
28 * Eric Anholt <eric@anholt.net>
29 * Dave Airlie <airlied@linux.ie>
30 * Jesse Barnes <jesse.barnes@intel.com>
31 */
32#include <linux/list.h>
33#include "drm.h"
34#include "drmP.h"
35#include "drm_crtc.h"
36
37struct drm_prop_enum_list {
38 int type;
39 char *name;
40};
41
42/* Avoid boilerplate. I'm tired of typing. */
43#define DRM_ENUM_NAME_FN(fnname, list) \
44 char *fnname(int val) \
45 { \
46 int i; \
47 for (i = 0; i < ARRAY_SIZE(list); i++) { \
48 if (list[i].type == val) \
49 return list[i].name; \
50 } \
51 return "(unknown)"; \
52 }
53
54/*
55 * Global properties
56 */
57static struct drm_prop_enum_list drm_dpms_enum_list[] =
58{ { DRM_MODE_DPMS_ON, "On" },
59 { DRM_MODE_DPMS_STANDBY, "Standby" },
60 { DRM_MODE_DPMS_SUSPEND, "Suspend" },
61 { DRM_MODE_DPMS_OFF, "Off" }
62};
63
64DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
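/*
 * For reference, the DRM_ENUM_NAME_FN() invocation above expands to roughly
 * the following lookup function (shown here only as an illustration):
 *
 *	char *drm_get_dpms_name(int val)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) {
 *			if (drm_dpms_enum_list[i].type == val)
 *				return drm_dpms_enum_list[i].name;
 *		}
 *		return "(unknown)";
 *	}
 */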
65
66/*
67 * Optional properties
68 */
69static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
70{
71 { DRM_MODE_SCALE_NON_GPU, "Non-GPU" },
72 { DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" },
73 { DRM_MODE_SCALE_NO_SCALE, "No scale" },
74 { DRM_MODE_SCALE_ASPECT, "Aspect" },
75};
76
77static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
78{
79 { DRM_MODE_DITHERING_OFF, "Off" },
80 { DRM_MODE_DITHERING_ON, "On" },
81};
82
83/*
84 * Non-global properties, but "required" for certain connectors.
85 */
86static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
87{
88 { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
89 { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
90 { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
91};
92
93DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
94
95static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
96{
97 { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
98 { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
99 { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
100};
101
102DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
103 drm_dvi_i_subconnector_enum_list)
104
105static struct drm_prop_enum_list drm_tv_select_enum_list[] =
106{
107 { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
108 { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
109 { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
110 { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
111};
112
113DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
114
115static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
116{
117 { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
118 { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
119 { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
120 { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
121};
122
123DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
124 drm_tv_subconnector_enum_list)
125
126struct drm_conn_prop_enum_list {
127 int type;
128 char *name;
129 int count;
130};
131
132/*
133 * Connector and encoder types.
134 */
135static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
136{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
137 { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
138 { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
139 { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
140 { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
141 { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
142 { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
143 { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
144 { DRM_MODE_CONNECTOR_Component, "Component", 0 },
145 { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
146 { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
147 { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
148 { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
149};
150
151static struct drm_prop_enum_list drm_encoder_enum_list[] =
152{ { DRM_MODE_ENCODER_NONE, "None" },
153 { DRM_MODE_ENCODER_DAC, "DAC" },
154 { DRM_MODE_ENCODER_TMDS, "TMDS" },
155 { DRM_MODE_ENCODER_LVDS, "LVDS" },
156 { DRM_MODE_ENCODER_TVDAC, "TV" },
157};
158
159char *drm_get_encoder_name(struct drm_encoder *encoder)
160{
161 static char buf[32];
162
163 snprintf(buf, 32, "%s-%d",
164 drm_encoder_enum_list[encoder->encoder_type].name,
165 encoder->base.id);
166 return buf;
167}
168
169char *drm_get_connector_name(struct drm_connector *connector)
170{
171 static char buf[32];
172
173 snprintf(buf, 32, "%s-%d",
174 drm_connector_enum_list[connector->connector_type].name,
175 connector->connector_type_id);
176 return buf;
177}
178EXPORT_SYMBOL(drm_get_connector_name);
179
180char *drm_get_connector_status_name(enum drm_connector_status status)
181{
182 if (status == connector_status_connected)
183 return "connected";
184 else if (status == connector_status_disconnected)
185 return "disconnected";
186 else
187 return "unknown";
188}
189
190/**
191 * drm_mode_object_get - allocate a new identifier
192 * @dev: DRM device
193 * @ptr: object pointer, used to generate unique ID
194 * @type: object type
195 *
196 * LOCKING:
197 * Caller must hold DRM mode_config lock.
198 *
199 * Create a unique identifier based on @ptr in @dev's identifier space. Used
200 * for tracking modes, CRTCs and connectors.
201 *
202 * RETURNS:
203 * New unique (relative to other objects in @dev) integer identifier for the
204 * object.
205 */
206static int drm_mode_object_get(struct drm_device *dev,
207 struct drm_mode_object *obj, uint32_t obj_type)
208{
209 int new_id = 0;
210 int ret;
211
212 WARN(!mutex_is_locked(&dev->mode_config.mutex),
213 "%s called w/o mode_config lock\n", __FUNCTION__);
214again:
215 if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
216		DRM_ERROR("Ran out of memory getting a mode number\n");
217 return -EINVAL;
218 }
219
220 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
221 if (ret == -EAGAIN)
222 goto again;
223
224 obj->id = new_id;
225 obj->type = obj_type;
226 return 0;
227}
228
229/**
230 * drm_mode_object_put - free an identifier
231 * @dev: DRM device
232 * @id: ID to free
233 *
234 * LOCKING:
235 * Caller must hold DRM mode_config lock.
236 *
237 * Free @id from @dev's unique identifier pool.
238 */
239static void drm_mode_object_put(struct drm_device *dev,
240 struct drm_mode_object *object)
241{
242 idr_remove(&dev->mode_config.crtc_idr, object->id);
243}
244
245void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
246{
247 struct drm_mode_object *obj;
248
249 obj = idr_find(&dev->mode_config.crtc_idr, id);
250 if (!obj || (obj->type != type) || (obj->id != id))
251 return NULL;
252
253 return obj;
254}
255EXPORT_SYMBOL(drm_mode_object_find);
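/*
 * Illustrative sketch (not part of the original file): the ioctl handlers
 * below combine drm_mode_object_find() with a container cast such as
 * obj_to_crtc() to resolve a user-supplied ID into a driver object.
 */
static inline struct drm_crtc *example_lookup_crtc(struct drm_device *dev,
						   uint32_t id)
{
	struct drm_mode_object *obj;

	obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
	if (!obj)
		return NULL;		/* unknown ID or wrong object type */
	return obj_to_crtc(obj);	/* drm_mode_object is embedded in drm_crtc */
}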
256
257/**
258 * drm_crtc_from_fb - find the CRTC structure associated with an fb
259 * @dev: DRM device
260 * @fb: framebuffer in question
261 *
262 * LOCKING:
263 * Caller must hold mode_config lock.
264 *
265 * Find CRTC in the mode_config structure that matches @fb.
266 *
267 * RETURNS:
268 * Pointer to the CRTC or NULL if it wasn't found.
269 */
270struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
271 struct drm_framebuffer *fb)
272{
273 struct drm_crtc *crtc;
274
275 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
276 if (crtc->fb == fb)
277 return crtc;
278 }
279 return NULL;
280}
281
282/**
283 * drm_framebuffer_init - initialize a framebuffer
284 * @dev: DRM device
285 *
286 * LOCKING:
287 * Caller must hold mode config lock.
288 *
289 * Allocates an ID for the framebuffer's parent mode object, sets its mode
290 * functions & device file and adds it to the master fd list.
291 *
292 * RETURNS:
293 * Zero on success, error code on failure.
294 */
295int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
296 const struct drm_framebuffer_funcs *funcs)
297{
298 int ret;
299
300 ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
301 if (ret) {
302 return ret;
303 }
304
305 fb->dev = dev;
306 fb->funcs = funcs;
307 dev->mode_config.num_fb++;
308 list_add(&fb->head, &dev->mode_config.fb_list);
309
310 return 0;
311}
312EXPORT_SYMBOL(drm_framebuffer_init);
313
314/**
315 * drm_framebuffer_cleanup - remove a framebuffer object
316 * @fb: framebuffer to remove
317 *
318 * LOCKING:
319 * Caller must hold mode config lock.
320 *
321 * Scans all the CRTCs in @fb's device's mode_config. If any are using @fb,
322 * clears that CRTC's fb pointer (sets it to NULL).
323 */
324void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
325{
326 struct drm_device *dev = fb->dev;
327 struct drm_crtc *crtc;
328
329 /* remove from any CRTC */
330 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
331 if (crtc->fb == fb)
332 crtc->fb = NULL;
333 }
334
335 drm_mode_object_put(dev, &fb->base);
336 list_del(&fb->head);
337 dev->mode_config.num_fb--;
338}
339EXPORT_SYMBOL(drm_framebuffer_cleanup);
340
341/**
342 * drm_crtc_init - Initialise a new CRTC object
343 * @dev: DRM device
344 * @crtc: CRTC object to init
345 * @funcs: callbacks for the new CRTC
346 *
347 * LOCKING:
348 * Caller must hold mode config lock.
349 *
350 * Inits a new object created as the base part of a driver CRTC object.
351 */
352void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
353 const struct drm_crtc_funcs *funcs)
354{
355 crtc->dev = dev;
356 crtc->funcs = funcs;
357
358 mutex_lock(&dev->mode_config.mutex);
359 drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
360
361 list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
362 dev->mode_config.num_crtc++;
363 mutex_unlock(&dev->mode_config.mutex);
364}
365EXPORT_SYMBOL(drm_crtc_init);
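/*
 * Illustrative sketch (hypothetical driver code, the "foo_*" names are made
 * up): drivers embed struct drm_crtc in their own CRTC type and register it
 * with drm_crtc_init(), which takes the mode_config mutex itself.
 */
struct foo_crtc {
	struct drm_crtc base;
	/* driver-private CRTC state would follow here */
};

static struct foo_crtc *foo_crtc_create(struct drm_device *dev,
					const struct drm_crtc_funcs *funcs)
{
	struct foo_crtc *foo_crtc = kzalloc(sizeof(*foo_crtc), GFP_KERNEL);

	if (!foo_crtc)
		return NULL;
	drm_crtc_init(dev, &foo_crtc->base, funcs);
	return foo_crtc;
}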
366
367/**
368 * drm_crtc_cleanup - Cleans up the core crtc usage.
369 * @crtc: CRTC to cleanup
370 *
371 * LOCKING:
372 * Caller must hold mode config lock.
373 *
374 * Cleanup @crtc: removes it from the DRM mode setting core. Does NOT free
375 * the object itself; the caller does that.
376 */
377void drm_crtc_cleanup(struct drm_crtc *crtc)
378{
379 struct drm_device *dev = crtc->dev;
380
381 if (crtc->gamma_store) {
382 kfree(crtc->gamma_store);
383 crtc->gamma_store = NULL;
384 }
385
386 drm_mode_object_put(dev, &crtc->base);
387 list_del(&crtc->head);
388 dev->mode_config.num_crtc--;
389}
390EXPORT_SYMBOL(drm_crtc_cleanup);
391
392/**
393 * drm_mode_probed_add - add a mode to a connector's probed mode list
394 * @connector: connector the new mode is added to
395 * @mode: mode data
396 *
397 * LOCKING:
398 * Caller must hold mode config lock.
399 *
400 * Add @mode to @connector's mode list for later use.
401 */
402void drm_mode_probed_add(struct drm_connector *connector,
403 struct drm_display_mode *mode)
404{
405 list_add(&mode->head, &connector->probed_modes);
406}
407EXPORT_SYMBOL(drm_mode_probed_add);
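/*
 * Illustrative sketch (hypothetical): a connector's ->fill_modes hook would
 * typically allocate modes (via drm_mode_create(), defined later in this
 * file, or the EDID helpers) and queue them with drm_mode_probed_add().
 * The 1024x768 numbers are made up for the example.
 */
static int foo_connector_fill_modes_example(struct drm_connector *connector)
{
	struct drm_display_mode *mode = drm_mode_create(connector->dev);

	if (!mode)
		return 0;
	mode->hdisplay = 1024;
	mode->vdisplay = 768;
	drm_mode_probed_add(connector, mode);
	return 1;			/* number of modes added */
}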
408
409/**
410 * drm_mode_remove - remove and free a mode
411 * @connector: connector list to modify
412 * @mode: mode to remove
413 *
414 * LOCKING:
415 * Caller must hold mode config lock.
416 *
417 * Remove @mode from @connector's mode list, then free it.
418 */
419void drm_mode_remove(struct drm_connector *connector,
420 struct drm_display_mode *mode)
421{
422 list_del(&mode->head);
423 kfree(mode);
424}
425EXPORT_SYMBOL(drm_mode_remove);
426
427/**
428 * drm_connector_init - Init a preallocated connector
429 * @dev: DRM device
430 * @connector: the connector to init
431 * @funcs: callbacks for this connector
432 * @connector_type: user visible type of the connector (DRM_MODE_CONNECTOR_*)
433 *
434 * LOCKING:
435 * Caller must hold @dev's mode_config lock.
436 *
437 * Initialises a preallocated connector. Connectors should be
438 * subclassed as part of driver connector objects.
439 */
440void drm_connector_init(struct drm_device *dev,
441 struct drm_connector *connector,
442 const struct drm_connector_funcs *funcs,
443 int connector_type)
444{
445 mutex_lock(&dev->mode_config.mutex);
446
447 connector->dev = dev;
448 connector->funcs = funcs;
449 drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
450 connector->connector_type = connector_type;
451 connector->connector_type_id =
452 ++drm_connector_enum_list[connector_type].count; /* TODO */
453 INIT_LIST_HEAD(&connector->user_modes);
454 INIT_LIST_HEAD(&connector->probed_modes);
455 INIT_LIST_HEAD(&connector->modes);
456 connector->edid_blob_ptr = NULL;
457
458 list_add_tail(&connector->head, &dev->mode_config.connector_list);
459 dev->mode_config.num_connector++;
460
461 drm_connector_attach_property(connector,
462 dev->mode_config.edid_property, 0);
463
464 drm_connector_attach_property(connector,
465 dev->mode_config.dpms_property, 0);
466
467 mutex_unlock(&dev->mode_config.mutex);
468}
469EXPORT_SYMBOL(drm_connector_init);
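/*
 * Illustrative sketch (hypothetical driver code): registering a VGA connector
 * with a functions table the driver would provide.
 */
static void foo_vga_connector_register(struct drm_device *dev,
				       struct drm_connector *connector,
				       const struct drm_connector_funcs *funcs)
{
	drm_connector_init(dev, connector, funcs, DRM_MODE_CONNECTOR_VGA);
	/* modes are filled in later through ->fill_modes / drm_mode_probed_add() */
}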
470
471/**
472 * drm_connector_cleanup - cleans up an initialised connector
473 * @connector: connector to cleanup
474 *
475 * LOCKING:
476 * Caller must hold @dev's mode_config lock.
477 *
478 * Cleans up the connector but doesn't free the object.
479 */
480void drm_connector_cleanup(struct drm_connector *connector)
481{
482 struct drm_device *dev = connector->dev;
483 struct drm_display_mode *mode, *t;
484
485 list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
486 drm_mode_remove(connector, mode);
487
488 list_for_each_entry_safe(mode, t, &connector->modes, head)
489 drm_mode_remove(connector, mode);
490
491 list_for_each_entry_safe(mode, t, &connector->user_modes, head)
492 drm_mode_remove(connector, mode);
493
494 mutex_lock(&dev->mode_config.mutex);
495 drm_mode_object_put(dev, &connector->base);
496 list_del(&connector->head);
497 mutex_unlock(&dev->mode_config.mutex);
498}
499EXPORT_SYMBOL(drm_connector_cleanup);
500
501void drm_encoder_init(struct drm_device *dev,
502 struct drm_encoder *encoder,
503 const struct drm_encoder_funcs *funcs,
504 int encoder_type)
505{
506 mutex_lock(&dev->mode_config.mutex);
507
508 encoder->dev = dev;
509
510 drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
511 encoder->encoder_type = encoder_type;
512 encoder->funcs = funcs;
513
514 list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
515 dev->mode_config.num_encoder++;
516
517 mutex_unlock(&dev->mode_config.mutex);
518}
519EXPORT_SYMBOL(drm_encoder_init);
520
521void drm_encoder_cleanup(struct drm_encoder *encoder)
522{
523 struct drm_device *dev = encoder->dev;
524 mutex_lock(&dev->mode_config.mutex);
525 drm_mode_object_put(dev, &encoder->base);
526 list_del(&encoder->head);
527 mutex_unlock(&dev->mode_config.mutex);
528}
529EXPORT_SYMBOL(drm_encoder_cleanup);
530
531/**
532 * drm_mode_create - create a new display mode
533 * @dev: DRM device
534 *
535 * LOCKING:
536 * Caller must hold DRM mode_config lock.
537 *
538 * Create a new drm_display_mode, give it an ID, and return it.
539 *
540 * RETURNS:
541 * Pointer to new mode on success, NULL on error.
542 */
543struct drm_display_mode *drm_mode_create(struct drm_device *dev)
544{
545 struct drm_display_mode *nmode;
546
547 nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
548 if (!nmode)
549 return NULL;
550
551 drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
552 return nmode;
553}
554EXPORT_SYMBOL(drm_mode_create);
555
556/**
557 * drm_mode_destroy - remove a mode
558 * @dev: DRM device
559 * @mode: mode to remove
560 *
561 * LOCKING:
562 * Caller must hold mode config lock.
563 *
564 * Free @mode's unique identifier, then free it.
565 */
566void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
567{
568 drm_mode_object_put(dev, &mode->base);
569
570 kfree(mode);
571}
572EXPORT_SYMBOL(drm_mode_destroy);
573
574static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
575{
576 struct drm_property *edid;
577 struct drm_property *dpms;
578 int i;
579
580 /*
581 * Standard properties (apply to all connectors)
582 */
583 edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
584 DRM_MODE_PROP_IMMUTABLE,
585 "EDID", 0);
586 dev->mode_config.edid_property = edid;
587
588 dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
589 "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
590 for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
591 drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
592 drm_dpms_enum_list[i].name);
593 dev->mode_config.dpms_property = dpms;
594
595 return 0;
596}
597
598/**
599 * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
600 * @dev: DRM device
601 *
602 * Called by a driver the first time a DVI-I connector is made.
603 */
604int drm_mode_create_dvi_i_properties(struct drm_device *dev)
605{
606 struct drm_property *dvi_i_selector;
607 struct drm_property *dvi_i_subconnector;
608 int i;
609
610 if (dev->mode_config.dvi_i_select_subconnector_property)
611 return 0;
612
613 dvi_i_selector =
614 drm_property_create(dev, DRM_MODE_PROP_ENUM,
615 "select subconnector",
616 ARRAY_SIZE(drm_dvi_i_select_enum_list));
617 for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
618 drm_property_add_enum(dvi_i_selector, i,
619 drm_dvi_i_select_enum_list[i].type,
620 drm_dvi_i_select_enum_list[i].name);
621 dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
622
623 dvi_i_subconnector =
624 drm_property_create(dev, DRM_MODE_PROP_ENUM |
625 DRM_MODE_PROP_IMMUTABLE,
626 "subconnector",
627 ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
628 for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
629 drm_property_add_enum(dvi_i_subconnector, i,
630 drm_dvi_i_subconnector_enum_list[i].type,
631 drm_dvi_i_subconnector_enum_list[i].name);
632 dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
633
634 return 0;
635}
636EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
637
638/**
639 * drm_create_tv_properties - create TV specific connector properties
640 * @dev: DRM device
641 * @num_modes: number of different TV formats (modes) supported
642 * @modes: array of pointers to strings containing name of each format
643 *
644 * Called by a driver's TV initialization routine, this function creates
645 * the TV specific connector properties for a given device. Caller is
646 * responsible for allocating a list of format names and passing them to
647 * this routine.
648 */
649int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
650 char *modes[])
651{
652 struct drm_property *tv_selector;
653 struct drm_property *tv_subconnector;
654 int i;
655
656 if (dev->mode_config.tv_select_subconnector_property)
657 return 0;
658
659 /*
660 * Basic connector properties
661 */
662 tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
663 "select subconnector",
664 ARRAY_SIZE(drm_tv_select_enum_list));
665 for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
666 drm_property_add_enum(tv_selector, i,
667 drm_tv_select_enum_list[i].type,
668 drm_tv_select_enum_list[i].name);
669 dev->mode_config.tv_select_subconnector_property = tv_selector;
670
671 tv_subconnector =
672 drm_property_create(dev, DRM_MODE_PROP_ENUM |
673 DRM_MODE_PROP_IMMUTABLE, "subconnector",
674 ARRAY_SIZE(drm_tv_subconnector_enum_list));
675 for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
676 drm_property_add_enum(tv_subconnector, i,
677 drm_tv_subconnector_enum_list[i].type,
678 drm_tv_subconnector_enum_list[i].name);
679 dev->mode_config.tv_subconnector_property = tv_subconnector;
680
681 /*
682 * Other, TV specific properties: margins & TV modes.
683 */
684 dev->mode_config.tv_left_margin_property =
685 drm_property_create(dev, DRM_MODE_PROP_RANGE,
686 "left margin", 2);
687 dev->mode_config.tv_left_margin_property->values[0] = 0;
688 dev->mode_config.tv_left_margin_property->values[1] = 100;
689
690 dev->mode_config.tv_right_margin_property =
691 drm_property_create(dev, DRM_MODE_PROP_RANGE,
692 "right margin", 2);
693 dev->mode_config.tv_right_margin_property->values[0] = 0;
694 dev->mode_config.tv_right_margin_property->values[1] = 100;
695
696 dev->mode_config.tv_top_margin_property =
697 drm_property_create(dev, DRM_MODE_PROP_RANGE,
698 "top margin", 2);
699 dev->mode_config.tv_top_margin_property->values[0] = 0;
700 dev->mode_config.tv_top_margin_property->values[1] = 100;
701
702 dev->mode_config.tv_bottom_margin_property =
703 drm_property_create(dev, DRM_MODE_PROP_RANGE,
704 "bottom margin", 2);
705 dev->mode_config.tv_bottom_margin_property->values[0] = 0;
706 dev->mode_config.tv_bottom_margin_property->values[1] = 100;
707
708 dev->mode_config.tv_mode_property =
709 drm_property_create(dev, DRM_MODE_PROP_ENUM,
710 "mode", num_modes);
711 for (i = 0; i < num_modes; i++)
712 drm_property_add_enum(dev->mode_config.tv_mode_property, i,
713 i, modes[i]);
714
715 return 0;
716}
717EXPORT_SYMBOL(drm_mode_create_tv_properties);
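/*
 * Illustrative sketch (hypothetical): a TV-out driver passes its format names
 * once at init time; the strings below are only examples.
 */
static char *foo_tv_format_names[] = { "NTSC", "PAL" };

static void foo_tv_create_properties(struct drm_device *dev)
{
	drm_mode_create_tv_properties(dev, ARRAY_SIZE(foo_tv_format_names),
				      foo_tv_format_names);
}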
718
719/**
720 * drm_mode_create_scaling_mode_property - create scaling mode property
721 * @dev: DRM device
722 *
723 * Called by a driver the first time it's needed, must be attached to desired
724 * connectors.
725 */
726int drm_mode_create_scaling_mode_property(struct drm_device *dev)
727{
728 struct drm_property *scaling_mode;
729 int i;
730
731 if (dev->mode_config.scaling_mode_property)
732 return 0;
733
734 scaling_mode =
735 drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
736 ARRAY_SIZE(drm_scaling_mode_enum_list));
737 for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
738 drm_property_add_enum(scaling_mode, i,
739 drm_scaling_mode_enum_list[i].type,
740 drm_scaling_mode_enum_list[i].name);
741
742 dev->mode_config.scaling_mode_property = scaling_mode;
743
744 return 0;
745}
746EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
747
748/**
749 * drm_mode_create_dithering_property - create dithering property
750 * @dev: DRM device
751 *
752 * Called by a driver the first time it's needed, must be attached to desired
753 * connectors.
754 */
755int drm_mode_create_dithering_property(struct drm_device *dev)
756{
757 struct drm_property *dithering_mode;
758 int i;
759
760 if (dev->mode_config.dithering_mode_property)
761 return 0;
762
763 dithering_mode =
764 drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
765 ARRAY_SIZE(drm_dithering_mode_enum_list));
766 for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
767 drm_property_add_enum(dithering_mode, i,
768 drm_dithering_mode_enum_list[i].type,
769 drm_dithering_mode_enum_list[i].name);
770 dev->mode_config.dithering_mode_property = dithering_mode;
771
772 return 0;
773}
774EXPORT_SYMBOL(drm_mode_create_dithering_property);
775
776/**
777 * drm_mode_config_init - initialize DRM mode_configuration structure
778 * @dev: DRM device
779 *
780 * LOCKING:
781 * None, should happen single threaded at init time.
782 *
783 * Initialize @dev's mode_config structure, used for tracking the graphics
784 * configuration of @dev.
785 */
786void drm_mode_config_init(struct drm_device *dev)
787{
788 mutex_init(&dev->mode_config.mutex);
789 INIT_LIST_HEAD(&dev->mode_config.fb_list);
790 INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
791 INIT_LIST_HEAD(&dev->mode_config.crtc_list);
792 INIT_LIST_HEAD(&dev->mode_config.connector_list);
793 INIT_LIST_HEAD(&dev->mode_config.encoder_list);
794 INIT_LIST_HEAD(&dev->mode_config.property_list);
795 INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
796 idr_init(&dev->mode_config.crtc_idr);
797
798 mutex_lock(&dev->mode_config.mutex);
799 drm_mode_create_standard_connector_properties(dev);
800 mutex_unlock(&dev->mode_config.mutex);
801
802 /* Just to be sure */
803 dev->mode_config.num_fb = 0;
804 dev->mode_config.num_connector = 0;
805 dev->mode_config.num_crtc = 0;
806 dev->mode_config.num_encoder = 0;
807}
808EXPORT_SYMBOL(drm_mode_config_init);
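/*
 * Illustrative sketch (hypothetical driver load path): the min/max fields set
 * here are the ones later checked by drm_mode_addfb() and reported by
 * drm_mode_getresources(); the 4096 limits are made up.
 */
static void foo_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	/* CRTCs, encoders and connectors would be created next */
}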
809
810int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
811{
812 uint32_t total_objects = 0;
813
814 total_objects += dev->mode_config.num_crtc;
815 total_objects += dev->mode_config.num_connector;
816 total_objects += dev->mode_config.num_encoder;
817
818 if (total_objects == 0)
819 return -EINVAL;
820
821 group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
822 if (!group->id_list)
823 return -ENOMEM;
824
825 group->num_crtcs = 0;
826 group->num_connectors = 0;
827 group->num_encoders = 0;
828 return 0;
829}
830
831int drm_mode_group_init_legacy_group(struct drm_device *dev,
832 struct drm_mode_group *group)
833{
834 struct drm_crtc *crtc;
835 struct drm_encoder *encoder;
836 struct drm_connector *connector;
837 int ret;
838
839 if ((ret = drm_mode_group_init(dev, group)))
840 return ret;
841
842 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
843 group->id_list[group->num_crtcs++] = crtc->base.id;
844
845 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
846 group->id_list[group->num_crtcs + group->num_encoders++] =
847 encoder->base.id;
848
849 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
850 group->id_list[group->num_crtcs + group->num_encoders +
851 group->num_connectors++] = connector->base.id;
852
853 return 0;
854}
855
856/**
857 * drm_mode_config_cleanup - free up DRM mode_config info
858 * @dev: DRM device
859 *
860 * LOCKING:
861 * Caller must hold mode config lock.
862 *
863 * Free up all the connectors and CRTCs associated with this DRM device, then
864 * free up the framebuffers and associated buffer objects.
865 *
866 * FIXME: cleanup any dangling user buffer objects too
867 */
868void drm_mode_config_cleanup(struct drm_device *dev)
869{
870 struct drm_connector *connector, *ot;
871 struct drm_crtc *crtc, *ct;
872 struct drm_encoder *encoder, *enct;
873 struct drm_framebuffer *fb, *fbt;
874 struct drm_property *property, *pt;
875
876 list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
877 head) {
878 encoder->funcs->destroy(encoder);
879 }
880
881 list_for_each_entry_safe(connector, ot,
882 &dev->mode_config.connector_list, head) {
883 connector->funcs->destroy(connector);
884 }
885
886 list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
887 head) {
888 drm_property_destroy(dev, property);
889 }
890
891 list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
892 fb->funcs->destroy(fb);
893 }
894
895 list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
896 crtc->funcs->destroy(crtc);
897 }
898
899}
900EXPORT_SYMBOL(drm_mode_config_cleanup);
901
902/**
903 * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
904 * @out: drm_mode_modeinfo struct to return to the user
905 * @in: drm_display_mode to use
906 *
907 * LOCKING:
908 * None.
909 *
910 * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
911 * the user.
912 */
913void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
914 struct drm_display_mode *in)
915{
916 out->clock = in->clock;
917 out->hdisplay = in->hdisplay;
918 out->hsync_start = in->hsync_start;
919 out->hsync_end = in->hsync_end;
920 out->htotal = in->htotal;
921 out->hskew = in->hskew;
922 out->vdisplay = in->vdisplay;
923 out->vsync_start = in->vsync_start;
924 out->vsync_end = in->vsync_end;
925 out->vtotal = in->vtotal;
926 out->vscan = in->vscan;
927 out->vrefresh = in->vrefresh;
928 out->flags = in->flags;
929 out->type = in->type;
930 strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
931 out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
932}
933
934/**
935 * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
936 * @out: drm_display_mode to return to the user
937 * @in: drm_mode_modeinfo to use
938 *
939 * LOCKING:
940 * None.
941 *
942 * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
943 * the caller.
944 */
945void drm_crtc_convert_umode(struct drm_display_mode *out,
946 struct drm_mode_modeinfo *in)
947{
948 out->clock = in->clock;
949 out->hdisplay = in->hdisplay;
950 out->hsync_start = in->hsync_start;
951 out->hsync_end = in->hsync_end;
952 out->htotal = in->htotal;
953 out->hskew = in->hskew;
954 out->vdisplay = in->vdisplay;
955 out->vsync_start = in->vsync_start;
956 out->vsync_end = in->vsync_end;
957 out->vtotal = in->vtotal;
958 out->vscan = in->vscan;
959 out->vrefresh = in->vrefresh;
960 out->flags = in->flags;
961 out->type = in->type;
962 strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
963 out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
964}
965
966/**
967 * drm_mode_getresources - get graphics configuration
968 * @inode: inode from the ioctl
969 * @filp: file * from the ioctl
970 * @cmd: cmd from ioctl
971 * @arg: arg from ioctl
972 *
973 * LOCKING:
974 * Takes mode config lock.
975 *
976 * Construct a set of configuration description structures and return
977 * them to the user, including CRTC, connector and framebuffer configuration.
978 *
979 * Called by the user via ioctl.
980 *
981 * RETURNS:
982 * Zero on success, errno on failure.
983 */
984int drm_mode_getresources(struct drm_device *dev, void *data,
985 struct drm_file *file_priv)
986{
987 struct drm_mode_card_res *card_res = data;
988 struct list_head *lh;
989 struct drm_framebuffer *fb;
990 struct drm_connector *connector;
991 struct drm_crtc *crtc;
992 struct drm_encoder *encoder;
993 int ret = 0;
994 int connector_count = 0;
995 int crtc_count = 0;
996 int fb_count = 0;
997 int encoder_count = 0;
998 int copied = 0, i;
999 uint32_t __user *fb_id;
1000 uint32_t __user *crtc_id;
1001 uint32_t __user *connector_id;
1002 uint32_t __user *encoder_id;
1003 struct drm_mode_group *mode_group;
1004
1005 mutex_lock(&dev->mode_config.mutex);
1006
1007 /*
1008 * For the non-control nodes we need to limit the list of resources
1009 * by IDs in the group list for this node
1010 */
1011 list_for_each(lh, &file_priv->fbs)
1012 fb_count++;
1013
1014 mode_group = &file_priv->master->minor->mode_group;
1015 if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
1016
1017 list_for_each(lh, &dev->mode_config.crtc_list)
1018 crtc_count++;
1019
1020 list_for_each(lh, &dev->mode_config.connector_list)
1021 connector_count++;
1022
1023 list_for_each(lh, &dev->mode_config.encoder_list)
1024 encoder_count++;
1025 } else {
1026
1027 crtc_count = mode_group->num_crtcs;
1028 connector_count = mode_group->num_connectors;
1029 encoder_count = mode_group->num_encoders;
1030 }
1031
1032 card_res->max_height = dev->mode_config.max_height;
1033 card_res->min_height = dev->mode_config.min_height;
1034 card_res->max_width = dev->mode_config.max_width;
1035 card_res->min_width = dev->mode_config.min_width;
1036
1037 /* handle this in 4 parts */
1038 /* FBs */
1039 if (card_res->count_fbs >= fb_count) {
1040 copied = 0;
1041 fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
1042 list_for_each_entry(fb, &file_priv->fbs, head) {
1043 if (put_user(fb->base.id, fb_id + copied)) {
1044 ret = -EFAULT;
1045 goto out;
1046 }
1047 copied++;
1048 }
1049 }
1050 card_res->count_fbs = fb_count;
1051
1052 /* CRTCs */
1053 if (card_res->count_crtcs >= crtc_count) {
1054 copied = 0;
1055 crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
1056 if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
1057 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
1058 head) {
1059 DRM_DEBUG("CRTC ID is %d\n", crtc->base.id);
1060 if (put_user(crtc->base.id, crtc_id + copied)) {
1061 ret = -EFAULT;
1062 goto out;
1063 }
1064 copied++;
1065 }
1066 } else {
1067 for (i = 0; i < mode_group->num_crtcs; i++) {
1068 if (put_user(mode_group->id_list[i],
1069 crtc_id + copied)) {
1070 ret = -EFAULT;
1071 goto out;
1072 }
1073 copied++;
1074 }
1075 }
1076 }
1077 card_res->count_crtcs = crtc_count;
1078
1079 /* Encoders */
1080 if (card_res->count_encoders >= encoder_count) {
1081 copied = 0;
1082 encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
1083 if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
1084 list_for_each_entry(encoder,
1085 &dev->mode_config.encoder_list,
1086 head) {
1087 DRM_DEBUG("ENCODER ID is %d\n",
1088 encoder->base.id);
1089 if (put_user(encoder->base.id, encoder_id +
1090 copied)) {
1091 ret = -EFAULT;
1092 goto out;
1093 }
1094 copied++;
1095 }
1096 } else {
1097 for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
1098 if (put_user(mode_group->id_list[i],
1099 encoder_id + copied)) {
1100 ret = -EFAULT;
1101 goto out;
1102 }
1103 copied++;
1104 }
1105
1106 }
1107 }
1108 card_res->count_encoders = encoder_count;
1109
1110 /* Connectors */
1111 if (card_res->count_connectors >= connector_count) {
1112 copied = 0;
1113 connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
1114 if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
1115 list_for_each_entry(connector,
1116 &dev->mode_config.connector_list,
1117 head) {
1118 DRM_DEBUG("CONNECTOR ID is %d\n",
1119 connector->base.id);
1120 if (put_user(connector->base.id,
1121 connector_id + copied)) {
1122 ret = -EFAULT;
1123 goto out;
1124 }
1125 copied++;
1126 }
1127 } else {
1128 int start = mode_group->num_crtcs +
1129 mode_group->num_encoders;
1130 for (i = start; i < start + mode_group->num_connectors; i++) {
1131 if (put_user(mode_group->id_list[i],
1132 connector_id + copied)) {
1133 ret = -EFAULT;
1134 goto out;
1135 }
1136 copied++;
1137 }
1138 }
1139 }
1140 card_res->count_connectors = connector_count;
1141
1142 DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
1143 card_res->count_connectors, card_res->count_encoders);
1144
1145out:
1146 mutex_unlock(&dev->mode_config.mutex);
1147 return ret;
1148}
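/*
 * Illustrative userspace sketch of the two-pass convention this ioctl
 * follows (first call with zero counts to learn the sizes, second call with
 * buffers attached). Assumes the UAPI headers from this series expose
 * struct drm_mode_card_res and DRM_IOCTL_MODE_GETRESOURCES; only the CRTC
 * array is fetched here to keep the example short.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int example_fetch_crtc_ids(int fd, uint32_t **ids_out, uint32_t *count_out)
{
	struct drm_mode_card_res res;
	uint32_t *ids;

	memset(&res, 0, sizeof(res));
	if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))	/* pass 1: sizes only */
		return -1;
	ids = calloc(res.count_crtcs, sizeof(*ids));
	if (!ids)
		return -1;
	/* only ask the kernel to fill the CRTC array on the second pass */
	res.count_fbs = res.count_connectors = res.count_encoders = 0;
	res.crtc_id_ptr = (uint64_t)(unsigned long)ids;
	if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) {	/* pass 2: fill */
		free(ids);
		return -1;
	}
	*ids_out = ids;
	*count_out = res.count_crtcs;
	return 0;
}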
1149
1150/**
1151 * drm_mode_getcrtc - get CRTC configuration
1152 * @inode: inode from the ioctl
1153 * @filp: file * from the ioctl
1154 * @cmd: cmd from ioctl
1155 * @arg: arg from ioctl
1156 *
1157 * LOCKING:
1158 * Caller? (FIXME)
1159 *
1160 * Construct a CRTC configuration structure to return to the user.
1161 *
1162 * Called by the user via ioctl.
1163 *
1164 * RETURNS:
1165 * Zero on success, errno on failure.
1166 */
1167int drm_mode_getcrtc(struct drm_device *dev,
1168 void *data, struct drm_file *file_priv)
1169{
1170 struct drm_mode_crtc *crtc_resp = data;
1171 struct drm_crtc *crtc;
1172 struct drm_mode_object *obj;
1173 int ret = 0;
1174
1175 mutex_lock(&dev->mode_config.mutex);
1176
1177 obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
1178 DRM_MODE_OBJECT_CRTC);
1179 if (!obj) {
1180 ret = -EINVAL;
1181 goto out;
1182 }
1183 crtc = obj_to_crtc(obj);
1184
1185 crtc_resp->x = crtc->x;
1186 crtc_resp->y = crtc->y;
1187 crtc_resp->gamma_size = crtc->gamma_size;
1188 if (crtc->fb)
1189 crtc_resp->fb_id = crtc->fb->base.id;
1190 else
1191 crtc_resp->fb_id = 0;
1192
1193 if (crtc->enabled) {
1194
1195 drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
1196 crtc_resp->mode_valid = 1;
1197
1198 } else {
1199 crtc_resp->mode_valid = 0;
1200 }
1201
1202out:
1203 mutex_unlock(&dev->mode_config.mutex);
1204 return ret;
1205}
1206
1207/**
1208 * drm_mode_getconnector - get connector configuration
1209 * @inode: inode from the ioctl
1210 * @filp: file * from the ioctl
1211 * @cmd: cmd from ioctl
1212 * @arg: arg from ioctl
1213 *
1214 * LOCKING:
1215 * Caller? (FIXME)
1216 *
1217 * Construct a connector configuration structure to return to the user.
1218 *
1219 * Called by the user via ioctl.
1220 *
1221 * RETURNS:
1222 * Zero on success, errno on failure.
1223 */
1224int drm_mode_getconnector(struct drm_device *dev, void *data,
1225 struct drm_file *file_priv)
1226{
1227 struct drm_mode_get_connector *out_resp = data;
1228 struct drm_mode_object *obj;
1229 struct drm_connector *connector;
1230 struct drm_display_mode *mode;
1231 int mode_count = 0;
1232 int props_count = 0;
1233 int encoders_count = 0;
1234 int ret = 0;
1235 int copied = 0;
1236 int i;
1237 struct drm_mode_modeinfo u_mode;
1238 struct drm_mode_modeinfo __user *mode_ptr;
1239 uint32_t __user *prop_ptr;
1240 uint64_t __user *prop_values;
1241 uint32_t __user *encoder_ptr;
1242
1243 memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
1244
1245 DRM_DEBUG("connector id %d:\n", out_resp->connector_id);
1246
1247 mutex_lock(&dev->mode_config.mutex);
1248
1249 obj = drm_mode_object_find(dev, out_resp->connector_id,
1250 DRM_MODE_OBJECT_CONNECTOR);
1251 if (!obj) {
1252 ret = -EINVAL;
1253 goto out;
1254 }
1255 connector = obj_to_connector(obj);
1256
1257 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
1258 if (connector->property_ids[i] != 0) {
1259 props_count++;
1260 }
1261 }
1262
1263 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
1264 if (connector->encoder_ids[i] != 0) {
1265 encoders_count++;
1266 }
1267 }
1268
1269 if (out_resp->count_modes == 0) {
1270 connector->funcs->fill_modes(connector,
1271 dev->mode_config.max_width,
1272 dev->mode_config.max_height);
1273 }
1274
1275 /* delayed so we get modes regardless of pre-fill_modes state */
1276 list_for_each_entry(mode, &connector->modes, head)
1277 mode_count++;
1278
1279 out_resp->connector_id = connector->base.id;
1280 out_resp->connector_type = connector->connector_type;
1281 out_resp->connector_type_id = connector->connector_type_id;
1282 out_resp->mm_width = connector->display_info.width_mm;
1283 out_resp->mm_height = connector->display_info.height_mm;
1284 out_resp->subpixel = connector->display_info.subpixel_order;
1285 out_resp->connection = connector->status;
1286 if (connector->encoder)
1287 out_resp->encoder_id = connector->encoder->base.id;
1288 else
1289 out_resp->encoder_id = 0;
1290
1291 /*
1292 * This ioctl is called twice, once to determine how much space is
1293 * needed, and the 2nd time to fill it.
1294 */
1295 if ((out_resp->count_modes >= mode_count) && mode_count) {
1296 copied = 0;
1297 mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
1298 list_for_each_entry(mode, &connector->modes, head) {
1299 drm_crtc_convert_to_umode(&u_mode, mode);
1300 if (copy_to_user(mode_ptr + copied,
1301 &u_mode, sizeof(u_mode))) {
1302 ret = -EFAULT;
1303 goto out;
1304 }
1305 copied++;
1306 }
1307 }
1308 out_resp->count_modes = mode_count;
1309
1310 if ((out_resp->count_props >= props_count) && props_count) {
1311 copied = 0;
1312 prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
1313 prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
1314 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
1315 if (connector->property_ids[i] != 0) {
1316 if (put_user(connector->property_ids[i],
1317 prop_ptr + copied)) {
1318 ret = -EFAULT;
1319 goto out;
1320 }
1321
1322 if (put_user(connector->property_values[i],
1323 prop_values + copied)) {
1324 ret = -EFAULT;
1325 goto out;
1326 }
1327 copied++;
1328 }
1329 }
1330 }
1331 out_resp->count_props = props_count;
1332
1333 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
1334 copied = 0;
1335 encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
1336 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
1337 if (connector->encoder_ids[i] != 0) {
1338 if (put_user(connector->encoder_ids[i],
1339 encoder_ptr + copied)) {
1340 ret = -EFAULT;
1341 goto out;
1342 }
1343 copied++;
1344 }
1345 }
1346 }
1347 out_resp->count_encoders = encoders_count;
1348
1349out:
1350 mutex_unlock(&dev->mode_config.mutex);
1351 return ret;
1352}
1353
1354int drm_mode_getencoder(struct drm_device *dev, void *data,
1355 struct drm_file *file_priv)
1356{
1357 struct drm_mode_get_encoder *enc_resp = data;
1358 struct drm_mode_object *obj;
1359 struct drm_encoder *encoder;
1360 int ret = 0;
1361
1362 mutex_lock(&dev->mode_config.mutex);
1363 obj = drm_mode_object_find(dev, enc_resp->encoder_id,
1364 DRM_MODE_OBJECT_ENCODER);
1365 if (!obj) {
1366 ret = -EINVAL;
1367 goto out;
1368 }
1369 encoder = obj_to_encoder(obj);
1370
1371 if (encoder->crtc)
1372 enc_resp->crtc_id = encoder->crtc->base.id;
1373 else
1374 enc_resp->crtc_id = 0;
1375 enc_resp->encoder_type = encoder->encoder_type;
1376 enc_resp->encoder_id = encoder->base.id;
1377 enc_resp->possible_crtcs = encoder->possible_crtcs;
1378 enc_resp->possible_clones = encoder->possible_clones;
1379
1380out:
1381 mutex_unlock(&dev->mode_config.mutex);
1382 return ret;
1383}
1384
1385/**
1386 * drm_mode_setcrtc - set CRTC configuration
1387 * @inode: inode from the ioctl
1388 * @filp: file * from the ioctl
1389 * @cmd: cmd from ioctl
1390 * @arg: arg from ioctl
1391 *
1392 * LOCKING:
1393 * Caller? (FIXME)
1394 *
1395 * Build a new CRTC configuration based on user request.
1396 *
1397 * Called by the user via ioctl.
1398 *
1399 * RETURNS:
1400 * Zero on success, errno on failure.
1401 */
1402int drm_mode_setcrtc(struct drm_device *dev, void *data,
1403 struct drm_file *file_priv)
1404{
1405 struct drm_mode_config *config = &dev->mode_config;
1406 struct drm_mode_crtc *crtc_req = data;
1407 struct drm_mode_object *obj;
1408 struct drm_crtc *crtc, *crtcfb;
1409 struct drm_connector **connector_set = NULL, *connector;
1410 struct drm_framebuffer *fb = NULL;
1411 struct drm_display_mode *mode = NULL;
1412 struct drm_mode_set set;
1413 uint32_t __user *set_connectors_ptr;
1414 int ret = 0;
1415 int i;
1416
1417 mutex_lock(&dev->mode_config.mutex);
1418 obj = drm_mode_object_find(dev, crtc_req->crtc_id,
1419 DRM_MODE_OBJECT_CRTC);
1420 if (!obj) {
1421 DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
1422 ret = -EINVAL;
1423 goto out;
1424 }
1425 crtc = obj_to_crtc(obj);
1426
1427 if (crtc_req->mode_valid) {
1428 /* If we have a mode we need a framebuffer. */
1429 /* If we pass -1, set the mode with the currently bound fb */
1430 if (crtc_req->fb_id == -1) {
1431 list_for_each_entry(crtcfb,
1432 &dev->mode_config.crtc_list, head) {
1433 if (crtcfb == crtc) {
1434 DRM_DEBUG("Using current fb for setmode\n");
1435 fb = crtc->fb;
1436 }
1437 }
1438 } else {
1439 obj = drm_mode_object_find(dev, crtc_req->fb_id,
1440 DRM_MODE_OBJECT_FB);
1441 if (!obj) {
1442 DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id);
1443 ret = -EINVAL;
1444 goto out;
1445 }
1446 fb = obj_to_fb(obj);
1447 }
1448
1449 mode = drm_mode_create(dev);
1450 drm_crtc_convert_umode(mode, &crtc_req->mode);
1451 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1452 }
1453
1454 if (crtc_req->count_connectors == 0 && mode) {
1455 DRM_DEBUG("Count connectors is 0 but mode set\n");
1456 ret = -EINVAL;
1457 goto out;
1458 }
1459
1460 if (crtc_req->count_connectors > 0 && !mode && !fb) {
1461 DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
1462 crtc_req->count_connectors);
1463 ret = -EINVAL;
1464 goto out;
1465 }
1466
1467 if (crtc_req->count_connectors > 0) {
1468 u32 out_id;
1469
1470 /* Avoid unbounded kernel memory allocation */
1471 if (crtc_req->count_connectors > config->num_connector) {
1472 ret = -EINVAL;
1473 goto out;
1474 }
1475
1476 connector_set = kmalloc(crtc_req->count_connectors *
1477 sizeof(struct drm_connector *),
1478 GFP_KERNEL);
1479 if (!connector_set) {
1480 ret = -ENOMEM;
1481 goto out;
1482 }
1483
1484 for (i = 0; i < crtc_req->count_connectors; i++) {
1485 set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
1486 if (get_user(out_id, &set_connectors_ptr[i])) {
1487 ret = -EFAULT;
1488 goto out;
1489 }
1490
1491 obj = drm_mode_object_find(dev, out_id,
1492 DRM_MODE_OBJECT_CONNECTOR);
1493 if (!obj) {
1494 DRM_DEBUG("Connector id %d unknown\n", out_id);
1495 ret = -EINVAL;
1496 goto out;
1497 }
1498 connector = obj_to_connector(obj);
1499
1500 connector_set[i] = connector;
1501 }
1502 }
1503
1504 set.crtc = crtc;
1505 set.x = crtc_req->x;
1506 set.y = crtc_req->y;
1507 set.mode = mode;
1508 set.connectors = connector_set;
1509 set.num_connectors = crtc_req->count_connectors;
1510	set.fb = fb;
1511 ret = crtc->funcs->set_config(&set);
1512
1513out:
1514 kfree(connector_set);
1515 mutex_unlock(&dev->mode_config.mutex);
1516 return ret;
1517}
1518
1519int drm_mode_cursor_ioctl(struct drm_device *dev,
1520 void *data, struct drm_file *file_priv)
1521{
1522 struct drm_mode_cursor *req = data;
1523 struct drm_mode_object *obj;
1524 struct drm_crtc *crtc;
1525 int ret = 0;
1526
1527 DRM_DEBUG("\n");
1528
1529 if (!req->flags) {
1530 DRM_ERROR("no operation set\n");
1531 return -EINVAL;
1532 }
1533
1534 mutex_lock(&dev->mode_config.mutex);
1535 obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
1536 if (!obj) {
1537 DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id);
1538 ret = -EINVAL;
1539 goto out;
1540 }
1541 crtc = obj_to_crtc(obj);
1542
1543 if (req->flags & DRM_MODE_CURSOR_BO) {
1544 if (!crtc->funcs->cursor_set) {
1545 DRM_ERROR("crtc does not support cursor\n");
1546 ret = -ENXIO;
1547 goto out;
1548 }
1549 /* Turns off the cursor if handle is 0 */
1550 ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
1551 req->width, req->height);
1552 }
1553
1554 if (req->flags & DRM_MODE_CURSOR_MOVE) {
1555 if (crtc->funcs->cursor_move) {
1556 ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
1557 } else {
1558 DRM_ERROR("crtc does not support cursor\n");
1559 ret = -EFAULT;
1560 goto out;
1561 }
1562 }
1563out:
1564 mutex_unlock(&dev->mode_config.mutex);
1565 return ret;
1566}
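
/*
 * A hedged userspace sketch of the two DRM_MODE_CURSOR_* operations handled
 * above, using the libdrm wrappers drmModeSetCursor()/drmModeMoveCursor()
 * (assumed signatures).  The buffer object handle and the 64x64 size are
 * hypothetical and would come from a driver-specific allocation.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void sketch_cursor(int fd, uint32_t crtc_id, uint32_t bo_handle)
{
	/* DRM_MODE_CURSOR_BO path: bind the cursor (a handle of 0 hides it) */
	drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64);

	/* DRM_MODE_CURSOR_MOVE path: position the cursor on the CRTC */
	drmModeMoveCursor(fd, crtc_id, 100, 100);
}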
1567
1568/**
1569 * drm_mode_addfb - add an FB to the graphics configuration
1570 * @dev: DRM device
1571 * @data: ioctl data (struct drm_mode_fb_cmd)
1572 * @file_priv: DRM file for the ioctl call
1574 *
1575 * LOCKING:
1576 * Takes mode config lock.
1577 *
1578 * Add a new FB to the specified CRTC, given a user request.
1579 *
1580 * Called by the user via ioctl.
1581 *
1582 * RETURNS:
1583 * Zero on success, errno on failure.
1584 */
1585int drm_mode_addfb(struct drm_device *dev,
1586 void *data, struct drm_file *file_priv)
1587{
1588 struct drm_mode_fb_cmd *r = data;
1589 struct drm_mode_config *config = &dev->mode_config;
1590 struct drm_framebuffer *fb;
1591 int ret = 0;
1592
1593 if ((config->min_width > r->width) || (r->width > config->max_width)) {
1594 DRM_ERROR("mode new framebuffer width not within limits\n");
1595 return -EINVAL;
1596 }
1597 if ((config->min_height > r->height) || (r->height > config->max_height)) {
1598 DRM_ERROR("mode new framebuffer height not within limits\n");
1599 return -EINVAL;
1600 }
1601
1602 mutex_lock(&dev->mode_config.mutex);
1603
1604 /* TODO check buffer is sufficiently large */
1605 /* TODO setup destructor callback */
1606
1607 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
1608 if (!fb) {
1609 DRM_ERROR("could not create framebuffer\n");
1610 ret = -EINVAL;
1611 goto out;
1612 }
1613
1614 r->fb_id = fb->base.id;
1615 list_add(&fb->filp_head, &file_priv->fbs);
1616
1617out:
1618 mutex_unlock(&dev->mode_config.mutex);
1619 return ret;
1620}
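
/*
 * Userspace counterpart sketch for drm_mode_addfb()/drm_mode_rmfb(),
 * assuming the libdrm wrappers drmModeAddFB()/drmModeRmFB().  The buffer
 * object handle is hypothetical; it would come from the driver's own
 * buffer allocation ioctls (e.g. GEM).
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static uint32_t sketch_add_fb(int fd, uint32_t width, uint32_t height,
			      uint32_t pitch, uint32_t bo_handle)
{
	uint32_t fb_id = 0;

	/* depth 24 / bpp 32 matches a common XRGB8888 scanout layout */
	if (drmModeAddFB(fd, width, height, 24, 32, pitch, bo_handle, &fb_id))
		return 0;
	return fb_id;	/* later released with drmModeRmFB(fd, fb_id) */
}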
1621
1622/**
1623 * drm_mode_rmfb - remove an FB from the configuration
1624 * @dev: DRM device
1625 * @data: ioctl data (pointer to the framebuffer ID)
1626 * @file_priv: DRM file for the ioctl call
1628 *
1629 * LOCKING:
1630 * Takes mode config lock.
1631 *
1632 * Remove the FB specified by the user.
1633 *
1634 * Called by the user via ioctl.
1635 *
1636 * RETURNS:
1637 * Zero on success, errno on failure.
1638 */
1639int drm_mode_rmfb(struct drm_device *dev,
1640 void *data, struct drm_file *file_priv)
1641{
1642 struct drm_mode_object *obj;
1643 struct drm_framebuffer *fb = NULL;
1644 struct drm_framebuffer *fbl = NULL;
1645 uint32_t *id = data;
1646 int ret = 0;
1647 int found = 0;
1648
1649 mutex_lock(&dev->mode_config.mutex);
1650 obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
1651 /* TODO check that we really get a framebuffer back. */
1652 if (!obj) {
1653 DRM_ERROR("mode invalid framebuffer id\n");
1654 ret = -EINVAL;
1655 goto out;
1656 }
1657 fb = obj_to_fb(obj);
1658
1659 list_for_each_entry(fbl, &file_priv->fbs, filp_head)
1660 if (fb == fbl)
1661 found = 1;
1662
1663 if (!found) {
1664 DRM_ERROR("tried to remove a fb that we didn't own\n");
1665 ret = -EINVAL;
1666 goto out;
1667 }
1668
1669 /* TODO release all crtcs connected to the framebuffer */
1670 /* TODO unhook the destructor from the buffer object */
1671
1672 list_del(&fb->filp_head);
1673 fb->funcs->destroy(fb);
1674
1675out:
1676 mutex_unlock(&dev->mode_config.mutex);
1677 return ret;
1678}
1679
1680/**
1681 * drm_mode_getfb - get FB info
1682 * @dev: DRM device
1683 * @data: ioctl data (struct drm_mode_fb_cmd)
1684 * @file_priv: DRM file for the ioctl call
1686 *
1687 * LOCKING:
1688 * Caller? (FIXME)
1689 *
1690 * Lookup the FB given its ID and return info about it.
1691 *
1692 * Called by the user via ioctl.
1693 *
1694 * RETURNS:
1695 * Zero on success, errno on failure.
1696 */
1697int drm_mode_getfb(struct drm_device *dev,
1698 void *data, struct drm_file *file_priv)
1699{
1700 struct drm_mode_fb_cmd *r = data;
1701 struct drm_mode_object *obj;
1702 struct drm_framebuffer *fb;
1703 int ret = 0;
1704
1705 mutex_lock(&dev->mode_config.mutex);
1706 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
1707 if (!obj) {
1708 DRM_ERROR("invalid framebuffer id\n");
1709 ret = -EINVAL;
1710 goto out;
1711 }
1712 fb = obj_to_fb(obj);
1713
1714 r->height = fb->height;
1715 r->width = fb->width;
1716 r->depth = fb->depth;
1717 r->bpp = fb->bits_per_pixel;
1718 r->pitch = fb->pitch;
1719 fb->funcs->create_handle(fb, file_priv, &r->handle);
1720
1721out:
1722 mutex_unlock(&dev->mode_config.mutex);
1723 return ret;
1724}
1725
1726/**
1727 * drm_fb_release - remove and free the FBs on this file
1728 * @filp: file * from the ioctl
1729 *
1730 * LOCKING:
1731 * Takes mode config lock.
1732 *
1733 * Destroy all the FBs associated with @filp.
1734 *
1735 * Called when the DRM file is released rather than via ioctl.
1739 */
1740void drm_fb_release(struct file *filp)
1741{
1742 struct drm_file *priv = filp->private_data;
1743 struct drm_device *dev = priv->minor->dev;
1744 struct drm_framebuffer *fb, *tfb;
1745
1746 mutex_lock(&dev->mode_config.mutex);
1747 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
1748 list_del(&fb->filp_head);
1749 fb->funcs->destroy(fb);
1750 }
1751 mutex_unlock(&dev->mode_config.mutex);
1752}
1753
1754/**
1755 * drm_mode_attachmode - add a mode to the user mode list
1756 * @dev: DRM device
1757 * @connector: connector to add the mode to
1758 * @mode: mode to add
1759 *
1760 * Add @mode to @connector's user mode list.
1761 */
1762static int drm_mode_attachmode(struct drm_device *dev,
1763 struct drm_connector *connector,
1764 struct drm_display_mode *mode)
1765{
1766 int ret = 0;
1767
1768 list_add_tail(&mode->head, &connector->user_modes);
1769 return ret;
1770}
1771
1772int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
1773 struct drm_display_mode *mode)
1774{
1775 struct drm_connector *connector;
1776 int ret = 0;
1777 struct drm_display_mode *dup_mode;
1778 int need_dup = 0;
1779 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1780 if (!connector->encoder)
1781 break;
1782 if (connector->encoder->crtc == crtc) {
1783 if (need_dup)
1784 dup_mode = drm_mode_duplicate(dev, mode);
1785 else
1786 dup_mode = mode;
1787 ret = drm_mode_attachmode(dev, connector, dup_mode);
1788 if (ret)
1789 return ret;
1790 need_dup = 1;
1791 }
1792 }
1793 return 0;
1794}
1795EXPORT_SYMBOL(drm_mode_attachmode_crtc);
1796
1797static int drm_mode_detachmode(struct drm_device *dev,
1798 struct drm_connector *connector,
1799 struct drm_display_mode *mode)
1800{
1801 int found = 0;
1802 int ret = 0;
1803 struct drm_display_mode *match_mode, *t;
1804
1805 list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
1806 if (drm_mode_equal(match_mode, mode)) {
1807 list_del(&match_mode->head);
1808 drm_mode_destroy(dev, match_mode);
1809 found = 1;
1810 break;
1811 }
1812 }
1813
1814 if (!found)
1815 ret = -EINVAL;
1816
1817 return ret;
1818}
1819
1820int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
1821{
1822 struct drm_connector *connector;
1823
1824 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1825 drm_mode_detachmode(dev, connector, mode);
1826 }
1827 return 0;
1828}
1829EXPORT_SYMBOL(drm_mode_detachmode_crtc);
1830
1831/**
1832 * drm_mode_attachmode_ioctl - attach a user mode to a connector
1833 * @dev: DRM device
1834 * @data: ioctl data (struct drm_mode_mode_cmd)
1835 * @file_priv: DRM file for the ioctl call
1837 *
1838 * This attaches a user-specified mode to a connector.
1839 * Called by the user via ioctl.
1840 *
1841 * RETURNS:
1842 * Zero on success, errno on failure.
1843 */
1844int drm_mode_attachmode_ioctl(struct drm_device *dev,
1845 void *data, struct drm_file *file_priv)
1846{
1847 struct drm_mode_mode_cmd *mode_cmd = data;
1848 struct drm_connector *connector;
1849 struct drm_display_mode *mode;
1850 struct drm_mode_object *obj;
1851 struct drm_mode_modeinfo *umode = &mode_cmd->mode;
1852 int ret = 0;
1853
1854 mutex_lock(&dev->mode_config.mutex);
1855
1856 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
1857 if (!obj) {
1858 ret = -EINVAL;
1859 goto out;
1860 }
1861 connector = obj_to_connector(obj);
1862
1863 mode = drm_mode_create(dev);
1864 if (!mode) {
1865 ret = -ENOMEM;
1866 goto out;
1867 }
1868
1869 drm_crtc_convert_umode(mode, umode);
1870
1871 ret = drm_mode_attachmode(dev, connector, mode);
1872out:
1873 mutex_unlock(&dev->mode_config.mutex);
1874 return ret;
1875}
1876
1877
1878/**
1879 * drm_mode_detachmode_ioctl - detach a user-specified mode from a connector
1880 * @dev: DRM device
1881 * @data: ioctl data (struct drm_mode_mode_cmd)
1882 * @file_priv: DRM file for the ioctl call
1884 *
1885 * Called by the user via ioctl.
1886 *
1887 * RETURNS:
1888 * Zero on success, errno on failure.
1889 */
1890int drm_mode_detachmode_ioctl(struct drm_device *dev,
1891 void *data, struct drm_file *file_priv)
1892{
1893 struct drm_mode_object *obj;
1894 struct drm_mode_mode_cmd *mode_cmd = data;
1895 struct drm_connector *connector;
1896 struct drm_display_mode mode;
1897 struct drm_mode_modeinfo *umode = &mode_cmd->mode;
1898 int ret = 0;
1899
1900 mutex_lock(&dev->mode_config.mutex);
1901
1902 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
1903 if (!obj) {
1904 ret = -EINVAL;
1905 goto out;
1906 }
1907 connector = obj_to_connector(obj);
1908
1909 drm_crtc_convert_umode(&mode, umode);
1910 ret = drm_mode_detachmode(dev, connector, &mode);
1911out:
1912 mutex_unlock(&dev->mode_config.mutex);
1913 return ret;
1914}
1915
1916struct drm_property *drm_property_create(struct drm_device *dev, int flags,
1917 const char *name, int num_values)
1918{
1919 struct drm_property *property = NULL;
1920
1921 property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
1922 if (!property)
1923 return NULL;
1924
1925 if (num_values) {
1926 property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
1927 if (!property->values)
1928 goto fail;
1929 }
1930
1931 drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
1932 property->flags = flags;
1933 property->num_values = num_values;
1934 INIT_LIST_HEAD(&property->enum_blob_list);
1935
1936 if (name)
1937 strncpy(property->name, name, DRM_PROP_NAME_LEN);
1938
1939 list_add_tail(&property->head, &dev->mode_config.property_list);
1940 return property;
1941fail:
1942 kfree(property);
1943 return NULL;
1944}
1945EXPORT_SYMBOL(drm_property_create);
1946
1947int drm_property_add_enum(struct drm_property *property, int index,
1948 uint64_t value, const char *name)
1949{
1950 struct drm_property_enum *prop_enum;
1951
1952 if (!(property->flags & DRM_MODE_PROP_ENUM))
1953 return -EINVAL;
1954
1955 if (!list_empty(&property->enum_blob_list)) {
1956 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
1957 if (prop_enum->value == value) {
1958 strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
1959 prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
1960 return 0;
1961 }
1962 }
1963 }
1964
1965 prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
1966 if (!prop_enum)
1967 return -ENOMEM;
1968
1969 strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
1970 prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
1971 prop_enum->value = value;
1972
1973 property->values[index] = value;
1974 list_add_tail(&prop_enum->head, &property->enum_blob_list);
1975 return 0;
1976}
1977EXPORT_SYMBOL(drm_property_add_enum);
1978
1979void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
1980{
1981 struct drm_property_enum *prop_enum, *pt;
1982
1983 list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
1984 list_del(&prop_enum->head);
1985 kfree(prop_enum);
1986 }
1987
1988 if (property->num_values)
1989 kfree(property->values);
1990 drm_mode_object_put(dev, &property->base);
1991 list_del(&property->head);
1992 kfree(property);
1993}
1994EXPORT_SYMBOL(drm_property_destroy);
1995
1996int drm_connector_attach_property(struct drm_connector *connector,
1997 struct drm_property *property, uint64_t init_val)
1998{
1999 int i;
2000
2001 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
2002 if (connector->property_ids[i] == 0) {
2003 connector->property_ids[i] = property->base.id;
2004 connector->property_values[i] = init_val;
2005 break;
2006 }
2007 }
2008
2009 if (i == DRM_CONNECTOR_MAX_PROPERTY)
2010 return -EINVAL;
2011 return 0;
2012}
2013EXPORT_SYMBOL(drm_connector_attach_property);
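
/*
 * Typical driver-side use of the three property helpers above, shown as a
 * sketch for a hypothetical "foo" driver (the property name and foo_* names
 * are placeholders): create an enum property once, populate its values,
 * then attach it to a connector with an initial value.
 */
static struct drm_property *foo_dither_property;

static void foo_create_properties(struct drm_device *dev,
				  struct drm_connector *connector)
{
	foo_dither_property = drm_property_create(dev, DRM_MODE_PROP_ENUM,
						  "dithering", 2);
	if (!foo_dither_property)
		return;

	drm_property_add_enum(foo_dither_property, 0, 0, "off");
	drm_property_add_enum(foo_dither_property, 1, 1, "on");

	/* initial value 0 ("off"); userspace changes it via SETPROPERTY */
	drm_connector_attach_property(connector, foo_dither_property, 0);
}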
2014
2015int drm_connector_property_set_value(struct drm_connector *connector,
2016 struct drm_property *property, uint64_t value)
2017{
2018 int i;
2019
2020 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
2021 if (connector->property_ids[i] == property->base.id) {
2022 connector->property_values[i] = value;
2023 break;
2024 }
2025 }
2026
2027 if (i == DRM_CONNECTOR_MAX_PROPERTY)
2028 return -EINVAL;
2029 return 0;
2030}
2031EXPORT_SYMBOL(drm_connector_property_set_value);
2032
2033int drm_connector_property_get_value(struct drm_connector *connector,
2034 struct drm_property *property, uint64_t *val)
2035{
2036 int i;
2037
2038 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
2039 if (connector->property_ids[i] == property->base.id) {
2040 *val = connector->property_values[i];
2041 break;
2042 }
2043 }
2044
2045 if (i == DRM_CONNECTOR_MAX_PROPERTY)
2046 return -EINVAL;
2047 return 0;
2048}
2049EXPORT_SYMBOL(drm_connector_property_get_value);
2050
2051int drm_mode_getproperty_ioctl(struct drm_device *dev,
2052 void *data, struct drm_file *file_priv)
2053{
2054 struct drm_mode_object *obj;
2055 struct drm_mode_get_property *out_resp = data;
2056 struct drm_property *property;
2057 int enum_count = 0;
2058 int blob_count = 0;
2059 int value_count = 0;
2060 int ret = 0, i;
2061 int copied;
2062 struct drm_property_enum *prop_enum;
2063 struct drm_mode_property_enum __user *enum_ptr;
2064 struct drm_property_blob *prop_blob;
2065 uint32_t *blob_id_ptr;
2066 uint64_t __user *values_ptr;
2067 uint32_t __user *blob_length_ptr;
2068
2069 mutex_lock(&dev->mode_config.mutex);
2070 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
2071 if (!obj) {
2072 ret = -EINVAL;
2073 goto done;
2074 }
2075 property = obj_to_property(obj);
2076
2077 if (property->flags & DRM_MODE_PROP_ENUM) {
2078 list_for_each_entry(prop_enum, &property->enum_blob_list, head)
2079 enum_count++;
2080 } else if (property->flags & DRM_MODE_PROP_BLOB) {
2081 list_for_each_entry(prop_blob, &property->enum_blob_list, head)
2082 blob_count++;
2083 }
2084
2085 value_count = property->num_values;
2086
2087 strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
2088 out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
2089 out_resp->flags = property->flags;
2090
2091 if ((out_resp->count_values >= value_count) && value_count) {
2092 values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
2093 for (i = 0; i < value_count; i++) {
2094 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
2095 ret = -EFAULT;
2096 goto done;
2097 }
2098 }
2099 }
2100 out_resp->count_values = value_count;
2101
2102 if (property->flags & DRM_MODE_PROP_ENUM) {
2103 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
2104 copied = 0;
2105 enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
2106 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
2107
2108 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
2109 ret = -EFAULT;
2110 goto done;
2111 }
2112
2113 if (copy_to_user(&enum_ptr[copied].name,
2114 &prop_enum->name, DRM_PROP_NAME_LEN)) {
2115 ret = -EFAULT;
2116 goto done;
2117 }
2118 copied++;
2119 }
2120 }
2121 out_resp->count_enum_blobs = enum_count;
2122 }
2123
2124 if (property->flags & DRM_MODE_PROP_BLOB) {
2125 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
2126 copied = 0;
2127 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
2128 blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
2129
2130 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
2131 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
2132 ret = -EFAULT;
2133 goto done;
2134 }
2135
2136 if (put_user(prop_blob->length, blob_length_ptr + copied)) {
2137 ret = -EFAULT;
2138 goto done;
2139 }
2140
2141 copied++;
2142 }
2143 }
2144 out_resp->count_enum_blobs = blob_count;
2145 }
2146done:
2147 mutex_unlock(&dev->mode_config.mutex);
2148 return ret;
2149}
2150
2151static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
2152 void *data)
2153{
2154 struct drm_property_blob *blob;
2155
2156 if (!length || !data)
2157 return NULL;
2158
2159 blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
2160 if (!blob)
2161 return NULL;
2162
2163 blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
2164 blob->length = length;
2165
2166 memcpy(blob->data, data, length);
2167
2168 drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
2169
2170 list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
2171 return blob;
2172}
2173
2174static void drm_property_destroy_blob(struct drm_device *dev,
2175 struct drm_property_blob *blob)
2176{
2177 drm_mode_object_put(dev, &blob->base);
2178 list_del(&blob->head);
2179 kfree(blob);
2180}
2181
2182int drm_mode_getblob_ioctl(struct drm_device *dev,
2183 void *data, struct drm_file *file_priv)
2184{
2185 struct drm_mode_object *obj;
2186 struct drm_mode_get_blob *out_resp = data;
2187 struct drm_property_blob *blob;
2188 int ret = 0;
2189 void *blob_ptr;
2190
2191 mutex_lock(&dev->mode_config.mutex);
2192 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
2193 if (!obj) {
2194 ret = -EINVAL;
2195 goto done;
2196 }
2197 blob = obj_to_blob(obj);
2198
2199 if (out_resp->length == blob->length) {
2200 blob_ptr = (void *)(unsigned long)out_resp->data;
2201 if (copy_to_user(blob_ptr, blob->data, blob->length)){
2202 ret = -EFAULT;
2203 goto done;
2204 }
2205 }
2206 out_resp->length = blob->length;
2207
2208done:
2209 mutex_unlock(&dev->mode_config.mutex);
2210 return ret;
2211}
2212
2213int drm_mode_connector_update_edid_property(struct drm_connector *connector,
2214 struct edid *edid)
2215{
2216 struct drm_device *dev = connector->dev;
2217 int ret = 0;
2218
2219 if (connector->edid_blob_ptr)
2220 drm_property_destroy_blob(dev, connector->edid_blob_ptr);
2221
2222 /* Clear the EDID property when there is no EDID. */
2223 if (!edid) {
2224 connector->edid_blob_ptr = NULL;
2225 ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
2226 return ret;
2227 }
2228
2229 connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
2230
2231 ret = drm_connector_property_set_value(connector,
2232 dev->mode_config.edid_property,
2233 connector->edid_blob_ptr->base.id);
2234
2235 return ret;
2236}
2237EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
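
/*
 * Sketch of the usual caller: a connector's get_modes() hook in a
 * hypothetical driver reads the 128-byte EDID base block over DDC
 * (foo_read_edid() is a placeholder for that driver-specific transfer),
 * publishes it through the helper above and turns it into modes with
 * drm_add_edid_modes() from drm_edid.c.
 */
static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct edid *edid = foo_read_edid(connector);	/* hypothetical DDC read */
	int count = 0;

	drm_mode_connector_update_edid_property(connector, edid);
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}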
2238
2239int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
2240 void *data, struct drm_file *file_priv)
2241{
2242 struct drm_mode_connector_set_property *out_resp = data;
2243 struct drm_mode_object *obj;
2244 struct drm_property *property;
2245 struct drm_connector *connector;
2246 int ret = -EINVAL;
2247 int i;
2248
2249 mutex_lock(&dev->mode_config.mutex);
2250
2251 obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
2252 if (!obj) {
2253 goto out;
2254 }
2255 connector = obj_to_connector(obj);
2256
2257 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
2258 if (connector->property_ids[i] == out_resp->prop_id)
2259 break;
2260 }
2261
2262 if (i == DRM_CONNECTOR_MAX_PROPERTY) {
2263 goto out;
2264 }
2265
2266 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
2267 if (!obj) {
2268 goto out;
2269 }
2270 property = obj_to_property(obj);
2271
2272 if (property->flags & DRM_MODE_PROP_IMMUTABLE)
2273 goto out;
2274
2275 if (property->flags & DRM_MODE_PROP_RANGE) {
2276 if (out_resp->value < property->values[0])
2277 goto out;
2278
2279 if (out_resp->value > property->values[1])
2280 goto out;
2281 } else {
2282 int found = 0;
2283 for (i = 0; i < property->num_values; i++) {
2284 if (property->values[i] == out_resp->value) {
2285 found = 1;
2286 break;
2287 }
2288 }
2289 if (!found) {
2290 goto out;
2291 }
2292 }
2293
2294 if (connector->funcs->set_property)
2295 ret = connector->funcs->set_property(connector, property, out_resp->value);
2296
2297 /* store the property value if successful */
2298 if (!ret)
2299 drm_connector_property_set_value(connector, property, out_resp->value);
2300out:
2301 mutex_unlock(&dev->mode_config.mutex);
2302 return ret;
2303}
2304
2305int drm_mode_connector_attach_encoder(struct drm_connector *connector,
2306 struct drm_encoder *encoder)
2307{
2308 int i;
2309
2310 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
2311 if (connector->encoder_ids[i] == 0) {
2312 connector->encoder_ids[i] = encoder->base.id;
2313 return 0;
2314 }
2315 }
2316 return -ENOMEM;
2317}
2318EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
2319
2320void drm_mode_connector_detach_encoder(struct drm_connector *connector,
2321 struct drm_encoder *encoder)
2322{
2323 int i;
2324 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
2325 if (connector->encoder_ids[i] == encoder->base.id) {
2326 connector->encoder_ids[i] = 0;
2327 if (connector->encoder == encoder)
2328 connector->encoder = NULL;
2329 break;
2330 }
2331 }
2332}
2333EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
2334
2335bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
2336 int gamma_size)
2337{
2338 crtc->gamma_size = gamma_size;
2339
2340 crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
2341 if (!crtc->gamma_store) {
2342 crtc->gamma_size = 0;
2343 return false;
2344 }
2345
2346 return true;
2347}
2348EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
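
/*
 * Sketch of the expected call site, assuming drm_crtc_init() (defined
 * earlier in this file) as the CRTC registration entry point; struct
 * foo_crtc and foo_crtc_funcs belong to a hypothetical driver.  The store
 * allocated by the helper above is three consecutive arrays of gamma_size
 * u16 entries (red, green, blue), which is the layout the gamma ioctls
 * below copy in and out of.
 */
static void foo_crtc_create(struct drm_device *dev, struct foo_crtc *foo_crtc)
{
	drm_crtc_init(dev, &foo_crtc->base, &foo_crtc_funcs);
	drm_mode_crtc_set_gamma_size(&foo_crtc->base, 256);	/* 256-entry LUT per channel */
}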
2349
2350int drm_mode_gamma_set_ioctl(struct drm_device *dev,
2351 void *data, struct drm_file *file_priv)
2352{
2353 struct drm_mode_crtc_lut *crtc_lut = data;
2354 struct drm_mode_object *obj;
2355 struct drm_crtc *crtc;
2356 void *r_base, *g_base, *b_base;
2357 int size;
2358 int ret = 0;
2359
2360 mutex_lock(&dev->mode_config.mutex);
2361 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
2362 if (!obj) {
2363 ret = -EINVAL;
2364 goto out;
2365 }
2366 crtc = obj_to_crtc(obj);
2367
2368 /* memcpy into gamma store */
2369 if (crtc_lut->gamma_size != crtc->gamma_size) {
2370 ret = -EINVAL;
2371 goto out;
2372 }
2373
2374 size = crtc_lut->gamma_size * (sizeof(uint16_t));
2375 r_base = crtc->gamma_store;
2376 if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
2377 ret = -EFAULT;
2378 goto out;
2379 }
2380
2381 g_base = r_base + size;
2382 if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
2383 ret = -EFAULT;
2384 goto out;
2385 }
2386
2387 b_base = g_base + size;
2388 if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
2389 ret = -EFAULT;
2390 goto out;
2391 }
2392
2393 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
2394
2395out:
2396 mutex_unlock(&dev->mode_config.mutex);
2397 return ret;
2398
2399}
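
/*
 * Userspace sketch of feeding this ioctl, assuming the libdrm wrapper
 * drmModeCrtcSetGamma(); it builds a simple linear ramp for a LUT whose
 * size must match the crtc->gamma_size checked above.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int sketch_linear_gamma(int fd, uint32_t crtc_id, uint32_t size)
{
	uint16_t lut[3 * 256];	/* red, green, blue ramps back to back */
	uint16_t *red = lut, *green = lut + size, *blue = lut + 2 * size;
	uint32_t i;

	if (size < 2 || size > 256)
		return -1;
	for (i = 0; i < size; i++)
		red[i] = green[i] = blue[i] =
			(uint16_t)(i * 0xffff / (size - 1));

	return drmModeCrtcSetGamma(fd, crtc_id, size, red, green, blue);
}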
2400
2401int drm_mode_gamma_get_ioctl(struct drm_device *dev,
2402 void *data, struct drm_file *file_priv)
2403{
2404 struct drm_mode_crtc_lut *crtc_lut = data;
2405 struct drm_mode_object *obj;
2406 struct drm_crtc *crtc;
2407 void *r_base, *g_base, *b_base;
2408 int size;
2409 int ret = 0;
2410
2411 mutex_lock(&dev->mode_config.mutex);
2412 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
2413 if (!obj) {
2414 ret = -EINVAL;
2415 goto out;
2416 }
2417 crtc = obj_to_crtc(obj);
2418
2419 /* memcpy into gamma store */
2420 if (crtc_lut->gamma_size != crtc->gamma_size) {
2421 ret = -EINVAL;
2422 goto out;
2423 }
2424
2425 size = crtc_lut->gamma_size * (sizeof(uint16_t));
2426 r_base = crtc->gamma_store;
2427 if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
2428 ret = -EFAULT;
2429 goto out;
2430 }
2431
2432 g_base = r_base + size;
2433 if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
2434 ret = -EFAULT;
2435 goto out;
2436 }
2437
2438 b_base = g_base + size;
2439 if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
2440 ret = -EFAULT;
2441 goto out;
2442 }
2443out:
2444 mutex_unlock(&dev->mode_config.mutex);
2445 return ret;
2446}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
new file mode 100644
index 000000000000..d8a982b71296
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -0,0 +1,826 @@
1/*
2 * Copyright (c) 2006-2008 Intel Corporation
3 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
4 *
5 * DRM core CRTC related functions
6 *
7 * Permission to use, copy, modify, distribute, and sell this software and its
8 * documentation for any purpose is hereby granted without fee, provided that
9 * the above copyright notice appear in all copies and that both that copyright
10 * notice and this permission notice appear in supporting documentation, and
11 * that the name of the copyright holders not be used in advertising or
12 * publicity pertaining to distribution of the software without specific,
13 * written prior permission. The copyright holders make no representations
14 * about the suitability of this software for any purpose. It is provided "as
15 * is" without express or implied warranty.
16 *
17 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
18 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
19 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
20 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
21 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
22 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
23 * OF THIS SOFTWARE.
24 *
25 * Authors:
26 * Keith Packard
27 * Eric Anholt <eric@anholt.net>
28 * Dave Airlie <airlied@linux.ie>
29 * Jesse Barnes <jesse.barnes@intel.com>
30 */
31
32#include "drmP.h"
33#include "drm_crtc.h"
34#include "drm_crtc_helper.h"
35
36/*
37 * Detailed mode info for 800x600@60Hz
38 */
39static struct drm_display_mode std_mode[] = {
40 { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840,
41 968, 1056, 0, 600, 601, 605, 628, 0,
42 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
43};
44
45/**
46 * drm_helper_probe_single_connector_modes - get complete set of display modes
47 * @connector: connector to probe
48 * @maxX: max width for modes
49 * @maxY: max height for modes
50 *
51 * LOCKING:
52 * Caller must hold mode config lock.
53 *
54 * Scan @connector and try to detect its supported display
55 * modes. Modes will first be added to the connector's probed_modes
56 * list, then culled (based on validity and the @maxX, @maxY parameters) and
57 * put into the normal modes list.
58 *
59 * Intended to be used either at bootup time or when major configuration
60 * changes have occurred.
61 *
62 * FIXME: take into account monitor limits
63 */
64void drm_helper_probe_single_connector_modes(struct drm_connector *connector,
65 uint32_t maxX, uint32_t maxY)
66{
67 struct drm_device *dev = connector->dev;
68 struct drm_display_mode *mode, *t;
69 struct drm_connector_helper_funcs *connector_funcs =
70 connector->helper_private;
71 int ret;
72
73 DRM_DEBUG("%s\n", drm_get_connector_name(connector));
74 /* set all modes to the unverified state */
75 list_for_each_entry_safe(mode, t, &connector->modes, head)
76 mode->status = MODE_UNVERIFIED;
77
78 connector->status = connector->funcs->detect(connector);
79
80 if (connector->status == connector_status_disconnected) {
81 DRM_DEBUG("%s is disconnected\n",
82 drm_get_connector_name(connector));
83 /* TODO set EDID to NULL */
84 return;
85 }
86
87 ret = (*connector_funcs->get_modes)(connector);
88
89 if (ret) {
90 drm_mode_connector_list_update(connector);
91 }
92
93 if (maxX && maxY)
94 drm_mode_validate_size(dev, &connector->modes, maxX,
95 maxY, 0);
96 list_for_each_entry_safe(mode, t, &connector->modes, head) {
97 if (mode->status == MODE_OK)
98 mode->status = connector_funcs->mode_valid(connector,
99 mode);
100 }
101
102
103 drm_mode_prune_invalid(dev, &connector->modes, true);
104
105 if (list_empty(&connector->modes)) {
106 struct drm_display_mode *stdmode;
107
108 DRM_DEBUG("No valid modes on %s\n",
109 drm_get_connector_name(connector));
110
111 /* Should we do this here ???
112 * When no valid EDID modes are available we used to bail
113 * out here; now we add the standard 800x600@60Hz mode from
114 * std_mode[] and carry on.
115 */
116 stdmode = drm_mode_duplicate(dev, &std_mode[0]);
117 drm_mode_probed_add(connector, stdmode);
118 drm_mode_list_concat(&connector->probed_modes,
119 &connector->modes);
120
121 DRM_DEBUG("Adding standard 800x600 @ 60Hz to %s\n",
122 drm_get_connector_name(connector));
123 }
124
125 drm_mode_sort(&connector->modes);
126
127 DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector));
128 list_for_each_entry_safe(mode, t, &connector->modes, head) {
129 mode->vrefresh = drm_mode_vrefresh(mode);
130
131 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
132 drm_mode_debug_printmodeline(mode);
133 }
134}
135EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
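
/*
 * The probe above relies on per-connector helper callbacks.  A hypothetical
 * driver wires them up roughly like this (foo_* functions are placeholders,
 * and drm_connector_helper_add() from drm_crtc_helper.h is assumed to be
 * the call that sets connector->helper_private):
 */
static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
	.get_modes	= foo_connector_get_modes,	/* fill probed_modes, e.g. from EDID */
	.mode_valid	= foo_connector_mode_valid,	/* per-mode MODE_OK / reject decision */
	.best_encoder	= foo_connector_best_encoder,	/* pick the encoder for this connector */
};

/* at connector init time:
 *	drm_connector_helper_add(connector, &foo_connector_helper_funcs);
 */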
136
137void drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
138 uint32_t maxY)
139{
140 struct drm_connector *connector;
141
142 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
143 drm_helper_probe_single_connector_modes(connector, maxX, maxY);
144 }
145}
146EXPORT_SYMBOL(drm_helper_probe_connector_modes);
147
148
149/**
150 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
151 * @crtc: CRTC to check
152 *
153 * LOCKING:
154 * Caller must hold mode config lock.
155 *
156 * Walk @crtc's DRM device's mode_config and see if it's in use.
157 *
158 * RETURNS:
159 * True if @crtc is part of the mode_config, false otherwise.
160 */
161bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
162{
163 struct drm_encoder *encoder;
164 struct drm_device *dev = crtc->dev;
165 /* FIXME: Locking around list access? */
166 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
167 if (encoder->crtc == crtc)
168 return true;
169 return false;
170}
171EXPORT_SYMBOL(drm_helper_crtc_in_use);
172
173/**
174 * drm_helper_disable_unused_functions - disable unused objects
175 * @dev: DRM device
176 *
177 * LOCKING:
178 * Caller must hold mode config lock.
179 *
180 * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
181 * by calling its dpms function, which should power it off.
182 */
183void drm_helper_disable_unused_functions(struct drm_device *dev)
184{
185 struct drm_encoder *encoder;
186 struct drm_encoder_helper_funcs *encoder_funcs;
187 struct drm_crtc *crtc;
188
189 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
190 encoder_funcs = encoder->helper_private;
191 if (!encoder->crtc)
192 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
193 }
194
195 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
196 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
197 crtc->enabled = drm_helper_crtc_in_use(crtc);
198 if (!crtc->enabled) {
199 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
200 crtc->fb = NULL;
201 }
202 }
203}
204EXPORT_SYMBOL(drm_helper_disable_unused_functions);
205
206static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
207{
208 struct drm_display_mode *mode;
209
210 list_for_each_entry(mode, &connector->modes, head) {
211 if (drm_mode_width(mode) > width ||
212 drm_mode_height(mode) > height)
213 continue;
214 if (mode->type & DRM_MODE_TYPE_PREFERRED)
215 return mode;
216 }
217 return NULL;
218}
219
220static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
221{
222 bool enable;
223
224 if (strict) {
225 enable = connector->status == connector_status_connected;
226 } else {
227 enable = connector->status != connector_status_disconnected;
228 }
229 return enable;
230}
231
232static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
233{
234 bool any_enabled = false;
235 struct drm_connector *connector;
236 int i = 0;
237
238 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
239 enabled[i] = drm_connector_enabled(connector, true);
240 any_enabled |= enabled[i];
241 i++;
242 }
243
244 if (any_enabled)
245 return;
246
247 i = 0;
248 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
249 enabled[i] = drm_connector_enabled(connector, false);
250 i++;
251 }
252}
253
254static bool drm_target_preferred(struct drm_device *dev,
255 struct drm_display_mode **modes,
256 bool *enabled, int width, int height)
257{
258 struct drm_connector *connector;
259 int i = 0;
260
261 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
262
263 if (enabled[i] == false) {
264 i++;
265 continue;
266 }
267
268 modes[i] = drm_has_preferred_mode(connector, width, height);
269 if (!modes[i]) {
270 list_for_each_entry(modes[i], &connector->modes, head)
271 break;
272 }
273 i++;
274 }
275 return true;
276}
277
278static int drm_pick_crtcs(struct drm_device *dev,
279 struct drm_crtc **best_crtcs,
280 struct drm_display_mode **modes,
281 int n, int width, int height)
282{
283 int c, o;
284 struct drm_connector *connector;
285 struct drm_connector_helper_funcs *connector_funcs;
286 struct drm_encoder *encoder;
287 struct drm_crtc *best_crtc;
288 int my_score, best_score, score;
289 struct drm_crtc **crtcs, *crtc;
290
291 if (n == dev->mode_config.num_connector)
292 return 0;
293 c = 0;
294 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
295 if (c == n)
296 break;
297 c++;
298 }
299
300 best_crtcs[n] = NULL;
301 best_crtc = NULL;
302 best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
303 if (modes[n] == NULL)
304 return best_score;
305
306 crtcs = kmalloc(dev->mode_config.num_connector *
307 sizeof(struct drm_crtc *), GFP_KERNEL);
308 if (!crtcs)
309 return best_score;
310
311 my_score = 1;
312 if (connector->status == connector_status_connected)
313 my_score++;
314 if (drm_has_preferred_mode(connector, width, height))
315 my_score++;
316
317 connector_funcs = connector->helper_private;
318 encoder = connector_funcs->best_encoder(connector);
319 if (!encoder)
320 goto out;
321
322 connector->encoder = encoder;
323
324 /* select a crtc for this connector and then attempt to configure
325 remaining connectors */
326 c = 0;
327 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
328
329 if ((connector->encoder->possible_crtcs & (1 << c)) == 0) {
330 c++;
331 continue;
332 }
333
334 for (o = 0; o < n; o++)
335 if (best_crtcs[o] == crtc)
336 break;
337
338 if (o < n) {
339 /* ignore cloning for now */
340 c++;
341 continue;
342 }
343
344 crtcs[n] = crtc;
345 memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
346 score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
347 width, height);
348 if (score > best_score) {
349 best_crtc = crtc;
350 best_score = score;
351 memcpy(best_crtcs, crtcs,
352 dev->mode_config.num_connector *
353 sizeof(struct drm_crtc *));
354 }
355 c++;
356 }
357out:
358 kfree(crtcs);
359 return best_score;
360}
361
362static void drm_setup_crtcs(struct drm_device *dev)
363{
364 struct drm_crtc **crtcs;
365 struct drm_display_mode **modes;
366 struct drm_encoder *encoder;
367 struct drm_connector *connector;
368 bool *enabled;
369 int width, height;
370 int i, ret;
371
372 width = dev->mode_config.max_width;
373 height = dev->mode_config.max_height;
374
375 /* clean out all the encoder/crtc combos */
376 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
377 encoder->crtc = NULL;
378 }
379
380 crtcs = kcalloc(dev->mode_config.num_connector,
381 sizeof(struct drm_crtc *), GFP_KERNEL);
382 modes = kcalloc(dev->mode_config.num_connector,
383 sizeof(struct drm_display_mode *), GFP_KERNEL);
384 enabled = kcalloc(dev->mode_config.num_connector,
385 sizeof(bool), GFP_KERNEL);
386
387 drm_enable_connectors(dev, enabled);
388
389 ret = drm_target_preferred(dev, modes, enabled, width, height);
390 if (!ret)
391 DRM_ERROR("Unable to find initial modes\n");
392
393 drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
394
395 i = 0;
396 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
397 struct drm_display_mode *mode = modes[i];
398 struct drm_crtc *crtc = crtcs[i];
399
400 if (connector->encoder == NULL) {
401 i++;
402 continue;
403 }
404
405 if (mode && crtc) {
406 crtc->desired_mode = mode;
407 connector->encoder->crtc = crtc;
408 } else
409 connector->encoder->crtc = NULL;
410 i++;
411 }
412
413 kfree(crtcs);
414 kfree(modes);
415 kfree(enabled);
416}
417/**
418 * drm_crtc_helper_set_mode - set a mode on a CRTC
419 * @crtc: CRTC to program
420 * @mode: mode to use
421 * @x: horizontal offset of the scanout into the framebuffer
422 * @y: vertical offset of the scanout into the framebuffer
423 *
424 * LOCKING:
425 * Caller must hold mode config lock.
426 *
427 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
428 * to fixup or reject the mode prior to trying to set it.
429 *
430 * RETURNS:
431 * True if the mode was set successfully, or false otherwise.
432 */
433bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
434 struct drm_display_mode *mode,
435 int x, int y,
436 struct drm_framebuffer *old_fb)
437{
438 struct drm_device *dev = crtc->dev;
439 struct drm_display_mode *adjusted_mode, saved_mode;
440 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
441 struct drm_encoder_helper_funcs *encoder_funcs;
442 int saved_x, saved_y;
443 struct drm_encoder *encoder;
444 bool ret = true;
445
446 adjusted_mode = drm_mode_duplicate(dev, mode);
447
448 crtc->enabled = drm_helper_crtc_in_use(crtc);
449
450 if (!crtc->enabled)
451 return true;
452
453 saved_mode = crtc->mode;
454 saved_x = crtc->x;
455 saved_y = crtc->y;
456
457 /* Update crtc values up front so the driver can rely on them for mode
458 * setting.
459 */
460 crtc->mode = *mode;
461 crtc->x = x;
462 crtc->y = y;
463
464 if (drm_mode_equal(&saved_mode, &crtc->mode)) {
465 if (saved_x != crtc->x || saved_y != crtc->y) {
466 crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
467 old_fb);
468 goto done;
469 }
470 }
471
472 /* Pass our mode to the connectors and the CRTC to give them a chance to
473 * adjust it according to limitations or connector properties, and also
474 * a chance to reject the mode entirely.
475 */
476 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
477
478 if (encoder->crtc != crtc)
479 continue;
480 encoder_funcs = encoder->helper_private;
481 if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
482 adjusted_mode))) {
483 goto done;
484 }
485 }
486
487 if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
488 goto done;
489 }
490
491 /* Prepare the encoders and CRTCs before setting the mode. */
492 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
493
494 if (encoder->crtc != crtc)
495 continue;
496 encoder_funcs = encoder->helper_private;
497 /* Disable the encoders as the first thing we do. */
498 encoder_funcs->prepare(encoder);
499 }
500
501 crtc_funcs->prepare(crtc);
502
503 /* Set up the DPLL and any encoders state that needs to adjust or depend
504 * on the DPLL.
505 */
506 crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
507
508 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
509
510 if (encoder->crtc != crtc)
511 continue;
512
513 DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
514 mode->name, mode->base.id);
515 encoder_funcs = encoder->helper_private;
516 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
517 }
518
519 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
520 crtc_funcs->commit(crtc);
521
522 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
523
524 if (encoder->crtc != crtc)
525 continue;
526
527 encoder_funcs = encoder->helper_private;
528 encoder_funcs->commit(encoder);
529
530 }
531
532 /* XXX free adjustedmode */
533 drm_mode_destroy(dev, adjusted_mode);
534 /* FIXME: add subpixel order */
535done:
536 if (!ret) {
537 crtc->mode = saved_mode;
538 crtc->x = saved_x;
539 crtc->y = saved_y;
540 }
541
542 return ret;
543}
544EXPORT_SYMBOL(drm_crtc_helper_set_mode);
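
/*
 * drm_crtc_helper_set_mode() drives a fixed sequence of per-CRTC and
 * per-encoder callbacks: mode_fixup -> prepare -> mode_set -> commit.
 * A hypothetical driver provides the CRTC side through
 * drm_crtc_helper_funcs; the foo_* names are placeholders.
 */
static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.dpms		= foo_crtc_dpms,	/* power state, used by disable_unused_functions */
	.prepare	= foo_crtc_prepare,	/* typically dpms off before reprogramming */
	.commit		= foo_crtc_commit,	/* typically dpms on after mode_set */
	.mode_fixup	= foo_crtc_mode_fixup,	/* adjust or reject the requested mode */
	.mode_set	= foo_crtc_mode_set,	/* program PLLs, plane and pipe */
	.mode_set_base	= foo_crtc_mode_set_base, /* pan/flip without a full mode set */
};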
545
546
547/**
548 * drm_crtc_helper_set_config - set a new config from userspace
549 * @set: mode set configuration: the CRTC, framebuffer, scanout offset and
550 *       the connectors that should be driven with this configuration
554 *
555 * LOCKING:
556 * Caller must hold mode config lock.
557 *
558 * Set up a new configuration, provided by the user in @set, and enable
559 * it.
560 *
561 * RETURNS:
562 * Zero on success, -EINVAL or -ENOMEM on failure.
563 */
564int drm_crtc_helper_set_config(struct drm_mode_set *set)
565{
566 struct drm_device *dev;
567 struct drm_crtc **save_crtcs, *new_crtc;
568 struct drm_encoder **save_encoders, *new_encoder;
569 struct drm_framebuffer *old_fb;
570 bool save_enabled;
571 bool changed = false;
572 bool flip_or_move = false;
573 struct drm_connector *connector;
574 int count = 0, ro, fail = 0;
575 struct drm_crtc_helper_funcs *crtc_funcs;
576 int ret = 0;
577
578 DRM_DEBUG("\n");
579
580 if (!set)
581 return -EINVAL;
582
583 if (!set->crtc)
584 return -EINVAL;
585
586 if (!set->crtc->helper_private)
587 return -EINVAL;
588
589 crtc_funcs = set->crtc->helper_private;
590
591 DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n",
592 set->crtc, set->crtc->base.id, set->fb, set->connectors,
593 (int)set->num_connectors, set->x, set->y);
594
595 dev = set->crtc->dev;
596
597 /* save previous config */
598 save_enabled = set->crtc->enabled;
599
600 /* this is meant to be num_connector not num_crtc */
601 save_crtcs = kzalloc(dev->mode_config.num_connector *
602 sizeof(struct drm_crtc *), GFP_KERNEL);
603 if (!save_crtcs)
604 return -ENOMEM;
605
606 save_encoders = kzalloc(dev->mode_config.num_connector *
607 sizeof(struct drm_encoder *), GFP_KERNEL);
608 if (!save_encoders) {
609 kfree(save_crtcs);
610 return -ENOMEM;
611 }
612
613 /* We should be able to check here if the fb has the same properties
614 * and then just flip_or_move it */
615 if (set->crtc->fb != set->fb) {
616 /* if we have no fb then it's a mode change, not a flip */
617 if (set->crtc->fb == NULL)
618 changed = true;
619 else
620 flip_or_move = true;
621 }
622
623 if (set->x != set->crtc->x || set->y != set->crtc->y)
624 flip_or_move = true;
625
626 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
627 DRM_DEBUG("modes are different\n");
628 drm_mode_debug_printmodeline(&set->crtc->mode);
629 drm_mode_debug_printmodeline(set->mode);
630 changed = true;
631 }
632
633 /* a) traverse passed in connector list and get encoders for them */
634 count = 0;
635 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
636 struct drm_connector_helper_funcs *connector_funcs =
637 connector->helper_private;
638 save_encoders[count++] = connector->encoder;
639 new_encoder = connector->encoder;
640 for (ro = 0; ro < set->num_connectors; ro++) {
641 if (set->connectors[ro] == connector) {
642 new_encoder = connector_funcs->best_encoder(connector);
643 /* if we can't get an encoder for a connector
644 we are setting now - then fail */
645 if (new_encoder == NULL)
646 /* don't break so the failure path works correctly */
647 fail = 1;
648 break;
649 }
650 }
651
652 if (new_encoder != connector->encoder) {
653 changed = true;
654 connector->encoder = new_encoder;
655 }
656 }
657
658 if (fail) {
659 ret = -EINVAL;
660 goto fail_no_encoder;
661 }
662
663 count = 0;
664 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
665 if (!connector->encoder)
666 continue;
667
668 save_crtcs[count++] = connector->encoder->crtc;
669
670 if (connector->encoder->crtc == set->crtc)
671 new_crtc = NULL;
672 else
673 new_crtc = connector->encoder->crtc;
674
675 for (ro = 0; ro < set->num_connectors; ro++) {
676 if (set->connectors[ro] == connector)
677 new_crtc = set->crtc;
678 }
679 if (new_crtc != connector->encoder->crtc) {
680 changed = true;
681 connector->encoder->crtc = new_crtc;
682 }
683 }
684
685 /* mode_set_base is not a required function */
686 if (flip_or_move && !crtc_funcs->mode_set_base)
687 changed = true;
688
689 if (changed) {
690 old_fb = set->crtc->fb;
691 set->crtc->fb = set->fb;
692 set->crtc->enabled = (set->mode != NULL);
693 if (set->mode != NULL) {
694 DRM_DEBUG("attempting to set mode from userspace\n");
695 drm_mode_debug_printmodeline(set->mode);
696 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
697 set->x, set->y,
698 old_fb)) {
699 ret = -EINVAL;
700 goto fail_set_mode;
701 }
702 /* TODO are these needed? */
703 set->crtc->desired_x = set->x;
704 set->crtc->desired_y = set->y;
705 set->crtc->desired_mode = set->mode;
706 }
707 drm_helper_disable_unused_functions(dev);
708 } else if (flip_or_move) {
709 old_fb = set->crtc->fb;
710 if (set->crtc->fb != set->fb)
711 set->crtc->fb = set->fb;
712 crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb);
713 }
714
715 kfree(save_encoders);
716 kfree(save_crtcs);
717 return 0;
718
719fail_set_mode:
720 set->crtc->enabled = save_enabled;
721 count = 0;
722 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
723 connector->encoder->crtc = save_crtcs[count++];
724fail_no_encoder:
725 kfree(save_crtcs);
726 count = 0;
727 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
728 connector->encoder = save_encoders[count++];
729 }
730 kfree(save_encoders);
731 return ret;
732}
733EXPORT_SYMBOL(drm_crtc_helper_set_config);
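
/*
 * The helper above is what a driver normally plugs into the core
 * drm_crtc_funcs.set_config hook used by drm_mode_setcrtc(); the other
 * members shown are the hooks the cursor and gamma ioctls in drm_crtc.c
 * call.  foo_* names are placeholders for a hypothetical driver, and
 * .destroy is assumed to be the usual cleanup member of drm_crtc_funcs.
 */
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config	= drm_crtc_helper_set_config,
	.cursor_set	= foo_crtc_cursor_set,
	.cursor_move	= foo_crtc_cursor_move,
	.gamma_set	= foo_crtc_gamma_set,
	.destroy	= foo_crtc_destroy,	/* frees driver CRTC state */
};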
734
735bool drm_helper_plugged_event(struct drm_device *dev)
736{
737 DRM_DEBUG("\n");
738
739 drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
740 dev->mode_config.max_height);
741
742 drm_setup_crtcs(dev);
743
744 /* alert the driver fb layer */
745 dev->mode_config.funcs->fb_changed(dev);
746
747 /* FIXME: send hotplug event */
748 return true;
749}
750/**
751 * drm_helper_initial_config - setup a sane initial connector configuration
752 * @dev: DRM device
753 * @can_grow: this configuration is growable
754 *
755 * LOCKING:
756 * Called at init time, must take mode config lock.
757 *
758 * Scan the CRTCs and connectors and try to put together an initial setup.
759 * At the moment, this is a cloned configuration across all heads with
760 * a new framebuffer object as the backing store.
761 *
762 * RETURNS:
763 * Zero if everything went ok, nonzero otherwise.
764 */
765bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
766{
767 int ret = false;
768
769 drm_helper_plugged_event(dev);
770 return ret;
771}
772EXPORT_SYMBOL(drm_helper_initial_config);
773
774/**
775 * drm_helper_hotplug_stage_two - second stage of a hotplug event
776 * @dev: DRM device
778 *
779 * LOCKING:
780 * Caller must hold mode config lock, function might grab struct lock.
781 *
782 * Stage two of a hotplug.
783 *
784 * RETURNS:
785 * Zero on success, errno on failure.
786 */
787int drm_helper_hotplug_stage_two(struct drm_device *dev)
788{
789 drm_helper_plugged_event(dev);
790
791 return 0;
792}
793EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
794
795int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
796 struct drm_mode_fb_cmd *mode_cmd)
797{
798 fb->width = mode_cmd->width;
799 fb->height = mode_cmd->height;
800 fb->pitch = mode_cmd->pitch;
801 fb->bits_per_pixel = mode_cmd->bpp;
802 fb->depth = mode_cmd->depth;
803
804 return 0;
805}
806EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
807
808int drm_helper_resume_force_mode(struct drm_device *dev)
809{
810 struct drm_crtc *crtc;
811 int ret;
812
813 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
814
815 if (!crtc->enabled)
816 continue;
817
818 ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
819 crtc->x, crtc->y, crtc->fb);
820
821 if (ret == false)
822 DRM_ERROR("failed to set mode on crtc %p\n", crtc);
823 }
824 return 0;
825}
826EXPORT_SYMBOL(drm_helper_resume_force_mode);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 996097acb5e7..febb517ee679 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -74,6 +74,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
 
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -123,6 +126,23 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 };
 
 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -138,8 +158,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
  */
 int drm_lastclose(struct drm_device * dev)
 {
-	struct drm_magic_entry *pt, *next;
-	struct drm_map_list *r_list, *list_t;
 	struct drm_vma_entry *vma, *vma_temp;
 	int i;
 
@@ -149,13 +167,7 @@ int drm_lastclose(struct drm_device * dev)
 		dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");
 
-	if (dev->unique) {
-		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
-		dev->unique = NULL;
-		dev->unique_len = 0;
-	}
-
-	if (dev->irq_enabled)
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
@@ -164,18 +176,9 @@ int drm_lastclose(struct drm_device * dev)
 	drm_drawable_free_all(dev);
 	del_timer(&dev->timer);
 
-	/* Clear pid list */
-	if (dev->magicfree.next) {
-		list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
-			list_del(&pt->head);
-			drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
-			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
-		}
-		drm_ht_remove(&dev->magiclist);
-	}
-
 	/* Clear AGP information */
-	if (drm_core_has_AGP(dev) && dev->agp) {
+	if (drm_core_has_AGP(dev) && dev->agp &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
 		struct drm_agp_mem *entry, *tempe;
 
 		/* Remove AGP resources, but leave dev->agp
@@ -194,7 +197,8 @@ int drm_lastclose(struct drm_device * dev)
 		dev->agp->acquired = 0;
 		dev->agp->enabled = 0;
 	}
-	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
 		drm_sg_cleanup(dev->sg);
 		dev->sg = NULL;
 	}
@@ -205,13 +209,6 @@ int drm_lastclose(struct drm_device * dev)
 		drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
 	}
 
-	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
-		if (!(r_list->map->flags & _DRM_DRIVER)) {
-			drm_rmmap_locked(dev, r_list->map);
-			r_list = NULL;
-		}
-	}
-
 	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
 		for (i = 0; i < dev->queue_count; i++) {
 			if (dev->queuelist[i]) {
@@ -228,14 +225,11 @@ int drm_lastclose(struct drm_device * dev)
 	}
 	dev->queue_count = 0;
 
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_dma_takedown(dev);
 
-	if (dev->lock.hw_lock) {
-		dev->sigdata.lock = dev->lock.hw_lock = NULL;	/* SHM removed */
-		dev->lock.file_priv = NULL;
-		wake_up_interruptible(&dev->lock.lock_queue);
-	}
+	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("lastclose completed\n");
@@ -263,6 +257,8 @@ int drm_init(struct drm_driver *driver)
 
 	DRM_DEBUG("\n");
 
+	INIT_LIST_HEAD(&driver->device_list);
+
 	for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
 		pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
 
@@ -329,35 +325,24 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ht_remove(&dev->map_hash);
 	drm_ctxbitmap_cleanup(dev);
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_put_minor(&dev->control);
+
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
 	drm_put_minor(&dev->primary);
 	if (drm_put_dev(dev))
 		DRM_ERROR("Cannot unload module\n");
 }
 
-static int drm_minors_cleanup(int id, void *ptr, void *data)
-{
-	struct drm_minor *minor = ptr;
-	struct drm_device *dev;
-	struct drm_driver *driver = data;
-
-	dev = minor->dev;
-	if (minor->dev->driver != driver)
-		return 0;
-
-	if (minor->type != DRM_MINOR_LEGACY)
-		return 0;
-
-	if (dev)
-		pci_dev_put(dev->pdev);
-	drm_cleanup(dev);
-	return 1;
-}
-
 void drm_exit(struct drm_driver *driver)
 {
+	struct drm_device *dev, *tmp;
 	DRM_DEBUG("\n");
 
-	idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
+	list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+		drm_cleanup(dev);
 
 	DRM_INFO("Module unloaded\n");
 }
@@ -503,7 +488,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 		retcode = -EINVAL;
 	} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
 		   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
-		   ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
+		   ((ioctl->flags & DRM_MASTER) && !file_priv->is_master)) {
 		retcode = -EACCES;
 	} else {
 		if (cmd & (IOC_IN | IOC_OUT)) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
new file mode 100644
index 000000000000..0fbb0da342cb
--- /dev/null
+++ b/drivers/gpu/drm/drm_edid.c
@@ -0,0 +1,732 @@
1/*
2 * Copyright (c) 2006 Luc Verhaegen (quirks list)
3 * Copyright (c) 2007-2008 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com>
5 *
6 * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
7 * FB layer.
8 * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a
11 * copy of this software and associated documentation files (the "Software"),
12 * to deal in the Software without restriction, including without limitation
13 * the rights to use, copy, modify, merge, publish, distribute, sub license,
14 * and/or sell copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the
18 * next paragraph) shall be included in all copies or substantial portions
19 * of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
24 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
26 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 * DEALINGS IN THE SOFTWARE.
28 */
29#include <linux/kernel.h>
30#include <linux/i2c.h>
31#include <linux/i2c-algo-bit.h>
32#include "drmP.h"
33#include "drm_edid.h"
34
35/*
36 * TODO:
37 * - support EDID 1.4 (incl. CE blocks)
38 */
39
40/*
41 * EDID blocks out in the wild have a variety of bugs, try to collect
42 * them here (note that userspace may work around broken monitors first,
43 * but fixes should make their way here so that the kernel "just works"
44 * on as many displays as possible).
45 */
46
47/* First detailed mode wrong, use largest 60Hz mode */
48#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
49/* Reported 135MHz pixel clock is too high, needs adjustment */
50#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
51/* Prefer the largest mode at 75 Hz */
52#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
53/* Detail timing is in cm not mm */
54#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
55/* Detailed timing descriptors have bogus size values, so just take the
56 * maximum size and use that.
57 */
58#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
59 /* Monitor forgot to set the "first detailed timing is preferred" bit. */
60#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
61/* use +hsync +vsync for detailed mode */
62#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
63
64static struct edid_quirk {
65 char *vendor;
66 int product_id;
67 u32 quirks;
68} edid_quirk_list[] = {
69 /* Acer AL1706 */
70 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
71 /* Acer F51 */
72 { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
73 /* Unknown Acer */
74 { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
75
76 /* Belinea 10 15 55 */
77 { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
78 { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
79
80 /* Envision Peripherals, Inc. EN-7100e */
81 { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
82
83 /* Funai Electronics PM36B */
84 { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
85 EDID_QUIRK_DETAILED_IN_CM },
86
87 /* LG Philips LCD LP154W01-A5 */
88 { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
89 { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
90
91 /* Philips 107p5 CRT */
92 { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
93
94 /* Proview AY765C */
95 { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
96
97 /* Samsung SyncMaster 205BW. Note: irony */
98 { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
99 /* Samsung SyncMaster 22[5-6]BW */
100 { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
101 { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
102};
103
104
105/* Valid EDID header has these bytes */
106static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
107
108/**
109 * edid_is_valid - sanity check EDID data
110 * @edid: EDID data
111 *
112 * Sanity check the EDID block by looking at the header, the version number
113 * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
114 * valid.
115 */
116static bool edid_is_valid(struct edid *edid)
117{
118 int i;
119 u8 csum = 0;
120 u8 *raw_edid = (u8 *)edid;
121
122 if (memcmp(edid->header, edid_header, sizeof(edid_header)))
123 goto bad;
124 if (edid->version != 1) {
125 DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
126 goto bad;
127 }
128 if (edid->revision <= 0 || edid->revision > 3) {
129 DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision);
130 goto bad;
131 }
132
133 for (i = 0; i < EDID_LENGTH; i++)
134 csum += raw_edid[i];
135 if (csum) {
136 DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
137 goto bad;
138 }
139
140 return 1;
141
142bad:
143 if (raw_edid) {
144 DRM_ERROR("Raw EDID:\n");
145 print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
146 printk("\n");
147 }
148 return 0;
149}
150
151/**
152 * edid_vendor - match a string against EDID's obfuscated vendor field
153 * @edid: EDID to match
154 * @vendor: vendor string
155 *
156 * Returns true if @vendor is in @edid, false otherwise
157 */
158static bool edid_vendor(struct edid *edid, char *vendor)
159{
160 char edid_vendor[3];
161
162 edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
163 edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
164 ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
165	edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
166
167 return !strncmp(edid_vendor, vendor, 3);
168}
169
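The vendor field packs three letters ('A' == 1 ... 'Z' == 26) into five bits each across two ID bytes, which is why the shifts above look the way they do. A minimal standalone sketch of the decode, using illustrative byte values for "SAM" (not taken from a real EDID):

#include <stdio.h>

int main(void)
{
	/* "SAM" is S=19, A=1, M=13, i.e. 0 10011 00001 01101 -> 0x4c 0x2d */
	unsigned char mfg_id[2] = { 0x4c, 0x2d };
	char vendor[4];

	vendor[0] = ((mfg_id[0] & 0x7c) >> 2) + '@';
	vendor[1] = (((mfg_id[0] & 0x3) << 3) | ((mfg_id[1] & 0xe0) >> 5)) + '@';
	vendor[2] = (mfg_id[1] & 0x1f) + '@';
	vendor[3] = '\0';

	printf("%s\n", vendor);		/* prints SAM */
	return 0;
}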
170/**
171 * edid_get_quirks - return quirk flags for a given EDID
172 * @edid: EDID to process
173 *
174 * This tells subsequent routines what fixes they need to apply.
175 */
176static u32 edid_get_quirks(struct edid *edid)
177{
178 struct edid_quirk *quirk;
179 int i;
180
181 for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
182 quirk = &edid_quirk_list[i];
183
184 if (edid_vendor(edid, quirk->vendor) &&
185 (EDID_PRODUCT_ID(edid) == quirk->product_id))
186 return quirk->quirks;
187 }
188
189 return 0;
190}
191
192#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
193 #define MODE_REFRESH_DIFF(m, r) (abs((m)->vrefresh - (r)))
194
195
196/**
197 * edid_fixup_preferred - set preferred modes based on quirk list
198 * @connector: has mode list to fix up
199 * @quirks: quirks list
200 *
201 * Walk the mode list for @connector, clearing the preferred status
202 * on existing modes and setting it anew for the right mode ala @quirks.
203 */
204static void edid_fixup_preferred(struct drm_connector *connector,
205 u32 quirks)
206{
207 struct drm_display_mode *t, *cur_mode, *preferred_mode;
208 int target_refresh = 0;
209
210 if (list_empty(&connector->probed_modes))
211 return;
212
213 if (quirks & EDID_QUIRK_PREFER_LARGE_60)
214 target_refresh = 60;
215 if (quirks & EDID_QUIRK_PREFER_LARGE_75)
216 target_refresh = 75;
217
218 preferred_mode = list_first_entry(&connector->probed_modes,
219 struct drm_display_mode, head);
220
221 list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
222 cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
223
224 if (cur_mode == preferred_mode)
225 continue;
226
227 /* Largest mode is preferred */
228 if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
229 preferred_mode = cur_mode;
230
231 /* At a given size, try to get closest to target refresh */
232 if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
233 MODE_REFRESH_DIFF(cur_mode, target_refresh) <
234 MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
235 preferred_mode = cur_mode;
236 }
237 }
238
239 preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
240}
241
242/**
243 * drm_mode_std - convert standard mode info (width, height, refresh) into mode
244 * @t: standard timing params
245 *
246 * Take the standard timing params (in this case width, aspect, and refresh)
247 * and convert them into a real mode using CVT.
248 *
249 * Punts for now, but should eventually use the FB layer's CVT based mode
250 * generation code.
251 */
252struct drm_display_mode *drm_mode_std(struct drm_device *dev,
253 struct std_timing *t)
254{
255 struct drm_display_mode *mode;
256 int hsize = t->hsize * 8 + 248, vsize;
257
258 mode = drm_mode_create(dev);
259 if (!mode)
260 return NULL;
261
262 if (t->aspect_ratio == 0)
263 vsize = (hsize * 10) / 16;
264 else if (t->aspect_ratio == 1)
265 vsize = (hsize * 3) / 4;
266 else if (t->aspect_ratio == 2)
267 vsize = (hsize * 4) / 5;
268 else
269 vsize = (hsize * 9) / 16;
270
271 drm_mode_set_name(mode);
272
273 return mode;
274}
275
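The standard-timing bytes only encode a horizontal size and a 2-bit aspect code, so the vertical size has to be derived. A standalone sketch of that arithmetic, using an illustrative hsize byte for a 1280x1024 (5:4) entry:

#include <stdio.h>

/* Mirrors the aspect-ratio cases used by drm_mode_std(). */
static int std_vsize(int hsize, int aspect_ratio)
{
	switch (aspect_ratio) {
	case 0:  return (hsize * 10) / 16;	/* 16:10 */
	case 1:  return (hsize * 3) / 4;	/* 4:3   */
	case 2:  return (hsize * 4) / 5;	/* 5:4   */
	default: return (hsize * 9) / 16;	/* 16:9  */
	}
}

int main(void)
{
	int t_hsize = 129;			/* assumed EDID byte: (1280 / 8) - 31 */
	int hsize = t_hsize * 8 + 248;		/* 1280 */

	printf("%dx%d\n", hsize, std_vsize(hsize, 2));	/* 1280x1024 */
	return 0;
}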
276/**
277 * drm_mode_detailed - create a new mode from an EDID detailed timing section
278 * @dev: DRM device (needed to create new mode)
279 * @edid: EDID block
280 * @timing: EDID detailed timing info
281 * @quirks: quirks to apply
282 *
283 * An EDID detailed timing block contains enough info for us to create and
284 * return a new struct drm_display_mode.
285 */
286static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
287 struct edid *edid,
288 struct detailed_timing *timing,
289 u32 quirks)
290{
291 struct drm_display_mode *mode;
292 struct detailed_pixel_timing *pt = &timing->data.pixel_data;
293
294 if (pt->stereo) {
295 printk(KERN_WARNING "stereo mode not supported\n");
296 return NULL;
297 }
298 if (!pt->separate_sync) {
299 printk(KERN_WARNING "integrated sync not supported\n");
300 return NULL;
301 }
302
303 mode = drm_mode_create(dev);
304 if (!mode)
305 return NULL;
306
307 mode->type = DRM_MODE_TYPE_DRIVER;
308
309 if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
310 timing->pixel_clock = 1088;
311
312 mode->clock = timing->pixel_clock * 10;
313
314 mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
315 mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
316 pt->hsync_offset_lo);
317 mode->hsync_end = mode->hsync_start +
318 ((pt->hsync_pulse_width_hi << 8) |
319 pt->hsync_pulse_width_lo);
320 mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
321
322 mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
323 mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
324 pt->vsync_offset_lo);
325 mode->vsync_end = mode->vsync_start +
326 ((pt->vsync_pulse_width_hi << 8) |
327 pt->vsync_pulse_width_lo);
328 mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
329
330 drm_mode_set_name(mode);
331
332 if (pt->interlaced)
333 mode->flags |= DRM_MODE_FLAG_INTERLACE;
334
335 if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
336 pt->hsync_positive = 1;
337 pt->vsync_positive = 1;
338 }
339
340 mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
341 mode->flags |= pt->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
342
343 mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8);
344 mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8);
345
346 if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
347 mode->width_mm *= 10;
348 mode->height_mm *= 10;
349 }
350
351 if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
352 mode->width_mm = edid->width_cm * 10;
353 mode->height_mm = edid->height_cm * 10;
354 }
355
356 return mode;
357}
358
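Each detailed-timing field is split into high and low parts, so the reconstruction above is just shifts and ORs; a standalone sketch with illustrative values (roughly a 1920-wide panel at a 154 MHz pixel clock):

#include <stdio.h>

int main(void)
{
	/* Assumed example fields: hactive = 1920, pixel clock in 10 kHz units. */
	unsigned int hactive_hi = 0x7, hactive_lo = 0x80;
	unsigned int pixel_clock = 15400;

	unsigned int hdisplay = (hactive_hi << 8) | hactive_lo;
	unsigned int clock_khz = pixel_clock * 10;

	printf("hdisplay=%u clock=%u kHz\n", hdisplay, clock_khz);	/* 1920, 154000 */
	return 0;
}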
359/*
360 * Detailed mode info for the EDID "established modes" data to use.
361 */
362static struct drm_display_mode edid_est_modes[] = {
363 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
364 968, 1056, 0, 600, 601, 605, 628, 0,
365 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
366 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
367 896, 1024, 0, 600, 601, 603, 625, 0,
368 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
369 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
370 720, 840, 0, 480, 481, 484, 500, 0,
371 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
372 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
373 704, 832, 0, 480, 489, 491, 520, 0,
374 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
375 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
376 768, 864, 0, 480, 483, 486, 525, 0,
377 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
378 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
379 752, 800, 0, 480, 490, 492, 525, 0,
380 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
381 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
382 846, 900, 0, 400, 421, 423, 449, 0,
383 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
384 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
385 846, 900, 0, 400, 412, 414, 449, 0,
386 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
387 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
388 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
389 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
390 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
391 1136, 1312, 0, 768, 769, 772, 800, 0,
392 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
393 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
394 1184, 1328, 0, 768, 771, 777, 806, 0,
395 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
396 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
397 1184, 1344, 0, 768, 771, 777, 806, 0,
398 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
399 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
400 1208, 1264, 0, 768, 768, 776, 817, 0,
401 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
402 { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
403 928, 1152, 0, 624, 625, 628, 667, 0,
404 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
405 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
406 896, 1056, 0, 600, 601, 604, 625, 0,
407 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
408 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
409 976, 1040, 0, 600, 637, 643, 666, 0,
410 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
411 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
412 1344, 1600, 0, 864, 865, 868, 900, 0,
413 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
414};
415
416#define EDID_EST_TIMINGS 16
417#define EDID_STD_TIMINGS 8
418#define EDID_DETAILED_TIMINGS 4
419
420/**
421 * add_established_modes - get est. modes from EDID and add them
422 * @edid: EDID block to scan
423 *
424 * Each EDID block contains a bitmap of the supported "established modes" list
425 * (defined above). Tease them out and add them to the global modes list.
426 */
427static int add_established_modes(struct drm_connector *connector, struct edid *edid)
428{
429 struct drm_device *dev = connector->dev;
430 unsigned long est_bits = edid->established_timings.t1 |
431 (edid->established_timings.t2 << 8) |
432 ((edid->established_timings.mfg_rsvd & 0x80) << 9);
433 int i, modes = 0;
434
435 for (i = 0; i <= EDID_EST_TIMINGS; i++)
436 if (est_bits & (1<<i)) {
437 struct drm_display_mode *newmode;
438 newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
439 if (newmode) {
440 drm_mode_probed_add(connector, newmode);
441 modes++;
442 }
443 }
444
445 return modes;
446}
447
448/**
449 * add_standard_modes - get std. modes from EDID and add them
450 * @edid: EDID block to scan
451 *
452 * Standard modes can be calculated using the CVT standard. Grab them from
453 * @edid, calculate them, and add them to the list.
454 */
455static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
456{
457 struct drm_device *dev = connector->dev;
458 int i, modes = 0;
459
460 for (i = 0; i < EDID_STD_TIMINGS; i++) {
461 struct std_timing *t = &edid->standard_timings[i];
462 struct drm_display_mode *newmode;
463
464 /* If std timings bytes are 1, 1 it's empty */
465 if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
466 continue;
467
468 newmode = drm_mode_std(dev, &edid->standard_timings[i]);
469 if (newmode) {
470 drm_mode_probed_add(connector, newmode);
471 modes++;
472 }
473 }
474
475 return modes;
476}
477
478/**
479 * add_detailed_info - get detailed mode info from EDID data
480 * @connector: attached connector
481 * @edid: EDID block to scan
482 * @quirks: quirks to apply
483 *
484 * Some of the detailed timing sections may contain mode information. Grab
485 * it and add it to the list.
486 */
487static int add_detailed_info(struct drm_connector *connector,
488 struct edid *edid, u32 quirks)
489{
490 struct drm_device *dev = connector->dev;
491 int i, j, modes = 0;
492
493 for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
494 struct detailed_timing *timing = &edid->detailed_timings[i];
495 struct detailed_non_pixel *data = &timing->data.other_data;
496 struct drm_display_mode *newmode;
497
498 /* EDID up to and including 1.2 may put monitor info here */
499 if (edid->version == 1 && edid->revision < 3)
500 continue;
501
502 /* Detailed mode timing */
503 if (timing->pixel_clock) {
504 newmode = drm_mode_detailed(dev, edid, timing, quirks);
505 if (!newmode)
506 continue;
507
508 /* First detailed mode is preferred */
509 if (i == 0 && edid->preferred_timing)
510 newmode->type |= DRM_MODE_TYPE_PREFERRED;
511 drm_mode_probed_add(connector, newmode);
512
513 modes++;
514 continue;
515 }
516
517 /* Other timing or info */
518 switch (data->type) {
519 case EDID_DETAIL_MONITOR_SERIAL:
520 break;
521 case EDID_DETAIL_MONITOR_STRING:
522 break;
523 case EDID_DETAIL_MONITOR_RANGE:
524 /* Get monitor range data */
525 break;
526 case EDID_DETAIL_MONITOR_NAME:
527 break;
528 case EDID_DETAIL_MONITOR_CPDATA:
529 break;
530 case EDID_DETAIL_STD_MODES:
531 /* Five modes per detailed section */
532			for (j = 0; j < 5; j++) {
533 struct std_timing *std;
534 struct drm_display_mode *newmode;
535
536 std = &data->data.timings[j];
537 newmode = drm_mode_std(dev, std);
538 if (newmode) {
539 drm_mode_probed_add(connector, newmode);
540 modes++;
541 }
542 }
543 break;
544 default:
545 break;
546 }
547 }
548
549 return modes;
550}
551
552#define DDC_ADDR 0x50
553
554unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
555{
556 unsigned char start = 0x0;
557 unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
558 struct i2c_msg msgs[] = {
559 {
560 .addr = DDC_ADDR,
561 .flags = 0,
562 .len = 1,
563 .buf = &start,
564 }, {
565 .addr = DDC_ADDR,
566 .flags = I2C_M_RD,
567 .len = EDID_LENGTH,
568 .buf = buf,
569 }
570 };
571
572 if (!buf) {
573 dev_warn(&adapter->dev, "unable to allocate memory for EDID "
574 "block.\n");
575 return NULL;
576 }
577
578 if (i2c_transfer(adapter, msgs, 2) == 2)
579 return buf;
580
581 dev_info(&adapter->dev, "unable to read EDID block.\n");
582 kfree(buf);
583 return NULL;
584}
585EXPORT_SYMBOL(drm_do_probe_ddc_edid);
586
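drm_do_probe_ddc_edid() is exported, so a driver that owns a DDC-capable i2c_adapter can fetch the raw 128-byte block itself; a minimal sketch (driver context assumed, usual drmP.h/drm_edid.h includes omitted), where the caller owns and must kfree() the returned buffer:

static const u8 foo_edid_header[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

/* Hypothetical driver-side fetch: grab the raw block and do a cheap
 * header check before treating it as a struct edid. */
static struct edid *foo_fetch_edid(struct i2c_adapter *ddc)
{
	unsigned char *raw = drm_do_probe_ddc_edid(ddc);

	if (!raw)
		return NULL;		/* allocation failure or no answer on DDC */
	if (memcmp(raw, foo_edid_header, sizeof(foo_edid_header))) {
		kfree(raw);
		return NULL;
	}
	return (struct edid *)raw;
}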
587static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
588{
589 struct i2c_algo_bit_data *algo_data = adapter->algo_data;
590 unsigned char *edid = NULL;
591 int i, j;
592
593 algo_data->setscl(algo_data->data, 1);
594
595 for (i = 0; i < 1; i++) {
596 /* For some old monitors we need the
597 * following process to initialize/stop DDC
598 */
599 algo_data->setsda(algo_data->data, 1);
600 msleep(13);
601
602 algo_data->setscl(algo_data->data, 1);
603 for (j = 0; j < 5; j++) {
604 msleep(10);
605 if (algo_data->getscl(algo_data->data))
606 break;
607 }
608 if (j == 5)
609 continue;
610
611 algo_data->setsda(algo_data->data, 0);
612 msleep(15);
613 algo_data->setscl(algo_data->data, 0);
614 msleep(15);
615 algo_data->setsda(algo_data->data, 1);
616 msleep(15);
617
618 /* Do the real work */
619 edid = drm_do_probe_ddc_edid(adapter);
620 algo_data->setsda(algo_data->data, 0);
621 algo_data->setscl(algo_data->data, 0);
622 msleep(15);
623
624 algo_data->setscl(algo_data->data, 1);
625 for (j = 0; j < 10; j++) {
626 msleep(10);
627 if (algo_data->getscl(algo_data->data))
628 break;
629 }
630
631 algo_data->setsda(algo_data->data, 1);
632 msleep(15);
633 algo_data->setscl(algo_data->data, 0);
634 algo_data->setsda(algo_data->data, 0);
635 if (edid)
636 break;
637 }
638 /* Release the DDC lines when done or the Apple Cinema HD display
639 * will switch off
640 */
641 algo_data->setsda(algo_data->data, 1);
642 algo_data->setscl(algo_data->data, 1);
643
644 return edid;
645}
646
647/**
648 * drm_get_edid - get EDID data, if available
649 * @connector: connector we're probing
650 * @adapter: i2c adapter to use for DDC
651 *
652 * Poke the given connector's i2c channel to grab EDID data if possible.
653 *
654 * Return edid data or NULL if we couldn't find any.
655 */
656struct edid *drm_get_edid(struct drm_connector *connector,
657 struct i2c_adapter *adapter)
658{
659 struct edid *edid;
660
661 edid = (struct edid *)drm_ddc_read(adapter);
662 if (!edid) {
663 dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n",
664 drm_get_connector_name(connector));
665 return NULL;
666 }
667 if (!edid_is_valid(edid)) {
668 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
669 drm_get_connector_name(connector));
670 kfree(edid);
671 return NULL;
672 }
673
674 connector->display_info.raw_edid = (char *)edid;
675
676 return edid;
677}
678EXPORT_SYMBOL(drm_get_edid);
679
680/**
681 * drm_add_edid_modes - add modes from EDID data, if available
682 * @connector: connector we're probing
683 * @edid: edid data
684 *
685 * Add the specified modes to the connector's mode list.
686 *
687 * Return number of modes added or 0 if we couldn't find any.
688 */
689int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
690{
691 int num_modes = 0;
692 u32 quirks;
693
694 if (edid == NULL) {
695 return 0;
696 }
697 if (!edid_is_valid(edid)) {
698 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
699 drm_get_connector_name(connector));
700 return 0;
701 }
702
703 quirks = edid_get_quirks(edid);
704
705 num_modes += add_established_modes(connector, edid);
706 num_modes += add_standard_modes(connector, edid);
707 num_modes += add_detailed_info(connector, edid, quirks);
708
709 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
710 edid_fixup_preferred(connector, quirks);
711
712 connector->display_info.serration_vsync = edid->serration_vsync;
713 connector->display_info.sync_on_green = edid->sync_on_green;
714 connector->display_info.composite_sync = edid->composite_sync;
715 connector->display_info.separate_syncs = edid->separate_syncs;
716 connector->display_info.blank_to_black = edid->blank_to_black;
717 connector->display_info.video_level = edid->video_level;
718 connector->display_info.digital = edid->digital;
719 connector->display_info.width_mm = edid->width_cm * 10;
720 connector->display_info.height_mm = edid->height_cm * 10;
721 connector->display_info.gamma = edid->gamma;
722 connector->display_info.gtf_supported = edid->default_gtf;
723 connector->display_info.standard_color = edid->standard_color;
724 connector->display_info.display_type = edid->display_type;
725 connector->display_info.active_off_supported = edid->pm_active_off;
726 connector->display_info.suspend_supported = edid->pm_suspend;
727 connector->display_info.standby_supported = edid->pm_standby;
729
730 return num_modes;
731}
732EXPORT_SYMBOL(drm_add_edid_modes);
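Together, drm_get_edid() and drm_add_edid_modes() are the path a KMS connector's ->get_modes() hook is expected to take; a minimal sketch in which foo_connector_ddc() is a hypothetical helper returning the connector's DDC bus:

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct i2c_adapter *ddc = foo_connector_ddc(connector);	/* hypothetical */
	struct edid *edid;
	int count;

	edid = drm_get_edid(connector, ddc);
	if (!edid)
		return 0;

	count = drm_add_edid_modes(connector, edid);
	kfree(edid);		/* drm_get_edid() hands ownership to the caller */
	return count;
}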
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 78eeed5caaff..3733e36d135e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -35,7 +35,6 @@
35 */ 35 */
36 36
37#include "drmP.h" 37#include "drmP.h"
38#include "drm_sarea.h"
39#include <linux/poll.h> 38#include <linux/poll.h>
40#include <linux/smp_lock.h> 39#include <linux/smp_lock.h>
41 40
@@ -44,10 +43,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
44 43
45static int drm_setup(struct drm_device * dev) 44static int drm_setup(struct drm_device * dev)
46{ 45{
47 drm_local_map_t *map;
48 int i; 46 int i;
49 int ret; 47 int ret;
50 u32 sareapage;
51 48
52 if (dev->driver->firstopen) { 49 if (dev->driver->firstopen) {
53 ret = dev->driver->firstopen(dev); 50 ret = dev->driver->firstopen(dev);
@@ -55,20 +52,14 @@ static int drm_setup(struct drm_device * dev)
55 return ret; 52 return ret;
56 } 53 }
57 54
58 dev->magicfree.next = NULL;
59
60 /* prebuild the SAREA */
61 sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
62 i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
63 if (i != 0)
64 return i;
65
66 atomic_set(&dev->ioctl_count, 0); 55 atomic_set(&dev->ioctl_count, 0);
67 atomic_set(&dev->vma_count, 0); 56 atomic_set(&dev->vma_count, 0);
68 dev->buf_use = 0;
69 atomic_set(&dev->buf_alloc, 0);
70 57
71 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { 58 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
59 !drm_core_check_feature(dev, DRIVER_MODESET)) {
60 dev->buf_use = 0;
61 atomic_set(&dev->buf_alloc, 0);
62
72 i = drm_dma_setup(dev); 63 i = drm_dma_setup(dev);
73 if (i < 0) 64 if (i < 0)
74 return i; 65 return i;
@@ -77,16 +68,12 @@ static int drm_setup(struct drm_device * dev)
77 for (i = 0; i < ARRAY_SIZE(dev->counts); i++) 68 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
78 atomic_set(&dev->counts[i], 0); 69 atomic_set(&dev->counts[i], 0);
79 70
80 drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
81 INIT_LIST_HEAD(&dev->magicfree);
82
83 dev->sigdata.lock = NULL; 71 dev->sigdata.lock = NULL;
84 init_waitqueue_head(&dev->lock.lock_queue); 72
85 dev->queue_count = 0; 73 dev->queue_count = 0;
86 dev->queue_reserved = 0; 74 dev->queue_reserved = 0;
87 dev->queue_slots = 0; 75 dev->queue_slots = 0;
88 dev->queuelist = NULL; 76 dev->queuelist = NULL;
89 dev->irq_enabled = 0;
90 dev->context_flag = 0; 77 dev->context_flag = 0;
91 dev->interrupt_flag = 0; 78 dev->interrupt_flag = 0;
92 dev->dma_flag = 0; 79 dev->dma_flag = 0;
@@ -147,10 +134,20 @@ int drm_open(struct inode *inode, struct file *filp)
147 spin_lock(&dev->count_lock); 134 spin_lock(&dev->count_lock);
148 if (!dev->open_count++) { 135 if (!dev->open_count++) {
149 spin_unlock(&dev->count_lock); 136 spin_unlock(&dev->count_lock);
150 return drm_setup(dev); 137 retcode = drm_setup(dev);
138 goto out;
151 } 139 }
152 spin_unlock(&dev->count_lock); 140 spin_unlock(&dev->count_lock);
153 } 141 }
142out:
143 mutex_lock(&dev->struct_mutex);
144 if (minor->type == DRM_MINOR_LEGACY) {
145 BUG_ON((dev->dev_mapping != NULL) &&
146 (dev->dev_mapping != inode->i_mapping));
147 if (dev->dev_mapping == NULL)
148 dev->dev_mapping = inode->i_mapping;
149 }
150 mutex_unlock(&dev->struct_mutex);
154 151
155 return retcode; 152 return retcode;
156} 153}
@@ -255,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
255 priv->lock_count = 0; 252 priv->lock_count = 0;
256 253
257 INIT_LIST_HEAD(&priv->lhead); 254 INIT_LIST_HEAD(&priv->lhead);
255 INIT_LIST_HEAD(&priv->fbs);
258 256
259 if (dev->driver->driver_features & DRIVER_GEM) 257 if (dev->driver->driver_features & DRIVER_GEM)
260 drm_gem_open(dev, priv); 258 drm_gem_open(dev, priv);
@@ -265,10 +263,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
265 goto out_free; 263 goto out_free;
266 } 264 }
267 265
266
267 /* if there is no current master make this fd it */
268 mutex_lock(&dev->struct_mutex); 268 mutex_lock(&dev->struct_mutex);
269 if (list_empty(&dev->filelist)) 269 if (!priv->minor->master) {
270 priv->master = 1; 270 /* create a new master */
271 priv->minor->master = drm_master_create(priv->minor);
272 if (!priv->minor->master) {
273 ret = -ENOMEM;
274 goto out_free;
275 }
271 276
277 priv->is_master = 1;
278 /* take another reference for the copy in the local file priv */
279 priv->master = drm_master_get(priv->minor->master);
280
281 priv->authenticated = 1;
282
283 mutex_unlock(&dev->struct_mutex);
284 if (dev->driver->master_create) {
285 ret = dev->driver->master_create(dev, priv->master);
286 if (ret) {
287 mutex_lock(&dev->struct_mutex);
288 /* drop both references if this fails */
289 drm_master_put(&priv->minor->master);
290 drm_master_put(&priv->master);
291 mutex_unlock(&dev->struct_mutex);
292 goto out_free;
293 }
294 }
295 } else {
296 /* get a reference to the master */
297 priv->master = drm_master_get(priv->minor->master);
298 mutex_unlock(&dev->struct_mutex);
299 }
300
301 mutex_lock(&dev->struct_mutex);
272 list_add(&priv->lhead, &dev->filelist); 302 list_add(&priv->lhead, &dev->filelist);
273 mutex_unlock(&dev->struct_mutex); 303 mutex_unlock(&dev->struct_mutex);
274 304
@@ -314,6 +344,74 @@ int drm_fasync(int fd, struct file *filp, int on)
314} 344}
315EXPORT_SYMBOL(drm_fasync); 345EXPORT_SYMBOL(drm_fasync);
316 346
347/*
348 * Reclaim locked buffers; note that this may be a bad idea if the current
349 * context doesn't have the hw lock...
350 */
351static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
352{
353 struct drm_file *file_priv = f->private_data;
354
355 if (drm_i_have_hw_lock(dev, file_priv)) {
356 dev->driver->reclaim_buffers_locked(dev, file_priv);
357 } else {
358 unsigned long _end = jiffies + 3 * DRM_HZ;
359 int locked = 0;
360
361 drm_idlelock_take(&file_priv->master->lock);
362
363 /*
364 * Wait for a while.
365 */
366 do {
367 spin_lock_bh(&file_priv->master->lock.spinlock);
368 locked = file_priv->master->lock.idle_has_lock;
369 spin_unlock_bh(&file_priv->master->lock.spinlock);
370 if (locked)
371 break;
372 schedule();
373 } while (!time_after_eq(jiffies, _end));
374
375 if (!locked) {
376 DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
377 "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
378 "\tI will go on reclaiming the buffers anyway.\n");
379 }
380
381 dev->driver->reclaim_buffers_locked(dev, file_priv);
382 drm_idlelock_release(&file_priv->master->lock);
383 }
384}
385
386static void drm_master_release(struct drm_device *dev, struct file *filp)
387{
388 struct drm_file *file_priv = filp->private_data;
389
390 if (dev->driver->reclaim_buffers_locked &&
391 file_priv->master->lock.hw_lock)
392 drm_reclaim_locked_buffers(dev, filp);
393
394 if (dev->driver->reclaim_buffers_idlelocked &&
395 file_priv->master->lock.hw_lock) {
396 drm_idlelock_take(&file_priv->master->lock);
397 dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
398 drm_idlelock_release(&file_priv->master->lock);
399 }
400
401
402 if (drm_i_have_hw_lock(dev, file_priv)) {
403 DRM_DEBUG("File %p released, freeing lock for context %d\n",
404 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
405 drm_lock_free(&file_priv->master->lock,
406 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
407 }
408
409 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
410 !dev->driver->reclaim_buffers_locked) {
411 dev->driver->reclaim_buffers(dev, file_priv);
412 }
413}
414
317/** 415/**
318 * Release file. 416 * Release file.
319 * 417 *
@@ -348,60 +446,9 @@ int drm_release(struct inode *inode, struct file *filp)
348 (long)old_encode_dev(file_priv->minor->device), 446 (long)old_encode_dev(file_priv->minor->device),
349 dev->open_count); 447 dev->open_count);
350 448
351 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { 449 /* if the master has gone away we can't do anything with the lock */
352 if (drm_i_have_hw_lock(dev, file_priv)) { 450 if (file_priv->minor->master)
353 dev->driver->reclaim_buffers_locked(dev, file_priv); 451 drm_master_release(dev, filp);
354 } else {
355 unsigned long endtime = jiffies + 3 * DRM_HZ;
356 int locked = 0;
357
358 drm_idlelock_take(&dev->lock);
359
360 /*
361 * Wait for a while.
362 */
363
364 do{
365 spin_lock_bh(&dev->lock.spinlock);
366 locked = dev->lock.idle_has_lock;
367 spin_unlock_bh(&dev->lock.spinlock);
368 if (locked)
369 break;
370 schedule();
371 } while (!time_after_eq(jiffies, endtime));
372
373 if (!locked) {
374 DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
375 "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
376 "\tI will go on reclaiming the buffers anyway.\n");
377 }
378
379 dev->driver->reclaim_buffers_locked(dev, file_priv);
380 drm_idlelock_release(&dev->lock);
381 }
382 }
383
384 if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
385
386 drm_idlelock_take(&dev->lock);
387 dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
388 drm_idlelock_release(&dev->lock);
389
390 }
391
392 if (drm_i_have_hw_lock(dev, file_priv)) {
393 DRM_DEBUG("File %p released, freeing lock for context %d\n",
394 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
395
396 drm_lock_free(&dev->lock,
397 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
398 }
399
400
401 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
402 !dev->driver->reclaim_buffers_locked) {
403 dev->driver->reclaim_buffers(dev, file_priv);
404 }
405 452
406 if (dev->driver->driver_features & DRIVER_GEM) 453 if (dev->driver->driver_features & DRIVER_GEM)
407 drm_gem_release(dev, file_priv); 454 drm_gem_release(dev, file_priv);
@@ -428,12 +475,24 @@ int drm_release(struct inode *inode, struct file *filp)
428 mutex_unlock(&dev->ctxlist_mutex); 475 mutex_unlock(&dev->ctxlist_mutex);
429 476
430 mutex_lock(&dev->struct_mutex); 477 mutex_lock(&dev->struct_mutex);
431 if (file_priv->remove_auth_on_close == 1) { 478
479 if (file_priv->is_master) {
432 struct drm_file *temp; 480 struct drm_file *temp;
481 list_for_each_entry(temp, &dev->filelist, lhead) {
482 if ((temp->master == file_priv->master) &&
483 (temp != file_priv))
484 temp->authenticated = 0;
485 }
433 486
434 list_for_each_entry(temp, &dev->filelist, lhead) 487 if (file_priv->minor->master == file_priv->master) {
435 temp->authenticated = 0; 488 /* drop the reference held by the minor */
489 drm_master_put(&file_priv->minor->master);
490 }
436 } 491 }
492
493 /* drop the reference held by the file priv */
494 drm_master_put(&file_priv->master);
495 file_priv->is_master = 0;
437 list_del(&file_priv->lhead); 496 list_del(&file_priv->lhead);
438 mutex_unlock(&dev->struct_mutex); 497 mutex_unlock(&dev->struct_mutex);
439 498
@@ -448,9 +507,9 @@ int drm_release(struct inode *inode, struct file *filp)
448 atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); 507 atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
449 spin_lock(&dev->count_lock); 508 spin_lock(&dev->count_lock);
450 if (!--dev->open_count) { 509 if (!--dev->open_count) {
451 if (atomic_read(&dev->ioctl_count) || dev->blocked) { 510 if (atomic_read(&dev->ioctl_count)) {
452 DRM_ERROR("Device busy: %d %d\n", 511 DRM_ERROR("Device busy: %d\n",
453 atomic_read(&dev->ioctl_count), dev->blocked); 512 atomic_read(&dev->ioctl_count));
454 spin_unlock(&dev->count_lock); 513 spin_unlock(&dev->count_lock);
455 unlock_kernel(); 514 unlock_kernel();
456 return -EBUSY; 515 return -EBUSY;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ccd1afdede02..9da581452874 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -64,6 +64,13 @@
64 * up at a later date, and as our interface with shmfs for memory allocation. 64 * up at a later date, and as our interface with shmfs for memory allocation.
65 */ 65 */
66 66
67/*
68 * We make up offsets for buffer objects so we can recognize them at
69 * mmap time.
70 */
71#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
72#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
73
67/** 74/**
68 * Initialize the GEM device fields 75 * Initialize the GEM device fields
69 */ 76 */
@@ -71,6 +78,8 @@
71int 78int
72drm_gem_init(struct drm_device *dev) 79drm_gem_init(struct drm_device *dev)
73{ 80{
81 struct drm_gem_mm *mm;
82
74 spin_lock_init(&dev->object_name_lock); 83 spin_lock_init(&dev->object_name_lock);
75 idr_init(&dev->object_name_idr); 84 idr_init(&dev->object_name_idr);
76 atomic_set(&dev->object_count, 0); 85 atomic_set(&dev->object_count, 0);
@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev)
79 atomic_set(&dev->pin_memory, 0); 88 atomic_set(&dev->pin_memory, 0);
80 atomic_set(&dev->gtt_count, 0); 89 atomic_set(&dev->gtt_count, 0);
81 atomic_set(&dev->gtt_memory, 0); 90 atomic_set(&dev->gtt_memory, 0);
91
92 mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
93 if (!mm) {
94 DRM_ERROR("out of memory\n");
95 return -ENOMEM;
96 }
97
98 dev->mm_private = mm;
99
100 if (drm_ht_create(&mm->offset_hash, 19)) {
101 drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
102 return -ENOMEM;
103 }
104
105 if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
106 DRM_FILE_PAGE_OFFSET_SIZE)) {
107 drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
108 drm_ht_remove(&mm->offset_hash);
109 return -ENOMEM;
110 }
111
82 return 0; 112 return 0;
83} 113}
84 114
115void
116drm_gem_destroy(struct drm_device *dev)
117{
118 struct drm_gem_mm *mm = dev->mm_private;
119
120 drm_mm_takedown(&mm->offset_manager);
121 drm_ht_remove(&mm->offset_hash);
122 drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
123 dev->mm_private = NULL;
124}
125
85/** 126/**
86 * Allocate a GEM object of the specified size with shmfs backing store 127 * Allocate a GEM object of the specified size with shmfs backing store
87 */ 128 */
@@ -419,3 +460,73 @@ drm_gem_object_handle_free(struct kref *kref)
419} 460}
420EXPORT_SYMBOL(drm_gem_object_handle_free); 461EXPORT_SYMBOL(drm_gem_object_handle_free);
421 462
463/**
464 * drm_gem_mmap - memory map routine for GEM objects
465 * @filp: DRM file pointer
466 * @vma: VMA for the area to be mapped
467 *
468 * If a driver supports GEM object mapping, mmap calls on the DRM file
469 * descriptor will end up here.
470 *
471 * If we find the object based on the offset passed in (vma->vm_pgoff will
472 * contain the fake offset we created when the GTT map ioctl was called on
473 * the object), we set up the driver fault handler so that any accesses
474 * to the object can be trapped, to perform migration, GTT binding, surface
475 * register allocation, or performance monitoring.
476 */
477int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
478{
479 struct drm_file *priv = filp->private_data;
480 struct drm_device *dev = priv->minor->dev;
481 struct drm_gem_mm *mm = dev->mm_private;
482 struct drm_map *map = NULL;
483 struct drm_gem_object *obj;
484 struct drm_hash_item *hash;
485 unsigned long prot;
486 int ret = 0;
487
488 mutex_lock(&dev->struct_mutex);
489
490 if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
491 mutex_unlock(&dev->struct_mutex);
492 return drm_mmap(filp, vma);
493 }
494
495 map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
496 if (!map ||
497 ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
498 ret = -EPERM;
499 goto out_unlock;
500 }
501
502 /* Check for valid size. */
503 if (map->size < vma->vm_end - vma->vm_start) {
504 ret = -EINVAL;
505 goto out_unlock;
506 }
507
508 obj = map->handle;
509 if (!obj->dev->driver->gem_vm_ops) {
510 ret = -EINVAL;
511 goto out_unlock;
512 }
513
514 vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
515 vma->vm_ops = obj->dev->driver->gem_vm_ops;
516 vma->vm_private_data = map->handle;
517 /* FIXME: use pgprot_writecombine when available */
518 prot = pgprot_val(vma->vm_page_prot);
519#ifdef CONFIG_X86
520 prot |= _PAGE_CACHE_WC;
521#endif
522 vma->vm_page_prot = __pgprot(prot);
523
524 vma->vm_file = filp; /* Needed for drm_vm_open() */
525 drm_vm_open_locked(vma);
526
527out_unlock:
528 mutex_unlock(&dev->struct_mutex);
529
530 return ret;
531}
532EXPORT_SYMBOL(drm_gem_mmap);
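A driver opts into this path by routing its character-device mmap through drm_gem_mmap() and supplying gem_vm_ops for the fault-driven mapping; a rough sketch with hypothetical foo_ names (only the relevant fields shown):

/* Hypothetical per-driver fault handler; signature as used by .fault here. */
static int foo_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static struct vm_operations_struct foo_gem_vm_ops = {
	.fault = foo_gem_fault,
};

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM,
	.gem_vm_ops = &foo_gem_vm_ops,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.mmap = drm_gem_mmap,	/* falls back to drm_mmap() for legacy maps */
	},
};

GEM objects are then reached by mmap()ing the fake offset handed back by a driver-specific ioctl, while legacy map offsets keep working through the drm_mmap() fallback.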
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 33160673a7b7..af539f7d87dd 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
127 } 127 }
128 return 0; 128 return 0;
129} 129}
130EXPORT_SYMBOL(drm_ht_insert_item);
130 131
131/* 132/*
132 * Just insert an item and return any "bits" bit key that hasn't been 133 * Just insert an item and return any "bits" bit key that hasn't been
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
188 ht->fill--; 189 ht->fill--;
189 return 0; 190 return 0;
190} 191}
192EXPORT_SYMBOL(drm_ht_remove_item);
191 193
192void drm_ht_remove(struct drm_open_hash *ht) 194void drm_ht_remove(struct drm_open_hash *ht)
193{ 195{
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 16829fb3089d..1fad76289e66 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -53,12 +53,13 @@ int drm_getunique(struct drm_device *dev, void *data,
53 struct drm_file *file_priv) 53 struct drm_file *file_priv)
54{ 54{
55 struct drm_unique *u = data; 55 struct drm_unique *u = data;
56 struct drm_master *master = file_priv->master;
56 57
57 if (u->unique_len >= dev->unique_len) { 58 if (u->unique_len >= master->unique_len) {
58 if (copy_to_user(u->unique, dev->unique, dev->unique_len)) 59 if (copy_to_user(u->unique, master->unique, master->unique_len))
59 return -EFAULT; 60 return -EFAULT;
60 } 61 }
61 u->unique_len = dev->unique_len; 62 u->unique_len = master->unique_len;
62 63
63 return 0; 64 return 0;
64} 65}
@@ -81,36 +82,38 @@ int drm_setunique(struct drm_device *dev, void *data,
81 struct drm_file *file_priv) 82 struct drm_file *file_priv)
82{ 83{
83 struct drm_unique *u = data; 84 struct drm_unique *u = data;
85 struct drm_master *master = file_priv->master;
84 int domain, bus, slot, func, ret; 86 int domain, bus, slot, func, ret;
85 87
86 if (dev->unique_len || dev->unique) 88 if (master->unique_len || master->unique)
87 return -EBUSY; 89 return -EBUSY;
88 90
89 if (!u->unique_len || u->unique_len > 1024) 91 if (!u->unique_len || u->unique_len > 1024)
90 return -EINVAL; 92 return -EINVAL;
91 93
92 dev->unique_len = u->unique_len; 94 master->unique_len = u->unique_len;
93 dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); 95 master->unique_size = u->unique_len + 1;
94 if (!dev->unique) 96 master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER);
97 if (!master->unique)
95 return -ENOMEM; 98 return -ENOMEM;
96 if (copy_from_user(dev->unique, u->unique, dev->unique_len)) 99 if (copy_from_user(master->unique, u->unique, master->unique_len))
97 return -EFAULT; 100 return -EFAULT;
98 101
99 dev->unique[dev->unique_len] = '\0'; 102 master->unique[master->unique_len] = '\0';
100 103
101 dev->devname = 104 dev->devname =
102 drm_alloc(strlen(dev->driver->pci_driver.name) + 105 drm_alloc(strlen(dev->driver->pci_driver.name) +
103 strlen(dev->unique) + 2, DRM_MEM_DRIVER); 106 strlen(master->unique) + 2, DRM_MEM_DRIVER);
104 if (!dev->devname) 107 if (!dev->devname)
105 return -ENOMEM; 108 return -ENOMEM;
106 109
107 sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, 110 sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
108 dev->unique); 111 master->unique);
109 112
110 /* Return error if the busid submitted doesn't match the device's actual 113 /* Return error if the busid submitted doesn't match the device's actual
111 * busid. 114 * busid.
112 */ 115 */
113 ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); 116 ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
114 if (ret != 3) 117 if (ret != 3)
115 return -EINVAL; 118 return -EINVAL;
116 domain = bus >> 8; 119 domain = bus >> 8;
@@ -125,34 +128,38 @@ int drm_setunique(struct drm_device *dev, void *data,
125 return 0; 128 return 0;
126} 129}
127 130
128static int drm_set_busid(struct drm_device * dev) 131static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
129{ 132{
133 struct drm_master *master = file_priv->master;
130 int len; 134 int len;
131 135
132 if (dev->unique != NULL) 136 if (master->unique != NULL)
133 return 0; 137 return -EBUSY;
134 138
135 dev->unique_len = 40; 139 master->unique_len = 40;
136 dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); 140 master->unique_size = master->unique_len;
137 if (dev->unique == NULL) 141 master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER);
142 if (master->unique == NULL)
138 return -ENOMEM; 143 return -ENOMEM;
139 144
140 len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", 145 len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
141 drm_get_pci_domain(dev), dev->pdev->bus->number, 146 drm_get_pci_domain(dev),
147 dev->pdev->bus->number,
142 PCI_SLOT(dev->pdev->devfn), 148 PCI_SLOT(dev->pdev->devfn),
143 PCI_FUNC(dev->pdev->devfn)); 149 PCI_FUNC(dev->pdev->devfn));
144 150 if (len >= master->unique_len)
145 if (len > dev->unique_len) 151 DRM_ERROR("buffer overflow");
146 DRM_ERROR("Unique buffer overflowed\n"); 152 else
153 master->unique_len = len;
147 154
148 dev->devname = 155 dev->devname =
149 drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 156 drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len +
150 2, DRM_MEM_DRIVER); 157 2, DRM_MEM_DRIVER);
151 if (dev->devname == NULL) 158 if (dev->devname == NULL)
152 return -ENOMEM; 159 return -ENOMEM;
153 160
154 sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, 161 sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
155 dev->unique); 162 master->unique);
156 163
157 return 0; 164 return 0;
158} 165}
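For reference, the strings built here follow the "pci:domain:bus:slot.func" shape; a standalone sketch showing what master->unique and dev->devname would hold for an assumed device at 0000:02:00.0 bound to a driver named "foo":

#include <stdio.h>

int main(void)
{
	char unique[40], devname[64];

	/* Assumed values: PCI domain 0, bus 2, slot 0, function 0. */
	snprintf(unique, sizeof(unique), "pci:%04x:%02x:%02x.%d", 0, 2, 0, 0);
	snprintf(devname, sizeof(devname), "%s@%s", "foo", unique);

	printf("%s\n", devname);	/* foo@pci:0000:02:00.0 */
	return 0;
}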
@@ -276,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
276 for (i = 0; i < dev->counters; i++) { 283 for (i = 0; i < dev->counters; i++) {
277 if (dev->types[i] == _DRM_STAT_LOCK) 284 if (dev->types[i] == _DRM_STAT_LOCK)
278 stats->data[i].value = 285 stats->data[i].value =
279 (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); 286 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
280 else 287 else
281 stats->data[i].value = atomic_read(&dev->counts[i]); 288 stats->data[i].value = atomic_read(&dev->counts[i]);
282 stats->data[i].type = dev->types[i]; 289 stats->data[i].type = dev->types[i];
@@ -318,7 +325,7 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
318 /* 325 /*
319 * Version 1.1 includes tying of DRM to specific device 326 * Version 1.1 includes tying of DRM to specific device
320 */ 327 */
321 drm_set_busid(dev); 328 drm_set_busid(dev, file_priv);
322 } 329 }
323 } 330 }
324 331
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 1e787f894b3c..724e505873cf 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -116,6 +116,9 @@ void drm_vblank_cleanup(struct drm_device *dev)
116 dev->num_crtcs, DRM_MEM_DRIVER); 116 dev->num_crtcs, DRM_MEM_DRIVER);
117 drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, 117 drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
118 DRM_MEM_DRIVER); 118 DRM_MEM_DRIVER);
119 drm_free(dev->last_vblank_wait,
120 sizeof(*dev->last_vblank_wait) * dev->num_crtcs,
121 DRM_MEM_DRIVER);
119 drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * 122 drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
120 dev->num_crtcs, DRM_MEM_DRIVER); 123 dev->num_crtcs, DRM_MEM_DRIVER);
121 124
@@ -161,6 +164,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
161 if (!dev->last_vblank) 164 if (!dev->last_vblank)
162 goto err; 165 goto err;
163 166
167 dev->last_vblank_wait = drm_calloc(num_crtcs, sizeof(u32),
168 DRM_MEM_DRIVER);
169 if (!dev->last_vblank_wait)
170 goto err;
171
164 dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), 172 dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
165 DRM_MEM_DRIVER); 173 DRM_MEM_DRIVER);
166 if (!dev->vblank_inmodeset) 174 if (!dev->vblank_inmodeset)
@@ -305,6 +313,8 @@ int drm_control(struct drm_device *dev, void *data,
305 case DRM_INST_HANDLER: 313 case DRM_INST_HANDLER:
306 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 314 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
307 return 0; 315 return 0;
316 if (drm_core_check_feature(dev, DRIVER_MODESET))
317 return 0;
308 if (dev->if_version < DRM_IF_VERSION(1, 2) && 318 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
309 ctl->irq != dev->pdev->irq) 319 ctl->irq != dev->pdev->irq)
310 return -EINVAL; 320 return -EINVAL;
@@ -312,6 +322,8 @@ int drm_control(struct drm_device *dev, void *data,
312 case DRM_UNINST_HANDLER: 322 case DRM_UNINST_HANDLER:
313 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 323 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
314 return 0; 324 return 0;
325 if (drm_core_check_feature(dev, DRIVER_MODESET))
326 return 0;
315 return drm_irq_uninstall(dev); 327 return drm_irq_uninstall(dev);
316 default: 328 default:
317 return -EINVAL; 329 return -EINVAL;
@@ -427,6 +439,45 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
427EXPORT_SYMBOL(drm_vblank_put); 439EXPORT_SYMBOL(drm_vblank_put);
428 440
429/** 441/**
442 * drm_vblank_pre_modeset - account for vblanks across mode sets
443 * @dev: DRM device
444 * @crtc: CRTC in question
446 *
447 * Account for vblank events across mode setting events, which will likely
448 * reset the hardware frame counter.
449 */
450void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
451{
452 /*
453 * To avoid all the problems that might happen if interrupts
454 * were enabled/disabled around or between these calls, we just
455 * have the kernel take a reference on the CRTC (just once though
456 * to avoid corrupting the count if multiple, mismatched calls occur),
457 * so that interrupts remain enabled in the interim.
458 */
459 if (!dev->vblank_inmodeset[crtc]) {
460 dev->vblank_inmodeset[crtc] = 1;
461 drm_vblank_get(dev, crtc);
462 }
463}
464EXPORT_SYMBOL(drm_vblank_pre_modeset);
465
466void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
467{
468 unsigned long irqflags;
469
470 if (dev->vblank_inmodeset[crtc]) {
471 spin_lock_irqsave(&dev->vbl_lock, irqflags);
472 dev->vblank_disable_allowed = 1;
473 dev->vblank_inmodeset[crtc] = 0;
474 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
475 drm_vblank_put(dev, crtc);
476 }
477}
478EXPORT_SYMBOL(drm_vblank_post_modeset);
479
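The intended usage is for a driver to bracket any mode programming that can disturb the hardware frame counter with this pair; a minimal sketch, with foo_program_crtc() standing in for the real hardware work:

static void foo_crtc_mode_set(struct drm_device *dev, int pipe,
			      struct drm_display_mode *mode)
{
	drm_vblank_pre_modeset(dev, pipe);	/* hold a vblank ref across the reset */
	foo_program_crtc(dev, pipe, mode);	/* hypothetical: programs CRTC timing regs */
	drm_vblank_post_modeset(dev, pipe);	/* drop the ref, allow disable again */
}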
480/**
430 * drm_modeset_ctl - handle vblank event counter changes across mode switch 481 * drm_modeset_ctl - handle vblank event counter changes across mode switch
431 * @DRM_IOCTL_ARGS: standard ioctl arguments 482 * @DRM_IOCTL_ARGS: standard ioctl arguments
432 * 483 *
@@ -441,7 +492,6 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
441 struct drm_file *file_priv) 492 struct drm_file *file_priv)
442{ 493{
443 struct drm_modeset_ctl *modeset = data; 494 struct drm_modeset_ctl *modeset = data;
444 unsigned long irqflags;
445 int crtc, ret = 0; 495 int crtc, ret = 0;
446 496
447 /* If drm_vblank_init() hasn't been called yet, just no-op */ 497 /* If drm_vblank_init() hasn't been called yet, just no-op */
@@ -454,28 +504,12 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
454 goto out; 504 goto out;
455 } 505 }
456 506
457 /*
458 * To avoid all the problems that might happen if interrupts
459 * were enabled/disabled around or between these calls, we just
460 * have the kernel take a reference on the CRTC (just once though
461 * to avoid corrupting the count if multiple, mismatch calls occur),
462 * so that interrupts remain enabled in the interim.
463 */
464 switch (modeset->cmd) { 507 switch (modeset->cmd) {
465 case _DRM_PRE_MODESET: 508 case _DRM_PRE_MODESET:
466 if (!dev->vblank_inmodeset[crtc]) { 509 drm_vblank_pre_modeset(dev, crtc);
467 dev->vblank_inmodeset[crtc] = 1;
468 drm_vblank_get(dev, crtc);
469 }
470 break; 510 break;
471 case _DRM_POST_MODESET: 511 case _DRM_POST_MODESET:
472 if (dev->vblank_inmodeset[crtc]) { 512 drm_vblank_post_modeset(dev, crtc);
473 spin_lock_irqsave(&dev->vbl_lock, irqflags);
474 dev->vblank_disable_allowed = 1;
475 dev->vblank_inmodeset[crtc] = 0;
476 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
477 drm_vblank_put(dev, crtc);
478 }
479 break; 513 break;
480 default: 514 default:
481 ret = -EINVAL; 515 ret = -EINVAL;
@@ -616,6 +650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
616 } else { 650 } else {
617 DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 651 DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
618 vblwait->request.sequence, crtc); 652 vblwait->request.sequence, crtc);
653 dev->last_vblank_wait[crtc] = vblwait->request.sequence;
619 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, 654 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
620 ((drm_vblank_count(dev, crtc) 655 ((drm_vblank_count(dev, crtc)
621 - vblwait->request.sequence) <= (1 << 23))); 656 - vblwait->request.sequence) <= (1 << 23)));
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 1cfa72031f8f..46e7b28f0707 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -52,6 +52,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
52{ 52{
53 DECLARE_WAITQUEUE(entry, current); 53 DECLARE_WAITQUEUE(entry, current);
54 struct drm_lock *lock = data; 54 struct drm_lock *lock = data;
55 struct drm_master *master = file_priv->master;
55 int ret = 0; 56 int ret = 0;
56 57
57 ++file_priv->lock_count; 58 ++file_priv->lock_count;
@@ -64,26 +65,27 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
64 65
65 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", 66 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
66 lock->context, task_pid_nr(current), 67 lock->context, task_pid_nr(current),
67 dev->lock.hw_lock->lock, lock->flags); 68 master->lock.hw_lock->lock, lock->flags);
68 69
69 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) 70 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
70 if (lock->context < 0) 71 if (lock->context < 0)
71 return -EINVAL; 72 return -EINVAL;
72 73
73 add_wait_queue(&dev->lock.lock_queue, &entry); 74 add_wait_queue(&master->lock.lock_queue, &entry);
74 spin_lock_bh(&dev->lock.spinlock); 75 spin_lock_bh(&master->lock.spinlock);
75 dev->lock.user_waiters++; 76 master->lock.user_waiters++;
76 spin_unlock_bh(&dev->lock.spinlock); 77 spin_unlock_bh(&master->lock.spinlock);
78
77 for (;;) { 79 for (;;) {
78 __set_current_state(TASK_INTERRUPTIBLE); 80 __set_current_state(TASK_INTERRUPTIBLE);
79 if (!dev->lock.hw_lock) { 81 if (!master->lock.hw_lock) {
80 /* Device has been unregistered */ 82 /* Device has been unregistered */
81 ret = -EINTR; 83 ret = -EINTR;
82 break; 84 break;
83 } 85 }
84 if (drm_lock_take(&dev->lock, lock->context)) { 86 if (drm_lock_take(&master->lock, lock->context)) {
85 dev->lock.file_priv = file_priv; 87 master->lock.file_priv = file_priv;
86 dev->lock.lock_time = jiffies; 88 master->lock.lock_time = jiffies;
87 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); 89 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
88 break; /* Got lock */ 90 break; /* Got lock */
89 } 91 }
@@ -95,11 +97,11 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
95 break; 97 break;
96 } 98 }
97 } 99 }
98 spin_lock_bh(&dev->lock.spinlock); 100 spin_lock_bh(&master->lock.spinlock);
99 dev->lock.user_waiters--; 101 master->lock.user_waiters--;
100 spin_unlock_bh(&dev->lock.spinlock); 102 spin_unlock_bh(&master->lock.spinlock);
101 __set_current_state(TASK_RUNNING); 103 __set_current_state(TASK_RUNNING);
102 remove_wait_queue(&dev->lock.lock_queue, &entry); 104 remove_wait_queue(&master->lock.lock_queue, &entry);
103 105
104 DRM_DEBUG("%d %s\n", lock->context, 106 DRM_DEBUG("%d %s\n", lock->context,
105 ret ? "interrupted" : "has lock"); 107 ret ? "interrupted" : "has lock");
@@ -108,14 +110,14 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
108 /* don't set the block all signals on the master process for now 110 /* don't set the block all signals on the master process for now
109 * really probably not the correct answer but lets us debug xkb 111 * really probably not the correct answer but lets us debug xkb
110 * xserver for now */ 112 * xserver for now */
111 if (!file_priv->master) { 113 if (!file_priv->is_master) {
112 sigemptyset(&dev->sigmask); 114 sigemptyset(&dev->sigmask);
113 sigaddset(&dev->sigmask, SIGSTOP); 115 sigaddset(&dev->sigmask, SIGSTOP);
114 sigaddset(&dev->sigmask, SIGTSTP); 116 sigaddset(&dev->sigmask, SIGTSTP);
115 sigaddset(&dev->sigmask, SIGTTIN); 117 sigaddset(&dev->sigmask, SIGTTIN);
116 sigaddset(&dev->sigmask, SIGTTOU); 118 sigaddset(&dev->sigmask, SIGTTOU);
117 dev->sigdata.context = lock->context; 119 dev->sigdata.context = lock->context;
118 dev->sigdata.lock = dev->lock.hw_lock; 120 dev->sigdata.lock = master->lock.hw_lock;
119 block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); 121 block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
120 } 122 }
121 123
@@ -154,6 +156,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
154int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) 156int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
155{ 157{
156 struct drm_lock *lock = data; 158 struct drm_lock *lock = data;
159 struct drm_master *master = file_priv->master;
157 160
158 if (lock->context == DRM_KERNEL_CONTEXT) { 161 if (lock->context == DRM_KERNEL_CONTEXT) {
159 DRM_ERROR("Process %d using kernel context %d\n", 162 DRM_ERROR("Process %d using kernel context %d\n",
@@ -169,7 +172,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
169 if (dev->driver->kernel_context_switch_unlock) 172 if (dev->driver->kernel_context_switch_unlock)
170 dev->driver->kernel_context_switch_unlock(dev); 173 dev->driver->kernel_context_switch_unlock(dev);
171 else { 174 else {
172 if (drm_lock_free(&dev->lock,lock->context)) { 175 if (drm_lock_free(&master->lock, lock->context)) {
173 /* FIXME: Should really bail out here. */ 176 /* FIXME: Should really bail out here. */
174 } 177 }
175 } 178 }
@@ -379,9 +382,10 @@ EXPORT_SYMBOL(drm_idlelock_release);
379 382
380int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) 383int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
381{ 384{
382 return (file_priv->lock_count && dev->lock.hw_lock && 385 struct drm_master *master = file_priv->master;
383 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && 386 return (file_priv->lock_count && master->lock.hw_lock &&
384 dev->lock.file_priv == file_priv); 387 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
388 master->lock.file_priv == file_priv);
385} 389}
386 390
387EXPORT_SYMBOL(drm_i_have_hw_lock); 391EXPORT_SYMBOL(drm_i_have_hw_lock);
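A minimal sketch (not part of the patch; the example_* name is made up) of how a caller keeps using the exported helper after the HW-lock state moves into the per-file drm_master:

static int example_require_hw_lock(struct drm_device *dev,
                                   struct drm_file *file_priv)
{
        /* Ownership is now tracked via file_priv->master->lock, but the
         * check itself is unchanged for callers. */
        return drm_i_have_hw_lock(dev, file_priv) ? 0 : -EACCES;
}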
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 217ad7dc7076..367c590ffbba 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -296,3 +296,4 @@ void drm_mm_takedown(struct drm_mm * mm)
296 296
297 drm_free(entry, sizeof(*entry), DRM_MEM_MM); 297 drm_free(entry, sizeof(*entry), DRM_MEM_MM);
298} 298}
299EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
new file mode 100644
index 000000000000..c9b80fdd4630
--- /dev/null
+++ b/drivers/gpu/drm/drm_modes.c
@@ -0,0 +1,576 @@
1/*
2 * The list_sort function is (presumably) licensed under the GPL (see the
3 * top level "COPYING" file for details).
4 *
5 * The remainder of this file is:
6 *
7 * Copyright © 1997-2003 by The XFree86 Project, Inc.
8 * Copyright © 2007 Dave Airlie
9 * Copyright © 2007-2008 Intel Corporation
10 * Jesse Barnes <jesse.barnes@intel.com>
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a
13 * copy of this software and associated documentation files (the "Software"),
14 * to deal in the Software without restriction, including without limitation
15 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
16 * and/or sell copies of the Software, and to permit persons to whom the
17 * Software is furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 *
30 * Except as contained in this notice, the name of the copyright holder(s)
31 * and author(s) shall not be used in advertising or otherwise to promote
32 * the sale, use or other dealings in this Software without prior written
33 * authorization from the copyright holder(s) and author(s).
34 */
35
36#include <linux/list.h>
37#include "drmP.h"
38#include "drm.h"
39#include "drm_crtc.h"
40
41/**
42 * drm_mode_debug_printmodeline - debug print a mode
43 * @dev: DRM device
44 * @mode: mode to print
45 *
46 * LOCKING:
47 * None.
48 *
49 * Describe @mode using DRM_DEBUG.
50 */
51void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
52{
53 DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
54 mode->base.id, mode->name, mode->vrefresh, mode->clock,
55 mode->hdisplay, mode->hsync_start,
56 mode->hsync_end, mode->htotal,
57 mode->vdisplay, mode->vsync_start,
58 mode->vsync_end, mode->vtotal, mode->type, mode->flags);
59}
60EXPORT_SYMBOL(drm_mode_debug_printmodeline);
61
62/**
63 * drm_mode_set_name - set the name on a mode
64 * @mode: name will be set in this mode
65 *
66 * LOCKING:
67 * None.
68 *
69 * Set the name of @mode to a standard format.
70 */
71void drm_mode_set_name(struct drm_display_mode *mode)
72{
73 snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
74 mode->vdisplay);
75}
76EXPORT_SYMBOL(drm_mode_set_name);
77
78/**
79 * drm_mode_list_concat - move modes from one list to another
80 * @head: source list
81 * @new: dst list
82 *
83 * LOCKING:
84 * Caller must ensure both lists are locked.
85 *
86 * Move all the modes from @head to @new.
87 */
88void drm_mode_list_concat(struct list_head *head, struct list_head *new)
89{
90
91 struct list_head *entry, *tmp;
92
93 list_for_each_safe(entry, tmp, head) {
94 list_move_tail(entry, new);
95 }
96}
97EXPORT_SYMBOL(drm_mode_list_concat);
98
99/**
100 * drm_mode_width - get the width of a mode
101 * @mode: mode
102 *
103 * LOCKING:
104 * None.
105 *
106 * Return @mode's width (hdisplay) value.
107 *
108 * FIXME: is this needed?
109 *
110 * RETURNS:
111 * @mode->hdisplay
112 */
113int drm_mode_width(struct drm_display_mode *mode)
114{
115 return mode->hdisplay;
116
117}
118EXPORT_SYMBOL(drm_mode_width);
119
120/**
121 * drm_mode_height - get the height of a mode
122 * @mode: mode
123 *
124 * LOCKING:
125 * None.
126 *
127 * Return @mode's height (vdisplay) value.
128 *
129 * FIXME: is this needed?
130 *
131 * RETURNS:
132 * @mode->vdisplay
133 */
134int drm_mode_height(struct drm_display_mode *mode)
135{
136 return mode->vdisplay;
137}
138EXPORT_SYMBOL(drm_mode_height);
139
140/**
141 * drm_mode_vrefresh - get the vrefresh of a mode
142 * @mode: mode
143 *
144 * LOCKING:
145 * None.
146 *
147 * Return @mode's vrefresh rate or calculate it if necessary.
148 *
149 * FIXME: why is this needed? shouldn't vrefresh be set already?
150 *
151 * RETURNS:
 152 * Vertical refresh rate of @mode x 1000, for precision reasons.
153 */
154int drm_mode_vrefresh(struct drm_display_mode *mode)
155{
156 int refresh = 0;
157 unsigned int calc_val;
158
159 if (mode->vrefresh > 0)
160 refresh = mode->vrefresh;
161 else if (mode->htotal > 0 && mode->vtotal > 0) {
 162 /* work out vrefresh; the value will be x1000 */
163 calc_val = (mode->clock * 1000);
164
165 calc_val /= mode->htotal;
166 calc_val *= 1000;
167 calc_val /= mode->vtotal;
168
169 refresh = calc_val;
170 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
171 refresh *= 2;
172 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
173 refresh /= 2;
174 if (mode->vscan > 1)
175 refresh /= mode->vscan;
176 }
177 return refresh;
178}
179EXPORT_SYMBOL(drm_mode_vrefresh);
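A worked instance of the integer math above, assuming the standard 640x480 VESA timing (clock 25175 kHz, htotal 800, vtotal 525); the helper name is hypothetical:

static int example_vrefresh_x1000(void)
{
        unsigned int calc_val = 25175 * 1000;   /* pixel clock in Hz: 25175000 */

        calc_val /= 800;                        /* line rate, truncated: 31468 */
        calc_val *= 1000;                       /* 31468000 */
        calc_val /= 525;                        /* 59939 -> ~59.939 Hz, x1000 */
        return calc_val;
}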
180
181/**
182 * drm_mode_set_crtcinfo - set CRTC modesetting parameters
183 * @p: mode
 184 * @adjust_flags: adjustment flags (e.g. CRTC_INTERLACE_HALVE_V)
185 *
186 * LOCKING:
187 * None.
188 *
189 * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
190 */
191void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
192{
193 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
194 return;
195
196 p->crtc_hdisplay = p->hdisplay;
197 p->crtc_hsync_start = p->hsync_start;
198 p->crtc_hsync_end = p->hsync_end;
199 p->crtc_htotal = p->htotal;
200 p->crtc_hskew = p->hskew;
201 p->crtc_vdisplay = p->vdisplay;
202 p->crtc_vsync_start = p->vsync_start;
203 p->crtc_vsync_end = p->vsync_end;
204 p->crtc_vtotal = p->vtotal;
205
206 if (p->flags & DRM_MODE_FLAG_INTERLACE) {
207 if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
208 p->crtc_vdisplay /= 2;
209 p->crtc_vsync_start /= 2;
210 p->crtc_vsync_end /= 2;
211 p->crtc_vtotal /= 2;
212 }
213
214 p->crtc_vtotal |= 1;
215 }
216
217 if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
218 p->crtc_vdisplay *= 2;
219 p->crtc_vsync_start *= 2;
220 p->crtc_vsync_end *= 2;
221 p->crtc_vtotal *= 2;
222 }
223
224 if (p->vscan > 1) {
225 p->crtc_vdisplay *= p->vscan;
226 p->crtc_vsync_start *= p->vscan;
227 p->crtc_vsync_end *= p->vscan;
228 p->crtc_vtotal *= p->vscan;
229 }
230
231 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
232 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
233 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
234 p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
235
236 p->crtc_hadjusted = false;
237 p->crtc_vadjusted = false;
238}
239EXPORT_SYMBOL(drm_mode_set_crtcinfo);
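As a sketch (hypothetical 1080i numbers, assuming the same headers this file already includes), the CRTC_INTERLACE_HALVE_V path halves the vertical timings so the CRTC is programmed per field:

static void example_halve_interlaced(struct drm_display_mode *mode)
{
        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
        /* For vdisplay 1080 / vtotal 1125 this yields crtc_vdisplay 540 and
         * crtc_vtotal 563 (1125 / 2 = 562, then |= 1). */
}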
240
241
242/**
243 * drm_mode_duplicate - allocate and duplicate an existing mode
 244 * @mode: mode to duplicate
245 *
246 * LOCKING:
247 * None.
248 *
249 * Just allocate a new mode, copy the existing mode into it, and return
250 * a pointer to it. Used to create new instances of established modes.
251 */
252struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
253 struct drm_display_mode *mode)
254{
255 struct drm_display_mode *nmode;
256 int new_id;
257
258 nmode = drm_mode_create(dev);
259 if (!nmode)
260 return NULL;
261
262 new_id = nmode->base.id;
263 *nmode = *mode;
264 nmode->base.id = new_id;
265 INIT_LIST_HEAD(&nmode->head);
266 return nmode;
267}
268EXPORT_SYMBOL(drm_mode_duplicate);
269
270/**
271 * drm_mode_equal - test modes for equality
272 * @mode1: first mode
273 * @mode2: second mode
274 *
275 * LOCKING:
276 * None.
277 *
278 * Check to see if @mode1 and @mode2 are equivalent.
279 *
280 * RETURNS:
281 * True if the modes are equal, false otherwise.
282 */
283bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
284{
 285 /* do clock check; convert to PICOS so fb modes get matched
 286 * the same way */
287 if (mode1->clock && mode2->clock) {
288 if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
289 return false;
290 } else if (mode1->clock != mode2->clock)
291 return false;
292
293 if (mode1->hdisplay == mode2->hdisplay &&
294 mode1->hsync_start == mode2->hsync_start &&
295 mode1->hsync_end == mode2->hsync_end &&
296 mode1->htotal == mode2->htotal &&
297 mode1->hskew == mode2->hskew &&
298 mode1->vdisplay == mode2->vdisplay &&
299 mode1->vsync_start == mode2->vsync_start &&
300 mode1->vsync_end == mode2->vsync_end &&
301 mode1->vtotal == mode2->vtotal &&
302 mode1->vscan == mode2->vscan &&
303 mode1->flags == mode2->flags)
304 return true;
305
306 return false;
307}
308EXPORT_SYMBOL(drm_mode_equal);
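A small sketch tying the two helpers above together (the function name is made up): a duplicate receives a fresh base.id but keeps the timings, so drm_mode_equal() still reports a match.

static bool example_duplicate_matches(struct drm_device *dev,
                                      struct drm_display_mode *mode)
{
        struct drm_display_mode *copy = drm_mode_duplicate(dev, mode);
        bool same;

        if (!copy)
                return false;
        same = drm_mode_equal(mode, copy);      /* ids are not compared */
        drm_mode_destroy(dev, copy);
        return same;
}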
309
310/**
311 * drm_mode_validate_size - make sure modes adhere to size constraints
312 * @dev: DRM device
313 * @mode_list: list of modes to check
314 * @maxX: maximum width
315 * @maxY: maximum height
316 * @maxPitch: max pitch
317 *
318 * LOCKING:
319 * Caller must hold a lock protecting @mode_list.
320 *
321 * The DRM device (@dev) has size and pitch limits. Here we validate the
322 * modes we probed for @dev against those limits and set their status as
323 * necessary.
324 */
325void drm_mode_validate_size(struct drm_device *dev,
326 struct list_head *mode_list,
327 int maxX, int maxY, int maxPitch)
328{
329 struct drm_display_mode *mode;
330
331 list_for_each_entry(mode, mode_list, head) {
332 if (maxPitch > 0 && mode->hdisplay > maxPitch)
333 mode->status = MODE_BAD_WIDTH;
334
335 if (maxX > 0 && mode->hdisplay > maxX)
336 mode->status = MODE_VIRTUAL_X;
337
338 if (maxY > 0 && mode->vdisplay > maxY)
339 mode->status = MODE_VIRTUAL_Y;
340 }
341}
342EXPORT_SYMBOL(drm_mode_validate_size);
343
344/**
345 * drm_mode_validate_clocks - validate modes against clock limits
346 * @dev: DRM device
347 * @mode_list: list of modes to check
348 * @min: minimum clock rate array
349 * @max: maximum clock rate array
350 * @n_ranges: number of clock ranges (size of arrays)
351 *
352 * LOCKING:
353 * Caller must hold a lock protecting @mode_list.
354 *
355 * Some code may need to check a mode list against the clock limits of the
356 * device in question. This function walks the mode list, testing to make
357 * sure each mode falls within a given range (defined by @min and @max
358 * arrays) and sets @mode->status as needed.
359 */
360void drm_mode_validate_clocks(struct drm_device *dev,
361 struct list_head *mode_list,
362 int *min, int *max, int n_ranges)
363{
364 struct drm_display_mode *mode;
365 int i;
366
367 list_for_each_entry(mode, mode_list, head) {
368 bool good = false;
369 for (i = 0; i < n_ranges; i++) {
370 if (mode->clock >= min[i] && mode->clock <= max[i]) {
371 good = true;
372 break;
373 }
374 }
375 if (!good)
376 mode->status = MODE_CLOCK_RANGE;
377 }
378}
379EXPORT_SYMBOL(drm_mode_validate_clocks);
380
381/**
382 * drm_mode_prune_invalid - remove invalid modes from mode list
383 * @dev: DRM device
384 * @mode_list: list of modes to check
385 * @verbose: be verbose about it
386 *
387 * LOCKING:
388 * Caller must hold a lock protecting @mode_list.
389 *
390 * Once mode list generation is complete, a caller can use this routine to
391 * remove invalid modes from a mode list. If any of the modes have a
392 * status other than %MODE_OK, they are removed from @mode_list and freed.
393 */
394void drm_mode_prune_invalid(struct drm_device *dev,
395 struct list_head *mode_list, bool verbose)
396{
397 struct drm_display_mode *mode, *t;
398
399 list_for_each_entry_safe(mode, t, mode_list, head) {
400 if (mode->status != MODE_OK) {
401 list_del(&mode->head);
402 if (verbose) {
403 drm_mode_debug_printmodeline(mode);
404 DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
405 }
406 drm_mode_destroy(dev, mode);
407 }
408 }
409}
410EXPORT_SYMBOL(drm_mode_prune_invalid);
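A sketch of how a driver might chain the validation helpers above before exposing a mode list; the limits shown are made up, not taken from any real hardware.

static void example_filter_modes(struct drm_device *dev,
                                 struct list_head *mode_list)
{
        int min[] = { 25000 };          /* kHz, hypothetical lower bound */
        int max[] = { 400000 };         /* kHz, hypothetical upper bound */

        drm_mode_validate_size(dev, mode_list, 2048, 2048, 2048);
        drm_mode_validate_clocks(dev, mode_list, min, max, 1);
        drm_mode_prune_invalid(dev, mode_list, true);
}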
411
412/**
413 * drm_mode_compare - compare modes for favorability
414 * @lh_a: list_head for first mode
415 * @lh_b: list_head for second mode
416 *
417 * LOCKING:
418 * None.
419 *
420 * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
421 * which is better.
422 *
423 * RETURNS:
424 * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
425 * positive if @lh_b is better than @lh_a.
426 */
427static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
428{
429 struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
430 struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
431 int diff;
432
433 diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
434 ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
435 if (diff)
436 return diff;
437 diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
438 if (diff)
439 return diff;
440 diff = b->clock - a->clock;
441 return diff;
442}
443
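To make the resulting order concrete, three hypothetical modes would sort as follows under this comparison:

/*
 *   1024x768 (PREFERRED)  before  1920x1200            - preferred bit wins
 *   1920x1200             before  1280x1024            - larger hdisplay*vdisplay
 *   1280x1024 @ 135 MHz   before  1280x1024 @ 108 MHz  - higher clock
 */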
 444/* FIXME: why don't we have a generic list sort function? */
445/* list sort from Mark J Roberts (mjr@znex.org) */
446void list_sort(struct list_head *head,
447 int (*cmp)(struct list_head *a, struct list_head *b))
448{
449 struct list_head *p, *q, *e, *list, *tail, *oldhead;
450 int insize, nmerges, psize, qsize, i;
451
452 list = head->next;
453 list_del(head);
454 insize = 1;
455 for (;;) {
456 p = oldhead = list;
457 list = tail = NULL;
458 nmerges = 0;
459
460 while (p) {
461 nmerges++;
462 q = p;
463 psize = 0;
464 for (i = 0; i < insize; i++) {
465 psize++;
466 q = q->next == oldhead ? NULL : q->next;
467 if (!q)
468 break;
469 }
470
471 qsize = insize;
472 while (psize > 0 || (qsize > 0 && q)) {
473 if (!psize) {
474 e = q;
475 q = q->next;
476 qsize--;
477 if (q == oldhead)
478 q = NULL;
479 } else if (!qsize || !q) {
480 e = p;
481 p = p->next;
482 psize--;
483 if (p == oldhead)
484 p = NULL;
485 } else if (cmp(p, q) <= 0) {
486 e = p;
487 p = p->next;
488 psize--;
489 if (p == oldhead)
490 p = NULL;
491 } else {
492 e = q;
493 q = q->next;
494 qsize--;
495 if (q == oldhead)
496 q = NULL;
497 }
498 if (tail)
499 tail->next = e;
500 else
501 list = e;
502 e->prev = tail;
503 tail = e;
504 }
505 p = q;
506 }
507
508 tail->next = list;
509 list->prev = tail;
510
511 if (nmerges <= 1)
512 break;
513
514 insize *= 2;
515 }
516
517 head->next = list;
518 head->prev = list->prev;
519 list->prev->next = head;
520 list->prev = head;
521}
522
523/**
524 * drm_mode_sort - sort mode list
525 * @mode_list: list to sort
526 *
527 * LOCKING:
528 * Caller must hold a lock protecting @mode_list.
529 *
530 * Sort @mode_list by favorability, putting good modes first.
531 */
532void drm_mode_sort(struct list_head *mode_list)
533{
534 list_sort(mode_list, drm_mode_compare);
535}
536EXPORT_SYMBOL(drm_mode_sort);
537
538/**
539 * drm_mode_connector_list_update - update the mode list for the connector
540 * @connector: the connector to update
541 *
542 * LOCKING:
 543 * Caller must hold a lock protecting the connector's mode lists.
 544 *
 545 * This moves the modes from the @connector probed_modes list
 546 * to the actual mode list. It compares each probed mode against the
 547 * current list and only adds different modes. Any mode left unverified
 548 * after this point will be removed by the prune-invalid pass.
549 */
550void drm_mode_connector_list_update(struct drm_connector *connector)
551{
552 struct drm_display_mode *mode;
553 struct drm_display_mode *pmode, *pt;
554 int found_it;
555
556 list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
557 head) {
558 found_it = 0;
559 /* go through current modes checking for the new probed mode */
560 list_for_each_entry(mode, &connector->modes, head) {
561 if (drm_mode_equal(pmode, mode)) {
562 found_it = 1;
563 /* if equal delete the probed mode */
564 mode->status = pmode->status;
565 list_del(&pmode->head);
566 drm_mode_destroy(connector->dev, pmode);
567 break;
568 }
569 }
570
571 if (!found_it) {
572 list_move_tail(&pmode->head, &connector->modes);
573 }
574 }
575}
576EXPORT_SYMBOL(drm_mode_connector_list_update);
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index ae73b7f7249a..8df849f66830 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
49 int request, int *eof, void *data); 49 int request, int *eof, void *data);
50static int drm_bufs_info(char *buf, char **start, off_t offset, 50static int drm_bufs_info(char *buf, char **start, off_t offset,
51 int request, int *eof, void *data); 51 int request, int *eof, void *data);
52static int drm_vblank_info(char *buf, char **start, off_t offset,
53 int request, int *eof, void *data);
52static int drm_gem_name_info(char *buf, char **start, off_t offset, 54static int drm_gem_name_info(char *buf, char **start, off_t offset,
53 int request, int *eof, void *data); 55 int request, int *eof, void *data);
54static int drm_gem_object_info(char *buf, char **start, off_t offset, 56static int drm_gem_object_info(char *buf, char **start, off_t offset,
@@ -72,6 +74,7 @@ static struct drm_proc_list {
72 {"clients", drm_clients_info, 0}, 74 {"clients", drm_clients_info, 0},
73 {"queues", drm_queues_info, 0}, 75 {"queues", drm_queues_info, 0},
74 {"bufs", drm_bufs_info, 0}, 76 {"bufs", drm_bufs_info, 0},
77 {"vblank", drm_vblank_info, 0},
75 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 78 {"gem_names", drm_gem_name_info, DRIVER_GEM},
76 {"gem_objects", drm_gem_object_info, DRIVER_GEM}, 79 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
77#if DRM_DEBUG_CODE 80#if DRM_DEBUG_CODE
@@ -195,6 +198,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
195 int *eof, void *data) 198 int *eof, void *data)
196{ 199{
197 struct drm_minor *minor = (struct drm_minor *) data; 200 struct drm_minor *minor = (struct drm_minor *) data;
201 struct drm_master *master = minor->master;
198 struct drm_device *dev = minor->dev; 202 struct drm_device *dev = minor->dev;
199 int len = 0; 203 int len = 0;
200 204
@@ -203,13 +207,16 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
203 return 0; 207 return 0;
204 } 208 }
205 209
210 if (!master)
211 return 0;
212
206 *start = &buf[offset]; 213 *start = &buf[offset];
207 *eof = 0; 214 *eof = 0;
208 215
209 if (dev->unique) { 216 if (master->unique) {
210 DRM_PROC_PRINT("%s %s %s\n", 217 DRM_PROC_PRINT("%s %s %s\n",
211 dev->driver->pci_driver.name, 218 dev->driver->pci_driver.name,
212 pci_name(dev->pdev), dev->unique); 219 pci_name(dev->pdev), master->unique);
213 } else { 220 } else {
214 DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, 221 DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
215 pci_name(dev->pdev)); 222 pci_name(dev->pdev));
@@ -454,6 +461,66 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
454} 461}
455 462
456/** 463/**
464 * Called when "/proc/dri/.../vblank" is read.
465 *
466 * \param buf output buffer.
467 * \param start start of output data.
468 * \param offset requested start offset.
469 * \param request requested number of bytes.
470 * \param eof whether there is no more data to return.
471 * \param data private data.
472 * \return number of written bytes.
473 */
474static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
475 int *eof, void *data)
476{
477 struct drm_minor *minor = (struct drm_minor *) data;
478 struct drm_device *dev = minor->dev;
479 int len = 0;
480 int crtc;
481
482 if (offset > DRM_PROC_LIMIT) {
483 *eof = 1;
484 return 0;
485 }
486
487 *start = &buf[offset];
488 *eof = 0;
489
490 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
491 DRM_PROC_PRINT("CRTC %d enable: %d\n",
492 crtc, atomic_read(&dev->vblank_refcount[crtc]));
493 DRM_PROC_PRINT("CRTC %d counter: %d\n",
494 crtc, drm_vblank_count(dev, crtc));
495 DRM_PROC_PRINT("CRTC %d last wait: %d\n",
496 crtc, dev->last_vblank_wait[crtc]);
497 DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
498 crtc, dev->vblank_inmodeset[crtc]);
499 }
500
501 if (len > request + offset)
502 return request;
503 *eof = 1;
504 return len - offset;
505}
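With the format strings above, each CRTC contributes a block like the following to /proc/dri/.../vblank (values hypothetical):

/*
 *   CRTC 0 enable: 1
 *   CRTC 0 counter: 12345
 *   CRTC 0 last wait: 12340
 *   CRTC 0 in modeset: 0
 */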
506
507/**
508 * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
509 */
510static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
511 int *eof, void *data)
512{
513 struct drm_minor *minor = (struct drm_minor *) data;
514 struct drm_device *dev = minor->dev;
515 int ret;
516
517 mutex_lock(&dev->struct_mutex);
518 ret = drm__vblank_info(buf, start, offset, request, eof, data);
519 mutex_unlock(&dev->struct_mutex);
520 return ret;
521}
522
523/**
457 * Called when "/proc/dri/.../clients" is read. 524 * Called when "/proc/dri/.../clients" is read.
458 * 525 *
459 * \param buf output buffer. 526 * \param buf output buffer.
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 66c96ec66672..5ca132afa4f2 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -57,6 +57,14 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
57 int ret; 57 int ret;
58 int base = 0, limit = 63; 58 int base = 0, limit = 63;
59 59
60 if (type == DRM_MINOR_CONTROL) {
61 base += 64;
62 limit = base + 127;
63 } else if (type == DRM_MINOR_RENDER) {
64 base += 128;
65 limit = base + 255;
66 }
67
60again: 68again:
61 if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) { 69 if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
62 DRM_ERROR("Out of memory expanding drawable idr\n"); 70 DRM_ERROR("Out of memory expanding drawable idr\n");
@@ -79,6 +87,104 @@ again:
79 return new_id; 87 return new_id;
80} 88}
81 89
90struct drm_master *drm_master_create(struct drm_minor *minor)
91{
92 struct drm_master *master;
93
94 master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER);
95 if (!master)
96 return NULL;
97
98 kref_init(&master->refcount);
99 spin_lock_init(&master->lock.spinlock);
100 init_waitqueue_head(&master->lock.lock_queue);
101 drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
102 INIT_LIST_HEAD(&master->magicfree);
103 master->minor = minor;
104
105 list_add_tail(&master->head, &minor->master_list);
106
107 return master;
108}
109
110struct drm_master *drm_master_get(struct drm_master *master)
111{
112 kref_get(&master->refcount);
113 return master;
114}
115
116static void drm_master_destroy(struct kref *kref)
117{
118 struct drm_master *master = container_of(kref, struct drm_master, refcount);
119 struct drm_magic_entry *pt, *next;
120 struct drm_device *dev = master->minor->dev;
121
122 list_del(&master->head);
123
124 if (dev->driver->master_destroy)
125 dev->driver->master_destroy(dev, master);
126
127 if (master->unique) {
128 drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER);
129 master->unique = NULL;
130 master->unique_len = 0;
131 }
132
133 list_for_each_entry_safe(pt, next, &master->magicfree, head) {
134 list_del(&pt->head);
135 drm_ht_remove_item(&master->magiclist, &pt->hash_item);
136 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
137 }
138
139 drm_ht_remove(&master->magiclist);
140
141 if (master->lock.hw_lock) {
142 if (dev->sigdata.lock == master->lock.hw_lock)
143 dev->sigdata.lock = NULL;
144 master->lock.hw_lock = NULL;
145 master->lock.file_priv = NULL;
146 wake_up_interruptible(&master->lock.lock_queue);
147 }
148
149 drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
150}
151
152void drm_master_put(struct drm_master **master)
153{
154 kref_put(&(*master)->refcount, drm_master_destroy);
155 *master = NULL;
156}
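A minimal sketch of the intended take/drop pattern for the refcounted master object (the helper name is hypothetical):

static void example_master_ref(struct drm_file *file_priv)
{
        struct drm_master *master = drm_master_get(file_priv->minor->master);

        /* ... use master->lock, master->unique, master->magiclist ... */

        drm_master_put(&master);        /* drops the kref and clears the pointer */
}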
157
158int drm_setmaster_ioctl(struct drm_device *dev, void *data,
159 struct drm_file *file_priv)
160{
161 if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
162 return -EINVAL;
163
164 if (!file_priv->master)
165 return -EINVAL;
166
167 if (!file_priv->minor->master &&
168 file_priv->minor->master != file_priv->master) {
169 mutex_lock(&dev->struct_mutex);
170 file_priv->minor->master = drm_master_get(file_priv->master);
 171 mutex_unlock(&dev->struct_mutex);
172 }
173
174 return 0;
175}
176
177int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
178 struct drm_file *file_priv)
179{
180 if (!file_priv->master)
181 return -EINVAL;
182 mutex_lock(&dev->struct_mutex);
183 drm_master_put(&file_priv->minor->master);
184 mutex_unlock(&dev->struct_mutex);
185 return 0;
186}
187
82static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, 188static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
83 const struct pci_device_id *ent, 189 const struct pci_device_id *ent,
84 struct drm_driver *driver) 190 struct drm_driver *driver)
@@ -92,7 +198,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
92 198
93 spin_lock_init(&dev->count_lock); 199 spin_lock_init(&dev->count_lock);
94 spin_lock_init(&dev->drw_lock); 200 spin_lock_init(&dev->drw_lock);
95 spin_lock_init(&dev->lock.spinlock);
96 init_timer(&dev->timer); 201 init_timer(&dev->timer);
97 mutex_init(&dev->struct_mutex); 202 mutex_init(&dev->struct_mutex);
98 mutex_init(&dev->ctxlist_mutex); 203 mutex_init(&dev->ctxlist_mutex);
@@ -140,9 +245,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
140 } 245 }
141 } 246 }
142 247
143 if (dev->driver->load)
144 if ((retcode = dev->driver->load(dev, ent->driver_data)))
145 goto error_out_unreg;
146 248
147 retcode = drm_ctxbitmap_init(dev); 249 retcode = drm_ctxbitmap_init(dev);
148 if (retcode) { 250 if (retcode) {
@@ -200,6 +302,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
200 new_minor->device = MKDEV(DRM_MAJOR, minor_id); 302 new_minor->device = MKDEV(DRM_MAJOR, minor_id);
201 new_minor->dev = dev; 303 new_minor->dev = dev;
202 new_minor->index = minor_id; 304 new_minor->index = minor_id;
305 INIT_LIST_HEAD(&new_minor->master_list);
203 306
204 idr_replace(&drm_minors_idr, new_minor, minor_id); 307 idr_replace(&drm_minors_idr, new_minor, minor_id);
205 308
@@ -267,8 +370,30 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
267 printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); 370 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
268 goto err_g2; 371 goto err_g2;
269 } 372 }
373
374 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
375 ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
376 if (ret)
377 goto err_g2;
378 }
379
270 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) 380 if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
271 goto err_g2; 381 goto err_g3;
382
383 if (dev->driver->load) {
384 ret = dev->driver->load(dev, ent->driver_data);
385 if (ret)
386 goto err_g3;
387 }
388
389 /* setup the grouping for the legacy output */
390 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
391 ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
392 if (ret)
393 goto err_g3;
394 }
395
396 list_add_tail(&dev->driver_item, &driver->device_list);
272 397
273 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 398 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
274 driver->name, driver->major, driver->minor, driver->patchlevel, 399 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -276,6 +401,8 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
276 401
277 return 0; 402 return 0;
278 403
404err_g3:
405 drm_put_minor(&dev->primary);
279err_g2: 406err_g2:
280 pci_disable_device(pdev); 407 pci_disable_device(pdev);
281err_g1: 408err_g1:
@@ -297,11 +424,6 @@ int drm_put_dev(struct drm_device * dev)
297{ 424{
298 DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); 425 DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
299 426
300 if (dev->unique) {
301 drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
302 dev->unique = NULL;
303 dev->unique_len = 0;
304 }
305 if (dev->devname) { 427 if (dev->devname) {
306 drm_free(dev->devname, strlen(dev->devname) + 1, 428 drm_free(dev->devname, strlen(dev->devname) + 1,
307 DRM_MEM_DRIVER); 429 DRM_MEM_DRIVER);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1611b9bcbe7f..65d72d094c81 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -20,6 +20,7 @@
20#include "drmP.h" 20#include "drmP.h"
21 21
22#define to_drm_minor(d) container_of(d, struct drm_minor, kdev) 22#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
23#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
23 24
24/** 25/**
25 * drm_sysfs_suspend - DRM class suspend hook 26 * drm_sysfs_suspend - DRM class suspend hook
@@ -34,7 +35,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
34 struct drm_minor *drm_minor = to_drm_minor(dev); 35 struct drm_minor *drm_minor = to_drm_minor(dev);
35 struct drm_device *drm_dev = drm_minor->dev; 36 struct drm_device *drm_dev = drm_minor->dev;
36 37
37 if (drm_dev->driver->suspend) 38 if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->suspend)
38 return drm_dev->driver->suspend(drm_dev, state); 39 return drm_dev->driver->suspend(drm_dev, state);
39 40
40 return 0; 41 return 0;
@@ -52,7 +53,7 @@ static int drm_sysfs_resume(struct device *dev)
52 struct drm_minor *drm_minor = to_drm_minor(dev); 53 struct drm_minor *drm_minor = to_drm_minor(dev);
53 struct drm_device *drm_dev = drm_minor->dev; 54 struct drm_device *drm_dev = drm_minor->dev;
54 55
55 if (drm_dev->driver->resume) 56 if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->resume)
56 return drm_dev->driver->resume(drm_dev); 57 return drm_dev->driver->resume(drm_dev);
57 58
58 return 0; 59 return 0;
@@ -144,6 +145,323 @@ static void drm_sysfs_device_release(struct device *dev)
144 return; 145 return;
145} 146}
146 147
148/*
149 * Connector properties
150 */
151static ssize_t status_show(struct device *device,
152 struct device_attribute *attr,
153 char *buf)
154{
155 struct drm_connector *connector = to_drm_connector(device);
156 enum drm_connector_status status;
157
158 status = connector->funcs->detect(connector);
159 return snprintf(buf, PAGE_SIZE, "%s",
160 drm_get_connector_status_name(status));
161}
162
163static ssize_t dpms_show(struct device *device,
164 struct device_attribute *attr,
165 char *buf)
166{
167 struct drm_connector *connector = to_drm_connector(device);
168 struct drm_device *dev = connector->dev;
169 uint64_t dpms_status;
170 int ret;
171
172 ret = drm_connector_property_get_value(connector,
173 dev->mode_config.dpms_property,
174 &dpms_status);
175 if (ret)
176 return 0;
177
178 return snprintf(buf, PAGE_SIZE, "%s",
179 drm_get_dpms_name((int)dpms_status));
180}
181
182static ssize_t enabled_show(struct device *device,
183 struct device_attribute *attr,
184 char *buf)
185{
186 struct drm_connector *connector = to_drm_connector(device);
187
188 return snprintf(buf, PAGE_SIZE, connector->encoder ? "enabled" :
189 "disabled");
190}
191
192static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
193 char *buf, loff_t off, size_t count)
194{
195 struct device *connector_dev = container_of(kobj, struct device, kobj);
196 struct drm_connector *connector = to_drm_connector(connector_dev);
197 unsigned char *edid;
198 size_t size;
199
200 if (!connector->edid_blob_ptr)
201 return 0;
202
203 edid = connector->edid_blob_ptr->data;
204 size = connector->edid_blob_ptr->length;
205 if (!edid)
206 return 0;
207
208 if (off >= size)
209 return 0;
210
211 if (off + count > size)
212 count = size - off;
213 memcpy(buf, edid + off, count);
214
215 return count;
216}
217
218static ssize_t modes_show(struct device *device,
219 struct device_attribute *attr,
220 char *buf)
221{
222 struct drm_connector *connector = to_drm_connector(device);
223 struct drm_display_mode *mode;
224 int written = 0;
225
226 list_for_each_entry(mode, &connector->modes, head) {
227 written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
228 mode->name);
229 }
230
231 return written;
232}
233
234static ssize_t subconnector_show(struct device *device,
235 struct device_attribute *attr,
236 char *buf)
237{
238 struct drm_connector *connector = to_drm_connector(device);
239 struct drm_device *dev = connector->dev;
240 struct drm_property *prop = NULL;
241 uint64_t subconnector;
242 int is_tv = 0;
243 int ret;
244
245 switch (connector->connector_type) {
246 case DRM_MODE_CONNECTOR_DVII:
247 prop = dev->mode_config.dvi_i_subconnector_property;
248 break;
249 case DRM_MODE_CONNECTOR_Composite:
250 case DRM_MODE_CONNECTOR_SVIDEO:
251 case DRM_MODE_CONNECTOR_Component:
252 prop = dev->mode_config.tv_subconnector_property;
253 is_tv = 1;
254 break;
255 default:
256 DRM_ERROR("Wrong connector type for this property\n");
257 return 0;
258 }
259
260 if (!prop) {
261 DRM_ERROR("Unable to find subconnector property\n");
262 return 0;
263 }
264
265 ret = drm_connector_property_get_value(connector, prop, &subconnector);
266 if (ret)
267 return 0;
268
269 return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
270 drm_get_tv_subconnector_name((int)subconnector) :
271 drm_get_dvi_i_subconnector_name((int)subconnector));
272}
273
274static ssize_t select_subconnector_show(struct device *device,
275 struct device_attribute *attr,
276 char *buf)
277{
278 struct drm_connector *connector = to_drm_connector(device);
279 struct drm_device *dev = connector->dev;
280 struct drm_property *prop = NULL;
281 uint64_t subconnector;
282 int is_tv = 0;
283 int ret;
284
285 switch (connector->connector_type) {
286 case DRM_MODE_CONNECTOR_DVII:
287 prop = dev->mode_config.dvi_i_select_subconnector_property;
288 break;
289 case DRM_MODE_CONNECTOR_Composite:
290 case DRM_MODE_CONNECTOR_SVIDEO:
291 case DRM_MODE_CONNECTOR_Component:
292 prop = dev->mode_config.tv_select_subconnector_property;
293 is_tv = 1;
294 break;
295 default:
296 DRM_ERROR("Wrong connector type for this property\n");
297 return 0;
298 }
299
300 if (!prop) {
301 DRM_ERROR("Unable to find select subconnector property\n");
302 return 0;
303 }
304
305 ret = drm_connector_property_get_value(connector, prop, &subconnector);
306 if (ret)
307 return 0;
308
309 return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
310 drm_get_tv_select_name((int)subconnector) :
311 drm_get_dvi_i_select_name((int)subconnector));
312}
313
314static struct device_attribute connector_attrs[] = {
315 __ATTR_RO(status),
316 __ATTR_RO(enabled),
317 __ATTR_RO(dpms),
318 __ATTR_RO(modes),
319};
320
321/* These attributes are for both DVI-I connectors and all types of tv-out. */
322static struct device_attribute connector_attrs_opt1[] = {
323 __ATTR_RO(subconnector),
324 __ATTR_RO(select_subconnector),
325};
326
327static struct bin_attribute edid_attr = {
328 .attr.name = "edid",
329 .size = 128,
330 .read = edid_show,
331};
332
333/**
 334 * drm_sysfs_connector_add - add a connector to sysfs
335 * @connector: connector to add
336 *
 337 * Create a connector device in sysfs, along with its associated connector
338 * properties (so far, connection status, dpms, mode list & edid) and
339 * generate a hotplug event so userspace knows there's a new connector
340 * available.
341 *
342 * Note:
 343 * This routine should only be called *once* for each registered connector.
344 * A second call for an already registered device will trigger the BUG_ON
345 * below.
346 */
347int drm_sysfs_connector_add(struct drm_connector *connector)
348{
349 struct drm_device *dev = connector->dev;
350 int ret = 0, i, j;
351
352 /* We shouldn't get called more than once for the same connector */
353 BUG_ON(device_is_registered(&connector->kdev));
354
355 connector->kdev.parent = &dev->primary->kdev;
356 connector->kdev.class = drm_class;
357 connector->kdev.release = drm_sysfs_device_release;
358
359 DRM_DEBUG("adding \"%s\" to sysfs\n",
360 drm_get_connector_name(connector));
361
362 snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s",
363 dev->primary->index, drm_get_connector_name(connector));
364 ret = device_register(&connector->kdev);
365
366 if (ret) {
367 DRM_ERROR("failed to register connector device: %d\n", ret);
368 goto out;
369 }
370
371 /* Standard attributes */
372
373 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
374 ret = device_create_file(&connector->kdev, &connector_attrs[i]);
375 if (ret)
376 goto err_out_files;
377 }
378
379 /* Optional attributes */
380 /*
381 * In the long run it maybe a good idea to make one set of
382 * optionals per connector type.
383 */
384 switch (connector->connector_type) {
385 case DRM_MODE_CONNECTOR_DVII:
386 case DRM_MODE_CONNECTOR_Composite:
387 case DRM_MODE_CONNECTOR_SVIDEO:
388 case DRM_MODE_CONNECTOR_Component:
389 for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
390 ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
391 if (ret)
392 goto err_out_files;
393 }
394 break;
395 default:
396 break;
397 }
398
399 ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
400 if (ret)
401 goto err_out_files;
402
403 /* Let userspace know we have a new connector */
404 drm_sysfs_hotplug_event(dev);
405
406 return 0;
407
408err_out_files:
409 if (i > 0)
410 for (j = 0; j < i; j++)
411 device_remove_file(&connector->kdev,
 412     &connector_attrs[j]);
413 device_unregister(&connector->kdev);
414
415out:
416 return ret;
417}
418EXPORT_SYMBOL(drm_sysfs_connector_add);
419
420/**
 421 * drm_sysfs_connector_remove - remove a connector device from sysfs
422 * @connector: connector to remove
423 *
424 * Remove @connector and its associated attributes from sysfs. Note that
425 * the device model core will take care of sending the "remove" uevent
426 * at this time, so we don't need to do it.
427 *
428 * Note:
429 * This routine should only be called if the connector was previously
430 * successfully registered. If @connector hasn't been registered yet,
431 * you'll likely see a panic somewhere deep in sysfs code when called.
432 */
433void drm_sysfs_connector_remove(struct drm_connector *connector)
434{
435 int i;
436
437 DRM_DEBUG("removing \"%s\" from sysfs\n",
438 drm_get_connector_name(connector));
439
440 for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
441 device_remove_file(&connector->kdev, &connector_attrs[i]);
442 sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
443 device_unregister(&connector->kdev);
444}
445EXPORT_SYMBOL(drm_sysfs_connector_remove);
446
447/**
448 * drm_sysfs_hotplug_event - generate a DRM uevent
449 * @dev: DRM device
450 *
451 * Send a uevent for the DRM device specified by @dev. Currently we only
452 * set HOTPLUG=1 in the uevent environment, but this could be expanded to
453 * deal with other types of events.
454 */
455void drm_sysfs_hotplug_event(struct drm_device *dev)
456{
457 char *event_string = "HOTPLUG=1";
458 char *envp[] = { event_string, NULL };
459
460 DRM_DEBUG("generating hotplug event\n");
461
462 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
463}
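Assuming a connector that registers as "card0-VGA-1" (a hypothetical name following the "card%d-%s" bus_id format used above), the attributes added in this file appear in sysfs roughly as:

/*
 *   /sys/class/drm/card0-VGA-1/status
 *   /sys/class/drm/card0-VGA-1/enabled
 *   /sys/class/drm/card0-VGA-1/dpms
 *   /sys/class/drm/card0-VGA-1/modes
 *   /sys/class/drm/card0-VGA-1/edid     (binary attribute, 128 bytes advertised)
 */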
464
147/** 465/**
148 * drm_sysfs_device_add - adds a class device to sysfs for a character driver 466 * drm_sysfs_device_add - adds a class device to sysfs for a character driver
149 * @dev: DRM device to be added 467 * @dev: DRM device to be added
@@ -163,7 +481,12 @@ int drm_sysfs_device_add(struct drm_minor *minor)
163 minor->kdev.class = drm_class; 481 minor->kdev.class = drm_class;
164 minor->kdev.release = drm_sysfs_device_release; 482 minor->kdev.release = drm_sysfs_device_release;
165 minor->kdev.devt = minor->device; 483 minor->kdev.devt = minor->device;
166 minor_str = "card%d"; 484 if (minor->type == DRM_MINOR_CONTROL)
485 minor_str = "controlD%d";
486 else if (minor->type == DRM_MINOR_RENDER)
487 minor_str = "renderD%d";
488 else
489 minor_str = "card%d";
167 490
168 snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index); 491 snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
169 492
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c234c6f24a8d..3ffae021d280 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
267 dmah.size = map->size; 267 dmah.size = map->size;
268 __drm_pci_free(dev, &dmah); 268 __drm_pci_free(dev, &dmah);
269 break; 269 break;
270 case _DRM_GEM:
271 DRM_ERROR("tried to rmmap GEM object\n");
272 break;
270 } 273 }
271 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 274 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
272 } 275 }
@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
399 * Create a new drm_vma_entry structure as the \p vma private data entry and 402 * Create a new drm_vma_entry structure as the \p vma private data entry and
400 * add it to drm_device::vmalist. 403 * add it to drm_device::vmalist.
401 */ 404 */
402static void drm_vm_open_locked(struct vm_area_struct *vma) 405void drm_vm_open_locked(struct vm_area_struct *vma)
403{ 406{
404 struct drm_file *priv = vma->vm_file->private_data; 407 struct drm_file *priv = vma->vm_file->private_data;
405 struct drm_device *dev = priv->minor->dev; 408 struct drm_device *dev = priv->minor->dev;
@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
540 * according to the mapping type and remaps the pages. Finally sets the file 543 * according to the mapping type and remaps the pages. Finally sets the file
541 * pointer and calls vm_open(). 544 * pointer and calls vm_open().
542 */ 545 */
543static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) 546int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
544{ 547{
545 struct drm_file *priv = filp->private_data; 548 struct drm_file *priv = filp->private_data;
546 struct drm_device *dev = priv->minor->dev; 549 struct drm_device *dev = priv->minor->dev;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d8fb5d8ee7ea..dd57a5bd4572 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -8,7 +8,22 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
8 i915_gem.o \ 8 i915_gem.o \
9 i915_gem_debug.o \ 9 i915_gem_debug.o \
10 i915_gem_proc.o \ 10 i915_gem_proc.o \
11 i915_gem_tiling.o 11 i915_gem_tiling.o \
12 intel_display.o \
13 intel_crt.o \
14 intel_lvds.o \
15 intel_bios.o \
16 intel_sdvo.o \
17 intel_modes.o \
18 intel_i2c.o \
19 intel_fb.o \
20 intel_tv.o \
21 intel_dvo.o \
22 dvo_ch7xxx.o \
23 dvo_ch7017.o \
24 dvo_ivch.o \
25 dvo_tfp410.o \
26 dvo_sil164.o
12 27
13i915-$(CONFIG_ACPI) += i915_opregion.o 28i915-$(CONFIG_ACPI) += i915_opregion.o
14i915-$(CONFIG_COMPAT) += i915_ioc32.o 29i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
new file mode 100644
index 000000000000..e747ac42fe3a
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -0,0 +1,157 @@
1/*
2 * Copyright © 2006 Eric Anholt
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#ifndef _INTEL_DVO_H
24#define _INTEL_DVO_H
25
26#include <linux/i2c.h>
27#include "drmP.h"
28#include "drm.h"
29#include "drm_crtc.h"
30#include "intel_drv.h"
31
32struct intel_dvo_device {
33 char *name;
34 int type;
35 /* DVOA/B/C output register */
36 u32 dvo_reg;
37 /* GPIO register used for i2c bus to control this device */
38 u32 gpio;
39 int slave_addr;
40 struct intel_i2c_chan *i2c_bus;
41
42 const struct intel_dvo_dev_ops *dev_ops;
43 void *dev_priv;
44
45 struct drm_display_mode *panel_fixed_mode;
46 bool panel_wants_dither;
47};
48
49struct intel_dvo_dev_ops {
50 /*
51 * Initialize the device at startup time.
 52 * Returns false if the device does not exist.
53 */
54 bool (*init)(struct intel_dvo_device *dvo,
55 struct intel_i2c_chan *i2cbus);
56
57 /*
58 * Called to allow the output a chance to create properties after the
59 * RandR objects have been created.
60 */
61 void (*create_resources)(struct intel_dvo_device *dvo);
62
63 /*
64 * Turn on/off output or set intermediate power levels if available.
65 *
66 * Unsupported intermediate modes drop to the lower power setting.
67 * If the mode is DPMSModeOff, the output must be disabled,
68 * as the DPLL may be disabled afterwards.
69 */
70 void (*dpms)(struct intel_dvo_device *dvo, int mode);
71
72 /*
73 * Saves the output's state for restoration on VT switch.
74 */
75 void (*save)(struct intel_dvo_device *dvo);
76
77 /*
78 * Restore's the output's state at VT switch.
79 */
80 void (*restore)(struct intel_dvo_device *dvo);
81
82 /*
83 * Callback for testing a video mode for a given output.
84 *
85 * This function should only check for cases where a mode can't
86 * be supported on the output specifically, and not represent
87 * generic CRTC limitations.
88 *
89 * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
90 */
91 int (*mode_valid)(struct intel_dvo_device *dvo,
92 struct drm_display_mode *mode);
93
94 /*
95 * Callback to adjust the mode to be set in the CRTC.
96 *
97 * This allows an output to adjust the clock or even the entire set of
98 * timings, which is used for panels with fixed timings or for
99 * buses with clock limitations.
100 */
101 bool (*mode_fixup)(struct intel_dvo_device *dvo,
102 struct drm_display_mode *mode,
103 struct drm_display_mode *adjusted_mode);
104
105 /*
106 * Callback for preparing mode changes on an output
107 */
108 void (*prepare)(struct intel_dvo_device *dvo);
109
110 /*
111 * Callback for committing mode changes on an output
112 */
113 void (*commit)(struct intel_dvo_device *dvo);
114
115 /*
116 * Callback for setting up a video mode after fixups have been made.
117 *
118 * This is only called while the output is disabled. The dpms callback
 119 * must be all that's necessary to turn the output on
120 * after this function is called.
121 */
122 void (*mode_set)(struct intel_dvo_device *dvo,
123 struct drm_display_mode *mode,
124 struct drm_display_mode *adjusted_mode);
125
126 /*
127 * Probe for a connected output, and return detect_status.
128 */
129 enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
130
131 /**
132 * Query the device for the modes it provides.
133 *
134 * This function may also update MonInfo, mm_width, and mm_height.
135 *
136 * \return singly-linked list of modes or NULL if no modes found.
137 */
138 struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
139
140 /**
141 * Clean up driver-specific bits of the output
142 */
143 void (*destroy) (struct intel_dvo_device *dvo);
144
145 /**
146 * Debugging hook to dump device registers to log file
147 */
148 void (*dump_regs)(struct intel_dvo_device *dvo);
149};
150
151extern struct intel_dvo_dev_ops sil164_ops;
152extern struct intel_dvo_dev_ops ch7xxx_ops;
153extern struct intel_dvo_dev_ops ivch_ops;
154extern struct intel_dvo_dev_ops tfp410_ops;
155extern struct intel_dvo_dev_ops ch7017_ops;
156
157#endif /* _INTEL_DVO_H */
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
new file mode 100644
index 000000000000..03d4b4973b02
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -0,0 +1,454 @@
1/*
2 * Copyright © 2006 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "dvo.h"
29
30#define CH7017_TV_DISPLAY_MODE 0x00
31#define CH7017_FLICKER_FILTER 0x01
32#define CH7017_VIDEO_BANDWIDTH 0x02
33#define CH7017_TEXT_ENHANCEMENT 0x03
34#define CH7017_START_ACTIVE_VIDEO 0x04
35#define CH7017_HORIZONTAL_POSITION 0x05
36#define CH7017_VERTICAL_POSITION 0x06
37#define CH7017_BLACK_LEVEL 0x07
38#define CH7017_CONTRAST_ENHANCEMENT 0x08
39#define CH7017_TV_PLL 0x09
40#define CH7017_TV_PLL_M 0x0a
41#define CH7017_TV_PLL_N 0x0b
42#define CH7017_SUB_CARRIER_0 0x0c
43#define CH7017_CIV_CONTROL 0x10
44#define CH7017_CIV_0 0x11
45#define CH7017_CHROMA_BOOST 0x14
46#define CH7017_CLOCK_MODE 0x1c
47#define CH7017_INPUT_CLOCK 0x1d
48#define CH7017_GPIO_CONTROL 0x1e
49#define CH7017_INPUT_DATA_FORMAT 0x1f
50#define CH7017_CONNECTION_DETECT 0x20
51#define CH7017_DAC_CONTROL 0x21
52#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22
53#define CH7017_DEFEAT_VSYNC 0x47
54#define CH7017_TEST_PATTERN 0x48
55
56#define CH7017_POWER_MANAGEMENT 0x49
57/** Enables the TV output path. */
58#define CH7017_TV_EN (1 << 0)
59#define CH7017_DAC0_POWER_DOWN (1 << 1)
60#define CH7017_DAC1_POWER_DOWN (1 << 2)
61#define CH7017_DAC2_POWER_DOWN (1 << 3)
62#define CH7017_DAC3_POWER_DOWN (1 << 4)
63/** Powers down the TV out block, and DAC0-3 */
64#define CH7017_TV_POWER_DOWN_EN (1 << 5)
65
66#define CH7017_VERSION_ID 0x4a
67
68#define CH7017_DEVICE_ID 0x4b
69#define CH7017_DEVICE_ID_VALUE 0x1b
70#define CH7018_DEVICE_ID_VALUE 0x1a
71#define CH7019_DEVICE_ID_VALUE 0x19
72
73#define CH7017_XCLK_D2_ADJUST 0x53
74#define CH7017_UP_SCALER_COEFF_0 0x55
75#define CH7017_UP_SCALER_COEFF_1 0x56
76#define CH7017_UP_SCALER_COEFF_2 0x57
77#define CH7017_UP_SCALER_COEFF_3 0x58
78#define CH7017_UP_SCALER_COEFF_4 0x59
79#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a
80#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b
81#define CH7017_GPIO_INVERT 0x5c
82#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d
83#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e
84
85#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f
86/**< Low bits of horizontal active pixel input */
87
88#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60
89/** High bits of horizontal active pixel input */
90#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0)
91/** High bits of vertical active line output */
92#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3)
93
94#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61
95/**< Low bits of vertical active line output */
96
97#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62
98/**< Low bits of horizontal active pixel output */
99
100#define CH7017_LVDS_POWER_DOWN 0x63
101/** High bits of horizontal active pixel output */
102#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0)
103/** Enables the LVDS power down state transition */
104#define CH7017_LVDS_POWER_DOWN_EN (1 << 6)
105/** Enables the LVDS upscaler */
106#define CH7017_LVDS_UPSCALER_EN (1 << 7)
107#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
108
109#define CH7017_LVDS_ENCODING 0x64
110#define CH7017_LVDS_DITHER_2D (1 << 2)
111#define CH7017_LVDS_DITHER_DIS (1 << 3)
112#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4)
113#define CH7017_LVDS_24_BIT (1 << 5)
114
115#define CH7017_LVDS_ENCODING_2 0x65
116
117#define CH7017_LVDS_PLL_CONTROL 0x66
118/** Enables the LVDS panel output path */
119#define CH7017_LVDS_PANEN (1 << 0)
120/** Enables the LVDS panel backlight */
121#define CH7017_LVDS_BKLEN (1 << 3)
122
123#define CH7017_POWER_SEQUENCING_T1 0x67
124#define CH7017_POWER_SEQUENCING_T2 0x68
125#define CH7017_POWER_SEQUENCING_T3 0x69
126#define CH7017_POWER_SEQUENCING_T4 0x6a
127#define CH7017_POWER_SEQUENCING_T5 0x6b
128#define CH7017_GPIO_DRIVER_TYPE 0x6c
129#define CH7017_GPIO_DATA 0x6d
130#define CH7017_GPIO_DIRECTION_CONTROL 0x6e
131
132#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71
133# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
134# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
135# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
136
137#define CH7017_LVDS_PLL_VCO_CONTROL 0x72
138# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
139# define CH7017_LVDS_PLL_VCO_SHIFT 4
140# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
141
142#define CH7017_OUTPUTS_ENABLE 0x73
143# define CH7017_CHARGE_PUMP_LOW 0x0
144# define CH7017_CHARGE_PUMP_HIGH 0x3
145# define CH7017_LVDS_CHANNEL_A (1 << 3)
146# define CH7017_LVDS_CHANNEL_B (1 << 4)
147# define CH7017_TV_DAC_A (1 << 5)
148# define CH7017_TV_DAC_B (1 << 6)
149# define CH7017_DDC_SELECT_DC2 (1 << 7)
150
151#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74
152#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75
153#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
154
155#define CH7017_LVDS_CONTROL_2 0x78
156# define CH7017_LOOP_FILTER_SHIFT 5
157# define CH7017_PHASE_DETECTOR_SHIFT 0
158
159#define CH7017_BANG_LIMIT_CONTROL 0x7f
160
161struct ch7017_priv {
162 uint8_t save_hapi;
163 uint8_t save_vali;
164 uint8_t save_valo;
165 uint8_t save_ailo;
166 uint8_t save_lvds_pll_vco;
167 uint8_t save_feedback_div;
168 uint8_t save_lvds_control_2;
169 uint8_t save_outputs_enable;
170 uint8_t save_lvds_power_down;
171 uint8_t save_power_management;
172};
173
174static void ch7017_dump_regs(struct intel_dvo_device *dvo);
175static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
176
177static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
178{
179 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
180 u8 out_buf[2];
181 u8 in_buf[2];
182
183 struct i2c_msg msgs[] = {
184 {
185 .addr = i2cbus->slave_addr,
186 .flags = 0,
187 .len = 1,
188 .buf = out_buf,
189 },
190 {
191 .addr = i2cbus->slave_addr,
192 .flags = I2C_M_RD,
193 .len = 1,
194 .buf = in_buf,
195 }
196 };
197
198 out_buf[0] = addr;
199 out_buf[1] = 0;
200
201 if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
202 *val = in_buf[0];
203 return true;
204 }
205
206 return false;
207}
208
209static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
210{
211 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
212 uint8_t out_buf[2];
213 struct i2c_msg msg = {
214 .addr = i2cbus->slave_addr,
215 .flags = 0,
216 .len = 2,
217 .buf = out_buf,
218 };
219
220 out_buf[0] = addr;
221 out_buf[1] = val;
222
223 if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
224 return true;
225
226 return false;
227}
228
229/** Probes for a CH7017 on the given bus and slave address. */
230static bool ch7017_init(struct intel_dvo_device *dvo,
231 struct intel_i2c_chan *i2cbus)
232{
233 struct ch7017_priv *priv;
234 uint8_t val;
235
236 priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
237 if (priv == NULL)
238 return false;
239
240 dvo->i2c_bus = i2cbus;
241 dvo->i2c_bus->slave_addr = dvo->slave_addr;
242 dvo->dev_priv = priv;
243
244 if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
245 goto fail;
246
247 if (val != CH7017_DEVICE_ID_VALUE &&
248 val != CH7018_DEVICE_ID_VALUE &&
249 val != CH7019_DEVICE_ID_VALUE) {
250 DRM_DEBUG("ch701x not detected; got 0x%02x from %s slave %d.\n",
251 val, i2cbus->adapter.name, i2cbus->slave_addr);
252 goto fail;
253 }
254
255 return true;
256fail:
257 kfree(priv);
258 return false;
259}
260
261static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
262{
263 return connector_status_unknown;
264}
265
266static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
267 struct drm_display_mode *mode)
268{
269 if (mode->clock > 160000)
270 return MODE_CLOCK_HIGH;
271
272 return MODE_OK;
273}
274
275static void ch7017_mode_set(struct intel_dvo_device *dvo,
276 struct drm_display_mode *mode,
277 struct drm_display_mode *adjusted_mode)
278{
279 uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
280 uint8_t outputs_enable, lvds_control_2, lvds_power_down;
281 uint8_t horizontal_active_pixel_input;
282 uint8_t horizontal_active_pixel_output, vertical_active_line_output;
283 uint8_t active_input_line_output;
284
285 DRM_DEBUG("Registers before mode setting\n");
286 ch7017_dump_regs(dvo);
287
288 /* LVDS PLL settings from page 75 of 7017-7017ds.pdf */
289 if (mode->clock < 100000) {
290 outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
291 lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
292 (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
293 (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
294 lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
295 (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
296 (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
297 lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
298 (0 << CH7017_PHASE_DETECTOR_SHIFT);
299 } else {
300 outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
301 lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
302 (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
303 (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
304 lvds_pll_feedback_div = 35; /* XXX: overrides the divider computed above */
305 lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
306 (0 << CH7017_PHASE_DETECTOR_SHIFT);
307 if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
308 outputs_enable |= CH7017_LVDS_CHANNEL_B;
309 lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
310 (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
311 (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
312 } else {
313 lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
314 (1 << CH7017_LVDS_PLL_VCO_SHIFT) |
315 (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
316 }
317 }
318
319 horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
320
321 vertical_active_line_output = mode->vdisplay & 0x00ff;
322 horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
323
324 active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
325 (((mode->vdisplay & 0x0700) >> 8) << 3);
326
327 lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
328 (mode->hdisplay & 0x0700) >> 8;
329
330 ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
331 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
332 horizontal_active_pixel_input);
333 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
334 horizontal_active_pixel_output);
335 ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
336 vertical_active_line_output);
337 ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
338 active_input_line_output);
339 ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
340 ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
341 ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
342 ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
343
344 /* Turn the LVDS back on with new settings. */
345 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
346
347 DRM_DEBUG("Registers after mode setting\n");
348 ch7017_dump_regs(dvo);
349}
350
351/* set the CH7017 power state */
352static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
353{
354 uint8_t val;
355
356 ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
357
358 /* Turn off TV/VGA, and never turn it on since we don't support it. */
359 ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
360 CH7017_DAC0_POWER_DOWN |
361 CH7017_DAC1_POWER_DOWN |
362 CH7017_DAC2_POWER_DOWN |
363 CH7017_DAC3_POWER_DOWN |
364 CH7017_TV_POWER_DOWN_EN);
365
366 if (mode == DRM_MODE_DPMS_ON) {
367 /* Turn on the LVDS */
368 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
369 val & ~CH7017_LVDS_POWER_DOWN_EN);
370 } else {
371 /* Turn off the LVDS */
372 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
373 val | CH7017_LVDS_POWER_DOWN_EN);
374 }
375
376 /* XXX: Should actually wait for the power status to update somehow */
377 udelay(20000);
378}
379
380static void ch7017_dump_regs(struct intel_dvo_device *dvo)
381{
382 uint8_t val;
383
384#define DUMP(reg) \
385do { \
386 ch7017_read(dvo, reg, &val); \
387 DRM_DEBUG(#reg ": %02x\n", val); \
388} while (0)
389
390 DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
391 DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
392 DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
393 DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
394 DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
395 DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
396 DUMP(CH7017_LVDS_CONTROL_2);
397 DUMP(CH7017_OUTPUTS_ENABLE);
398 DUMP(CH7017_LVDS_POWER_DOWN);
399}
400
401static void ch7017_save(struct intel_dvo_device *dvo)
402{
403 struct ch7017_priv *priv = dvo->dev_priv;
404
405 ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
406 ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
407 ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
408 ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
409 ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
410 ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
411 ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
412 ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
413 ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
414}
415
416static void ch7017_restore(struct intel_dvo_device *dvo)
417{
418 struct ch7017_priv *priv = dvo->dev_priv;
419
420 /* Power down before changing mode */
421 ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
422
423 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
424 ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
425 ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
426 ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
427 ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
428 ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
429 ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
430 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
431 ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
432}
433
434static void ch7017_destroy(struct intel_dvo_device *dvo)
435{
436 struct ch7017_priv *priv = dvo->dev_priv;
437
438 if (priv) {
439 kfree(priv);
440 dvo->dev_priv = NULL;
441 }
442}
443
444struct intel_dvo_dev_ops ch7017_ops = {
445 .init = ch7017_init,
446 .detect = ch7017_detect,
447 .mode_valid = ch7017_mode_valid,
448 .mode_set = ch7017_mode_set,
449 .dpms = ch7017_dpms,
450 .dump_regs = ch7017_dump_regs,
451 .save = ch7017_save,
452 .restore = ch7017_restore,
453 .destroy = ch7017_destroy,
454};
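All five DVO encoder drivers added in this series read a chip register with the same two-message i2c transaction: a plain write carrying the register offset, followed by an I2C_M_RD message that returns one data byte. The helper below is a minimal sketch of that shared pattern for kernel context; the dvo_reg_read name and the bare adapter/address parameters are illustrative and are not defined by this patch.

#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/types.h>

/* Sketch of the register-read idiom used by ch7017_read()/ch7xxx_readb()
 * and friends: message 0 writes the register offset, message 1 reads one
 * byte back.  Returns true when both messages complete.
 */
static bool dvo_reg_read(struct i2c_adapter *adapter, u16 slave_addr,
			 u8 reg, u8 *val)
{
	u8 out_buf[1] = { reg };
	u8 in_buf[1];
	struct i2c_msg msgs[] = {
		{
			.addr = slave_addr,
			.flags = 0,		/* write the register offset */
			.len = 1,
			.buf = out_buf,
		},
		{
			.addr = slave_addr,
			.flags = I2C_M_RD,	/* read one data byte */
			.len = 1,
			.buf = in_buf,
		},
	};

	if (i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)) == ARRAY_SIZE(msgs)) {
		*val = in_buf[0];
		return true;
	}
	return false;
}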
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
new file mode 100644
index 000000000000..d2fd95dbd034
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -0,0 +1,368 @@
1/**************************************************************************
2
3Copyright © 2006 Dave Airlie
4
5All Rights Reserved.
6
7Permission is hereby granted, free of charge, to any person obtaining a
8copy of this software and associated documentation files (the
9"Software"), to deal in the Software without restriction, including
10without limitation the rights to use, copy, modify, merge, publish,
11distribute, sub license, and/or sell copies of the Software, and to
12permit persons to whom the Software is furnished to do so, subject to
13the following conditions:
14
15The above copyright notice and this permission notice (including the
16next paragraph) shall be included in all copies or substantial portions
17of the Software.
18
19THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27**************************************************************************/
28
29#include "dvo.h"
30
31#define CH7xxx_REG_VID 0x4a
32#define CH7xxx_REG_DID 0x4b
33
34#define CH7011_VID 0x83 /* 7010 as well */
35#define CH7009A_VID 0x84
36#define CH7009B_VID 0x85
37#define CH7301_VID 0x95
38
39#define CH7xxx_VID 0x84
40#define CH7xxx_DID 0x17
41
42#define CH7xxx_NUM_REGS 0x4c
43
44#define CH7xxx_CM 0x1c
45#define CH7xxx_CM_XCM (1<<0)
46#define CH7xxx_CM_MCP (1<<2)
47#define CH7xxx_INPUT_CLOCK 0x1d
48#define CH7xxx_GPIO 0x1e
49#define CH7xxx_GPIO_HPIR (1<<3)
50#define CH7xxx_IDF 0x1f
51
52#define CH7xxx_IDF_HSP (1<<3)
53#define CH7xxx_IDF_VSP (1<<4)
54
55#define CH7xxx_CONNECTION_DETECT 0x20
56#define CH7xxx_CDET_DVI (1<<5)
57
58#define CH7301_DAC_CNTL 0x21
59#define CH7301_HOTPLUG 0x23
60#define CH7xxx_TCTL 0x31
61#define CH7xxx_TVCO 0x32
62#define CH7xxx_TPCP 0x33
63#define CH7xxx_TPD 0x34
64#define CH7xxx_TPVT 0x35
65#define CH7xxx_TLPF 0x36
66#define CH7xxx_TCT 0x37
67#define CH7301_TEST_PATTERN 0x48
68
69#define CH7xxx_PM 0x49
70#define CH7xxx_PM_FPD (1<<0)
71#define CH7301_PM_DACPD0 (1<<1)
72#define CH7301_PM_DACPD1 (1<<2)
73#define CH7301_PM_DACPD2 (1<<3)
74#define CH7xxx_PM_DVIL (1<<6)
75#define CH7xxx_PM_DVIP (1<<7)
76
77#define CH7301_SYNC_POLARITY 0x56
78#define CH7301_SYNC_RGB_YUV (1<<0)
79#define CH7301_SYNC_POL_DVI (1<<5)
80
81/** @file
82 * driver for the Chrontel 7xxx DVI chip over DVO.
83 */
84
85static struct ch7xxx_id_struct {
86 uint8_t vid;
87 char *name;
88} ch7xxx_ids[] = {
89 { CH7011_VID, "CH7011" },
90 { CH7009A_VID, "CH7009A" },
91 { CH7009B_VID, "CH7009B" },
92 { CH7301_VID, "CH7301" },
93};
94
95struct ch7xxx_reg_state {
96 uint8_t regs[CH7xxx_NUM_REGS];
97};
98
99struct ch7xxx_priv {
100 bool quiet;
101
102 struct ch7xxx_reg_state save_reg;
103 struct ch7xxx_reg_state mode_reg;
104 uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
105 uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
106};
107
108static void ch7xxx_save(struct intel_dvo_device *dvo);
109
110static char *ch7xxx_get_id(uint8_t vid)
111{
112 int i;
113
114 for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
115 if (ch7xxx_ids[i].vid == vid)
116 return ch7xxx_ids[i].name;
117 }
118
119 return NULL;
120}
121
122/** Reads an 8 bit register */
123static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
124{
125 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
126 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
127 u8 out_buf[2];
128 u8 in_buf[2];
129
130 struct i2c_msg msgs[] = {
131 {
132 .addr = i2cbus->slave_addr,
133 .flags = 0,
134 .len = 1,
135 .buf = out_buf,
136 },
137 {
138 .addr = i2cbus->slave_addr,
139 .flags = I2C_M_RD,
140 .len = 1,
141 .buf = in_buf,
142 }
143 };
144
145 out_buf[0] = addr;
146 out_buf[1] = 0;
147
148 if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
149 *ch = in_buf[0];
150 return true;
151 }
152
153 if (!ch7xxx->quiet) {
154 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
155 addr, i2cbus->adapter.name, i2cbus->slave_addr);
156 }
157 return false;
158}
159
160/** Writes an 8 bit register */
161static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
162{
163 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
164 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
165 uint8_t out_buf[2];
166 struct i2c_msg msg = {
167 .addr = i2cbus->slave_addr,
168 .flags = 0,
169 .len = 2,
170 .buf = out_buf,
171 };
172
173 out_buf[0] = addr;
174 out_buf[1] = ch;
175
176 if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
177 return true;
178
179 if (!ch7xxx->quiet) {
180 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
181 addr, i2cbus->adapter.name, i2cbus->slave_addr);
182 }
183
184 return false;
185}
186
187static bool ch7xxx_init(struct intel_dvo_device *dvo,
188 struct intel_i2c_chan *i2cbus)
189{
190 /* this will detect the CH7xxx chip on the specified i2c bus */
191 struct ch7xxx_priv *ch7xxx;
192 uint8_t vendor, device;
193 char *name;
194
195 ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
196 if (ch7xxx == NULL)
197 return false;
198
199 dvo->i2c_bus = i2cbus;
200 dvo->i2c_bus->slave_addr = dvo->slave_addr;
201 dvo->dev_priv = ch7xxx;
202 ch7xxx->quiet = true;
203
204 if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
205 goto out;
206
207 name = ch7xxx_get_id(vendor);
208 if (!name) {
209 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
210 vendor, i2cbus->adapter.name, i2cbus->slave_addr);
211 goto out;
212 }
213
214
215 if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
216 goto out;
217
218 if (device != CH7xxx_DID) {
219 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
220 device, i2cbus->adapter.name, i2cbus->slave_addr);
221 goto out;
222 }
223
224 ch7xxx->quiet = false;
225 DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
226 name, vendor, device);
227 return true;
228out:
229 kfree(ch7xxx);
230 return false;
231}
232
233static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
234{
235 uint8_t cdet, orig_pm, pm;
236
237 ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
238
239 pm = orig_pm;
240 pm &= ~CH7xxx_PM_FPD;
241 pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
242
243 ch7xxx_writeb(dvo, CH7xxx_PM, pm);
244
245 ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
246
247 ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
248
249 if (cdet & CH7xxx_CDET_DVI)
250 return connector_status_connected;
251 return connector_status_disconnected;
252}
253
254static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
255 struct drm_display_mode *mode)
256{
257 if (mode->clock > 165000)
258 return MODE_CLOCK_HIGH;
259
260 return MODE_OK;
261}
262
263static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
264 struct drm_display_mode *mode,
265 struct drm_display_mode *adjusted_mode)
266{
267 uint8_t tvco, tpcp, tpd, tlpf, idf;
268
269 if (mode->clock <= 65000) {
270 tvco = 0x23;
271 tpcp = 0x08;
272 tpd = 0x16;
273 tlpf = 0x60;
274 } else {
275 tvco = 0x2d;
276 tpcp = 0x06;
277 tpd = 0x26;
278 tlpf = 0xa0;
279 }
280
281 ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
282 ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
283 ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
284 ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
285 ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
286 ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
287 ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
288
289 ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
290
291 idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
292 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
293 idf |= CH7xxx_IDF_HSP;
294
295 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
296 idf |= CH7xxx_IDF_VSP;
297
298 ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
299}
300
301/* set the CH7xxx power state */
302static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
303{
304 if (mode == DRM_MODE_DPMS_ON)
305 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
306 else
307 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
308}
309
310static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
311{
312 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
313 int i;
314
315 for (i = 0; i < CH7xxx_NUM_REGS; i++) {
316 if ((i % 8) == 0)
317 DRM_DEBUG("\n %02X: ", i);
318 DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
319 }
320}
321
322static void ch7xxx_save(struct intel_dvo_device *dvo)
323{
324 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
325
326 ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
327 ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
328 ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
329 ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
330 ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
331 ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
332 ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
333}
334
335static void ch7xxx_restore(struct intel_dvo_device *dvo)
336{
337 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
338
339 ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
340 ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
341 ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
342 ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
343 ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
344 ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
345 ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
346}
347
348static void ch7xxx_destroy(struct intel_dvo_device *dvo)
349{
350 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
351
352 if (ch7xxx) {
353 kfree(ch7xxx);
354 dvo->dev_priv = NULL;
355 }
356}
357
358struct intel_dvo_dev_ops ch7xxx_ops = {
359 .init = ch7xxx_init,
360 .detect = ch7xxx_detect,
361 .mode_valid = ch7xxx_mode_valid,
362 .mode_set = ch7xxx_mode_set,
363 .dpms = ch7xxx_dpms,
364 .dump_regs = ch7xxx_dump_regs,
365 .save = ch7xxx_save,
366 .restore = ch7xxx_restore,
367 .destroy = ch7xxx_destroy,
368};
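ch7xxx_mode_set() clears both IDF sync-polarity bits and then re-asserts them from the DRM mode flags, with positive hsync mapping to CH7xxx_IDF_HSP and positive vsync to CH7xxx_IDF_VSP. The self-contained sketch below isolates that mapping; the bit values are copied from the register block above, the DRM flag values come from drm_mode.h, and the idf_from_flags helper itself is purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Bit values from the CH7xxx register definitions above and from
 * include/drm/drm_mode.h. */
#define CH7xxx_IDF_HSP		(1 << 3)
#define CH7xxx_IDF_VSP		(1 << 4)
#define DRM_MODE_FLAG_PHSYNC	(1 << 0)
#define DRM_MODE_FLAG_PVSYNC	(1 << 2)

/* Clear both polarity bits, then set them for positive-sync modes,
 * following the logic in ch7xxx_mode_set(). */
static uint8_t idf_from_flags(uint8_t idf, unsigned int flags)
{
	idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
	if (flags & DRM_MODE_FLAG_PHSYNC)
		idf |= CH7xxx_IDF_HSP;
	if (flags & DRM_MODE_FLAG_PVSYNC)
		idf |= CH7xxx_IDF_VSP;
	return idf;
}

int main(void)
{
	/* A mode with positive hsync and negative vsync sets only HSP. */
	printf("IDF = 0x%02x\n", idf_from_flags(0x00, DRM_MODE_FLAG_PHSYNC));
	return 0;
}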
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
new file mode 100644
index 000000000000..0c8d375e8e37
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -0,0 +1,442 @@
1/*
2 * Copyright © 2006 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "dvo.h"
29
30/*
31 * register definitions for the i82807aa.
32 *
33 * Documentation on this chipset can be found in datasheet #29069001 at
34 * intel.com.
35 */
36
37/*
38 * VCH Revision & GMBus Base Addr
39 */
40#define VR00 0x00
41# define VR00_BASE_ADDRESS_MASK 0x007f
42
43/*
44 * Functionality Enable
45 */
46#define VR01 0x01
47
48/*
49 * Enable the panel fitter
50 */
51# define VR01_PANEL_FIT_ENABLE (1 << 3)
52/*
53 * Enables the LCD display.
54 *
55 * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
56 */
57# define VR01_LCD_ENABLE (1 << 2)
58/** Enables the DVO repeater. */
59# define VR01_DVO_BYPASS_ENABLE (1 << 1)
60/** Enables the DVO clock */
61# define VR01_DVO_ENABLE (1 << 0)
62
63/*
64 * LCD Interface Format
65 */
66#define VR10 0x10
67/** Enables LVDS output instead of CMOS */
68# define VR10_LVDS_ENABLE (1 << 4)
69/** Enables 18-bit LVDS output. */
70# define VR10_INTERFACE_1X18 (0 << 2)
71/** Enables 24-bit LVDS or CMOS output */
72# define VR10_INTERFACE_1X24 (1 << 2)
73/** Enables 2x18-bit LVDS or CMOS output. */
74# define VR10_INTERFACE_2X18 (2 << 2)
75/** Enables 2x24-bit LVDS output */
76# define VR10_INTERFACE_2X24 (3 << 2)
77
78/*
79 * VR20 LCD Horizontal Display Size
80 */
81#define VR20 0x20
82
83/*
84 * LCD Vertical Display Size
85 */
86#define VR21 0x21
87
88/*
89 * Panel power down status
90 */
91#define VR30 0x30
92/** Read only bit indicating that the panel is not in a safe poweroff state. */
93# define VR30_PANEL_ON (1 << 15)
94
95#define VR40 0x40
96# define VR40_STALL_ENABLE (1 << 13)
97# define VR40_VERTICAL_INTERP_ENABLE (1 << 12)
98# define VR40_ENHANCED_PANEL_FITTING (1 << 11)
99# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
100# define VR40_AUTO_RATIO_ENABLE (1 << 9)
101# define VR40_CLOCK_GATING_ENABLE (1 << 8)
102
103/*
104 * Panel Fitting Vertical Ratio
105 * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
106 */
107#define VR41 0x41
108
109/*
110 * Panel Fitting Horizontal Ratio
111 * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
112 */
113#define VR42 0x42
114
115/*
116 * Horizontal Image Size
117 */
118#define VR43 0x43
119
120/* VR80 GPIO 0
121 */
122#define VR80 0x80
123#define VR81 0x81
124#define VR82 0x82
125#define VR83 0x83
126#define VR84 0x84
127#define VR85 0x85
128#define VR86 0x86
129#define VR87 0x87
130
131/* VR88 GPIO 8
132 */
133#define VR88 0x88
134
135/* Graphics BIOS scratch 0
136 */
137#define VR8E 0x8E
138# define VR8E_PANEL_TYPE_MASK (0xf << 0)
139# define VR8E_PANEL_INTERFACE_CMOS (0 << 4)
140# define VR8E_PANEL_INTERFACE_LVDS (1 << 4)
141# define VR8E_FORCE_DEFAULT_PANEL (1 << 5)
142
143/* Graphics BIOS scratch 1
144 */
145#define VR8F 0x8F
146# define VR8F_VCH_PRESENT (1 << 0)
147# define VR8F_DISPLAY_CONN (1 << 1)
148# define VR8F_POWER_MASK (0x3c)
149# define VR8F_POWER_POS (2)
150
151
152struct ivch_priv {
153 bool quiet;
154
155 uint16_t width, height;
156
157 uint16_t save_VR01;
158 uint16_t save_VR40;
159};
160
161
162static void ivch_dump_regs(struct intel_dvo_device *dvo);
163
164/**
165 * Reads a register on the ivch.
166 *
167 * Each of the 256 registers are 16 bits long.
168 */
169static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
170{
171 struct ivch_priv *priv = dvo->dev_priv;
172 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
173 u8 out_buf[1];
174 u8 in_buf[2];
175
176 struct i2c_msg msgs[] = {
177 {
178 .addr = i2cbus->slave_addr,
179 .flags = I2C_M_RD,
180 .len = 0,
181 },
182 {
183 .addr = 0,
184 .flags = I2C_M_NOSTART,
185 .len = 1,
186 .buf = out_buf,
187 },
188 {
189 .addr = i2cbus->slave_addr,
190 .flags = I2C_M_RD | I2C_M_NOSTART,
191 .len = 2,
192 .buf = in_buf,
193 }
194 };
195
196 out_buf[0] = addr;
197
198 if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
199 *data = (in_buf[1] << 8) | in_buf[0];
200 return true;
201 }
202
203 if (!priv->quiet) {
204 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
205 addr, i2cbus->adapter.name, i2cbus->slave_addr);
206 }
207 return false;
208}
209
210/** Writes a 16-bit register on the ivch */
211static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
212{
213 struct ivch_priv *priv = dvo->dev_priv;
214 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
215 u8 out_buf[3];
216 struct i2c_msg msg = {
217 .addr = i2cbus->slave_addr,
218 .flags = 0,
219 .len = 3,
220 .buf = out_buf,
221 };
222
223 out_buf[0] = addr;
224 out_buf[1] = data & 0xff;
225 out_buf[2] = data >> 8;
226
227 if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
228 return true;
229
230 if (!priv->quiet) {
231 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
232 addr, i2cbus->adapter.name, i2cbus->slave_addr);
233 }
234
235 return false;
236}
237
238/** Probes the given bus and slave address for an ivch */
239static bool ivch_init(struct intel_dvo_device *dvo,
240 struct intel_i2c_chan *i2cbus)
241{
242 struct ivch_priv *priv;
243 uint16_t temp;
244
245 priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
246 if (priv == NULL)
247 return false;
248
249 dvo->i2c_bus = i2cbus;
250 dvo->i2c_bus->slave_addr = dvo->slave_addr;
251 dvo->dev_priv = priv;
252 priv->quiet = true;
253
254 if (!ivch_read(dvo, VR00, &temp))
255 goto out;
256 priv->quiet = false;
257
258 /* Since the identification bits are probably zeroes, which doesn't seem
259 * very unique, check that the value in the base address field matches
260 * the address it's responding on.
261 */
262 if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
263 DRM_DEBUG("ivch detect failed due to address mismatch "
264 "(%d vs %d)\n",
265 (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
266 goto out;
267 }
268
269 ivch_read(dvo, VR20, &priv->width);
270 ivch_read(dvo, VR21, &priv->height);
271
272 return true;
273
274out:
275 kfree(priv);
276 return false;
277}
278
279static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
280{
281 return connector_status_connected;
282}
283
284static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
285 struct drm_display_mode *mode)
286{
287 if (mode->clock > 112000)
288 return MODE_CLOCK_HIGH;
289
290 return MODE_OK;
291}
292
293/** Sets the power state of the panel connected to the ivch */
294static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
295{
296 int i;
297 uint16_t vr01, vr30, backlight;
298
299 /* Set the new power state of the panel. */
300 if (!ivch_read(dvo, VR01, &vr01))
301 return;
302
303 if (mode == DRM_MODE_DPMS_ON)
304 backlight = 1;
305 else
306 backlight = 0;
307 ivch_write(dvo, VR80, backlight);
308
309 if (mode == DRM_MODE_DPMS_ON)
310 vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
311 else
312 vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
313
314 ivch_write(dvo, VR01, vr01);
315
316 /* Wait for the panel to make its state transition */
317 for (i = 0; i < 100; i++) {
318 if (!ivch_read(dvo, VR30, &vr30))
319 break;
320
321 if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
322 break;
323 udelay(1000);
324 }
325 /* wait some more; vch may fail to resync sometimes without this */
326 udelay(16 * 1000);
327}
328
329static void ivch_mode_set(struct intel_dvo_device *dvo,
330 struct drm_display_mode *mode,
331 struct drm_display_mode *adjusted_mode)
332{
333 uint16_t vr40 = 0;
334 uint16_t vr01;
335
336 vr01 = 0;
337 vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
338 VR40_HORIZONTAL_INTERP_ENABLE);
339
340 if (mode->hdisplay != adjusted_mode->hdisplay ||
341 mode->vdisplay != adjusted_mode->vdisplay) {
342 uint16_t x_ratio, y_ratio;
343
344 vr01 |= VR01_PANEL_FIT_ENABLE;
345 vr40 |= VR40_CLOCK_GATING_ENABLE;
346 x_ratio = (((mode->hdisplay - 1) << 16) /
347 (adjusted_mode->hdisplay - 1)) >> 2;
348 y_ratio = (((mode->vdisplay - 1) << 16) /
349 (adjusted_mode->vdisplay - 1)) >> 2;
350 ivch_write(dvo, VR42, x_ratio);
351 ivch_write(dvo, VR41, y_ratio);
352 } else {
353 vr01 &= ~VR01_PANEL_FIT_ENABLE;
354 vr40 &= ~VR40_CLOCK_GATING_ENABLE;
355 }
356 vr40 &= ~VR40_AUTO_RATIO_ENABLE;
357
358 ivch_write(dvo, VR01, vr01);
359 ivch_write(dvo, VR40, vr40);
360
361 ivch_dump_regs(dvo);
362}
363
364static void ivch_dump_regs(struct intel_dvo_device *dvo)
365{
366 uint16_t val;
367
368 ivch_read(dvo, VR00, &val);
369 DRM_DEBUG("VR00: 0x%04x\n", val);
370 ivch_read(dvo, VR01, &val);
371 DRM_DEBUG("VR01: 0x%04x\n", val);
372 ivch_read(dvo, VR30, &val);
373 DRM_DEBUG("VR30: 0x%04x\n", val);
374 ivch_read(dvo, VR40, &val);
375 DRM_DEBUG("VR40: 0x%04x\n", val);
376
377 /* GPIO registers */
378 ivch_read(dvo, VR80, &val);
379 DRM_DEBUG("VR80: 0x%04x\n", val);
380 ivch_read(dvo, VR81, &val);
381 DRM_DEBUG("VR81: 0x%04x\n", val);
382 ivch_read(dvo, VR82, &val);
383 DRM_DEBUG("VR82: 0x%04x\n", val);
384 ivch_read(dvo, VR83, &val);
385 DRM_DEBUG("VR83: 0x%04x\n", val);
386 ivch_read(dvo, VR84, &val);
387 DRM_DEBUG("VR84: 0x%04x\n", val);
388 ivch_read(dvo, VR85, &val);
389 DRM_DEBUG("VR85: 0x%04x\n", val);
390 ivch_read(dvo, VR86, &val);
391 DRM_DEBUG("VR86: 0x%04x\n", val);
392 ivch_read(dvo, VR87, &val);
393 DRM_DEBUG("VR87: 0x%04x\n", val);
394 ivch_read(dvo, VR88, &val);
395 DRM_DEBUG("VR88: 0x%04x\n", val);
396
397 /* Scratch register 0 - AIM Panel type */
398 ivch_read(dvo, VR8E, &val);
399 DRM_DEBUG("VR8E: 0x%04x\n", val);
400
401 /* Scratch register 1 - Status register */
402 ivch_read(dvo, VR8F, &val);
403 DRM_DEBUG("VR8F: 0x%04x\n", val);
404}
405
406static void ivch_save(struct intel_dvo_device *dvo)
407{
408 struct ivch_priv *priv = dvo->dev_priv;
409
410 ivch_read(dvo, VR01, &priv->save_VR01);
411 ivch_read(dvo, VR40, &priv->save_VR40);
412}
413
414static void ivch_restore(struct intel_dvo_device *dvo)
415{
416 struct ivch_priv *priv = dvo->dev_priv;
417
418 ivch_write(dvo, VR01, priv->save_VR01);
419 ivch_write(dvo, VR40, priv->save_VR40);
420}
421
422static void ivch_destroy(struct intel_dvo_device *dvo)
423{
424 struct ivch_priv *priv = dvo->dev_priv;
425
426 if (priv) {
427 kfree(priv);
428 dvo->dev_priv = NULL;
429 }
430}
431
432struct intel_dvo_dev_ops ivch_ops = {
433 .init = ivch_init,
434 .dpms = ivch_dpms,
435 .save = ivch_save,
436 .restore = ivch_restore,
437 .mode_valid = ivch_mode_valid,
438 .mode_set = ivch_mode_set,
439 .detect = ivch_detect,
440 .dump_regs = ivch_dump_regs,
441 .destroy = ivch_destroy,
442};
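The VR41/VR42 comments above give the panel-fitting ratio as (((image - 1) << 16) / (panel - 1)) >> 2, and ivch_mode_set() applies exactly that arithmetic when the requested mode differs from the adjusted (native panel) mode. The standalone check below evaluates the formula once; the 1024x768 image and 1400x1050 panel sizes are made-up example values.

#include <stdint.h>
#include <stdio.h>

/* Fixed-point scale factor of (image - 1) / (panel - 1), as described in
 * the VR41/VR42 comments and computed in ivch_mode_set(). */
static uint16_t ivch_fit_ratio(unsigned int image, unsigned int panel)
{
	return (uint16_t)((((image - 1) << 16) / (panel - 1)) >> 2);
}

int main(void)
{
	/* Hypothetical 1024x768 source scaled onto a 1400x1050 panel. */
	printf("VR42 (horizontal ratio) = 0x%04x\n",
	       (unsigned int)ivch_fit_ratio(1024, 1400));
	printf("VR41 (vertical ratio)   = 0x%04x\n",
	       (unsigned int)ivch_fit_ratio(768, 1050));
	return 0;
}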
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
new file mode 100644
index 000000000000..033a4bb070b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -0,0 +1,302 @@
1/**************************************************************************
2
3Copyright © 2006 Dave Airlie
4
5All Rights Reserved.
6
7Permission is hereby granted, free of charge, to any person obtaining a
8copy of this software and associated documentation files (the
9"Software"), to deal in the Software without restriction, including
10without limitation the rights to use, copy, modify, merge, publish,
11distribute, sub license, and/or sell copies of the Software, and to
12permit persons to whom the Software is furnished to do so, subject to
13the following conditions:
14
15The above copyright notice and this permission notice (including the
16next paragraph) shall be included in all copies or substantial portions
17of the Software.
18
19THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27**************************************************************************/
28
29#include "dvo.h"
30
31#define SIL164_VID 0x0001
32#define SIL164_DID 0x0006
33
34#define SIL164_VID_LO 0x00
35#define SIL164_VID_HI 0x01
36#define SIL164_DID_LO 0x02
37#define SIL164_DID_HI 0x03
38#define SIL164_REV 0x04
39#define SIL164_RSVD 0x05
40#define SIL164_FREQ_LO 0x06
41#define SIL164_FREQ_HI 0x07
42
43#define SIL164_REG8 0x08
44#define SIL164_8_VEN (1<<5)
45#define SIL164_8_HEN (1<<4)
46#define SIL164_8_DSEL (1<<3)
47#define SIL164_8_BSEL (1<<2)
48#define SIL164_8_EDGE (1<<1)
49#define SIL164_8_PD (1<<0)
50
51#define SIL164_REG9 0x09
52#define SIL164_9_VLOW (1<<7)
53#define SIL164_9_MSEL_MASK (0x7<<4)
54#define SIL164_9_TSEL (1<<3)
55#define SIL164_9_RSEN (1<<2)
56#define SIL164_9_HTPLG (1<<1)
57#define SIL164_9_MDI (1<<0)
58
59#define SIL164_REGC 0x0c
60
61struct sil164_save_rec {
62 uint8_t reg8;
63 uint8_t reg9;
64 uint8_t regc;
65};
66
67struct sil164_priv {
68 //I2CDevRec d;
69 bool quiet;
70 struct sil164_save_rec save_regs;
71 struct sil164_save_rec mode_regs;
72};
73
74#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
75
76static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
77{
78 struct sil164_priv *sil = dvo->dev_priv;
79 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
80 u8 out_buf[2];
81 u8 in_buf[2];
82
83 struct i2c_msg msgs[] = {
84 {
85 .addr = i2cbus->slave_addr,
86 .flags = 0,
87 .len = 1,
88 .buf = out_buf,
89 },
90 {
91 .addr = i2cbus->slave_addr,
92 .flags = I2C_M_RD,
93 .len = 1,
94 .buf = in_buf,
95 }
96 };
97
98 out_buf[0] = addr;
99 out_buf[1] = 0;
100
101 if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
102 *ch = in_buf[0];
103 return true;
104 }
105
106 if (!sil->quiet) {
107 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
108 addr, i2cbus->adapter.name, i2cbus->slave_addr);
109 }
110 return false;
111}
112
113static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
114{
115 struct sil164_priv *sil = dvo->dev_priv;
116 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
117 uint8_t out_buf[2];
118 struct i2c_msg msg = {
119 .addr = i2cbus->slave_addr,
120 .flags = 0,
121 .len = 2,
122 .buf = out_buf,
123 };
124
125 out_buf[0] = addr;
126 out_buf[1] = ch;
127
128 if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
129 return true;
130
131 if (!sil->quiet) {
132 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
133 addr, i2cbus->adapter.name, i2cbus->slave_addr);
134 }
135
136 return false;
137}
138
139/* Silicon Image 164 driver for chip on i2c bus */
140static bool sil164_init(struct intel_dvo_device *dvo,
141 struct intel_i2c_chan *i2cbus)
142{
143 /* this will detect the SIL164 chip on the specified i2c bus */
144 struct sil164_priv *sil;
145 unsigned char ch;
146
147 sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
148 if (sil == NULL)
149 return false;
150
151 dvo->i2c_bus = i2cbus;
152 dvo->i2c_bus->slave_addr = dvo->slave_addr;
153 dvo->dev_priv = sil;
154 sil->quiet = true;
155
156 if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
157 goto out;
158
159 if (ch != (SIL164_VID & 0xff)) {
160 DRM_DEBUG("sil164 not detected; got 0x%02x from %s slave %d.\n",
161 ch, i2cbus->adapter.name, i2cbus->slave_addr);
162 goto out;
163 }
164
165 if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
166 goto out;
167
168 if (ch != (SIL164_DID & 0xff)) {
169 DRM_DEBUG("sil164 not detected; got 0x%02x from %s slave %d.\n",
170 ch, i2cbus->adapter.name, i2cbus->slave_addr);
171 goto out;
172 }
173 sil->quiet = false;
174
175 DRM_DEBUG("sil164 DVO controller initialized successfully\n");
176 return true;
177
178out:
179 kfree(sil);
180 return false;
181}
182
183static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
184{
185 uint8_t reg9;
186
187 sil164_readb(dvo, SIL164_REG9, &reg9);
188
189 if (reg9 & SIL164_9_HTPLG)
190 return connector_status_connected;
191 else
192 return connector_status_disconnected;
193}
194
195static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
196 struct drm_display_mode *mode)
197{
198 return MODE_OK;
199}
200
201static void sil164_mode_set(struct intel_dvo_device *dvo,
202 struct drm_display_mode *mode,
203 struct drm_display_mode *adjusted_mode)
204{
205 /* As long as the basics are set up, since we don't have clock
206 * dependencies in the mode setup, we can just leave the
207 * registers alone and everything will work fine.
208 */
209 /* recommended programming sequence from doc */
210 /*sil164_writeb(sil, 0x08, 0x30);
211 sil164_writeb(sil, 0x09, 0x00);
212 sil164_writeb(sil, 0x0a, 0x90);
213 sil164_writeb(sil, 0x0c, 0x89);
214 sil164_writeb(sil, 0x08, 0x31);*/
215 /* don't do much */
216 return;
217}
218
219/* set the SIL164 power state */
220static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
221{
222 bool ret;
223 unsigned char ch;
224
225 ret = sil164_readb(dvo, SIL164_REG8, &ch);
226 if (!ret)
227 return;
228
229 if (mode == DRM_MODE_DPMS_ON)
230 ch |= SIL164_8_PD;
231 else
232 ch &= ~SIL164_8_PD;
233
234 sil164_writeb(dvo, SIL164_REG8, ch);
235 return;
236}
237
238static void sil164_dump_regs(struct intel_dvo_device *dvo)
239{
240 uint8_t val;
241
242 sil164_readb(dvo, SIL164_FREQ_LO, &val);
243 DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
244 sil164_readb(dvo, SIL164_FREQ_HI, &val);
245 DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
246 sil164_readb(dvo, SIL164_REG8, &val);
247 DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
248 sil164_readb(dvo, SIL164_REG9, &val);
249 DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
250 sil164_readb(dvo, SIL164_REGC, &val);
251 DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
252}
253
254static void sil164_save(struct intel_dvo_device *dvo)
255{
256 struct sil164_priv *sil = dvo->dev_priv;
257
258 if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
259 return;
260
261 if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
262 return;
263
264 if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
265 return;
266
267 return;
268}
269
270static void sil164_restore(struct intel_dvo_device *dvo)
271{
272 struct sil164_priv *sil = dvo->dev_priv;
273
274 /* Restore it powered down initially */
275 sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
276
277 sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
278 sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
279 sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
280}
281
282static void sil164_destroy(struct intel_dvo_device *dvo)
283{
284 struct sil164_priv *sil = dvo->dev_priv;
285
286 if (sil) {
287 kfree(sil);
288 dvo->dev_priv = NULL;
289 }
290}
291
292struct intel_dvo_dev_ops sil164_ops = {
293 .init = sil164_init,
294 .detect = sil164_detect,
295 .mode_valid = sil164_mode_valid,
296 .mode_set = sil164_mode_set,
297 .dpms = sil164_dpms,
298 .dump_regs = sil164_dump_regs,
299 .save = sil164_save,
300 .restore = sil164_restore,
301 .destroy = sil164_destroy,
302};
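sil164_init() above checks only the low bytes of the vendor and device IDs against SIL164_VID and SIL164_DID, while the TFP410 driver that follows reads both halves and reassembles a 16-bit value before comparing. The sketch below shows that low/high combination against the SiI164 constants; the hard-coded bytes stand in for values that would be read back over i2c and are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* ID words from the SIL164 register block above. */
#define SIL164_VID	0x0001
#define SIL164_DID	0x0006

/* Combine LO/HI register bytes into one 16-bit ID, the way
 * tfp410_getid() does for the TFP410. */
static uint16_t id_from_bytes(uint8_t lo, uint8_t hi)
{
	return (uint16_t)((hi << 8) | lo);
}

int main(void)
{
	/* Bytes a SiI164 would return from SIL164_VID_LO / SIL164_VID_HI. */
	uint8_t vid_lo = 0x01, vid_hi = 0x00;

	if (id_from_bytes(vid_lo, vid_hi) == SIL164_VID)
		printf("vendor ID matches SiI164\n");
	return 0;
}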
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
new file mode 100644
index 000000000000..207fda806ebf
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -0,0 +1,335 @@
1/*
2 * Copyright © 2007 Dave Mueller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Dave Mueller <dave.mueller@gmx.ch>
25 *
26 */
27
28#include "dvo.h"
29
30/* register definitions according to the TFP410 data sheet */
31#define TFP410_VID 0x014C
32#define TFP410_DID 0x0410
33
34#define TFP410_VID_LO 0x00
35#define TFP410_VID_HI 0x01
36#define TFP410_DID_LO 0x02
37#define TFP410_DID_HI 0x03
38#define TFP410_REV 0x04
39
40#define TFP410_CTL_1 0x08
41#define TFP410_CTL_1_TDIS (1<<6)
42#define TFP410_CTL_1_VEN (1<<5)
43#define TFP410_CTL_1_HEN (1<<4)
44#define TFP410_CTL_1_DSEL (1<<3)
45#define TFP410_CTL_1_BSEL (1<<2)
46#define TFP410_CTL_1_EDGE (1<<1)
47#define TFP410_CTL_1_PD (1<<0)
48
49#define TFP410_CTL_2 0x09
50#define TFP410_CTL_2_VLOW (1<<7)
51#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
52#define TFP410_CTL_2_MSEL (1<<4)
53#define TFP410_CTL_2_TSEL (1<<3)
54#define TFP410_CTL_2_RSEN (1<<2)
55#define TFP410_CTL_2_HTPLG (1<<1)
56#define TFP410_CTL_2_MDI (1<<0)
57
58#define TFP410_CTL_3 0x0A
59#define TFP410_CTL_3_DK_MASK (0x7<<5)
60#define TFP410_CTL_3_DK (1<<5)
61#define TFP410_CTL_3_DKEN (1<<4)
62#define TFP410_CTL_3_CTL_MASK (0x7<<1)
63#define TFP410_CTL_3_CTL (1<<1)
64
65#define TFP410_USERCFG 0x0B
66
67#define TFP410_DE_DLY 0x32
68
69#define TFP410_DE_CTL 0x33
70#define TFP410_DE_CTL_DEGEN (1<<6)
71#define TFP410_DE_CTL_VSPOL (1<<5)
72#define TFP410_DE_CTL_HSPOL (1<<4)
73#define TFP410_DE_CTL_DEDLY8 (1<<0)
74
75#define TFP410_DE_TOP 0x34
76
77#define TFP410_DE_CNT_LO 0x36
78#define TFP410_DE_CNT_HI 0x37
79
80#define TFP410_DE_LIN_LO 0x38
81#define TFP410_DE_LIN_HI 0x39
82
83#define TFP410_H_RES_LO 0x3A
84#define TFP410_H_RES_HI 0x3B
85
86#define TFP410_V_RES_LO 0x3C
87#define TFP410_V_RES_HI 0x3D
88
89struct tfp410_save_rec {
90 uint8_t ctl1;
91 uint8_t ctl2;
92};
93
94struct tfp410_priv {
95 bool quiet;
96
97 struct tfp410_save_rec saved_reg;
98 struct tfp410_save_rec mode_reg;
99};
100
101static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
102{
103 struct tfp410_priv *tfp = dvo->dev_priv;
104 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
105 u8 out_buf[2];
106 u8 in_buf[2];
107
108 struct i2c_msg msgs[] = {
109 {
110 .addr = i2cbus->slave_addr,
111 .flags = 0,
112 .len = 1,
113 .buf = out_buf,
114 },
115 {
116 .addr = i2cbus->slave_addr,
117 .flags = I2C_M_RD,
118 .len = 1,
119 .buf = in_buf,
120 }
121 };
122
123 out_buf[0] = addr;
124 out_buf[1] = 0;
125
126 if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
127 *ch = in_buf[0];
128 return true;
129 }
130
131 if (!tfp->quiet) {
132 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
133 addr, i2cbus->adapter.name, i2cbus->slave_addr);
134 }
135 return false;
136}
137
138static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
139{
140 struct tfp410_priv *tfp = dvo->dev_priv;
141 struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
142 uint8_t out_buf[2];
143 struct i2c_msg msg = {
144 .addr = i2cbus->slave_addr,
145 .flags = 0,
146 .len = 2,
147 .buf = out_buf,
148 };
149
150 out_buf[0] = addr;
151 out_buf[1] = ch;
152
153 if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
154 return true;
155
156 if (!tfp->quiet) {
157 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
158 addr, i2cbus->adapter.name, i2cbus->slave_addr);
159 }
160
161 return false;
162}
163
164static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
165{
166 uint8_t ch1, ch2;
167
168 if (tfp410_readb(dvo, addr+0, &ch1) &&
169 tfp410_readb(dvo, addr+1, &ch2))
170 return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
171
172 return -1;
173}
174
175/* Ti TFP410 driver for chip on i2c bus */
176static bool tfp410_init(struct intel_dvo_device *dvo,
177 struct intel_i2c_chan *i2cbus)
178{
179 /* this will detect the tfp410 chip on the specified i2c bus */
180 struct tfp410_priv *tfp;
181 int id;
182
183 tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
184 if (tfp == NULL)
185 return false;
186
187 dvo->i2c_bus = i2cbus;
188 dvo->i2c_bus->slave_addr = dvo->slave_addr;
189 dvo->dev_priv = tfp;
190 tfp->quiet = true;
191
192 if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
193 DRM_DEBUG("tfp410 not detected; got VID 0x%X from %s slave %d.\n",
194 id, i2cbus->adapter.name, i2cbus->slave_addr);
195 goto out;
196 }
197
198 if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
199 DRM_DEBUG("tfp410 not detected; got DID 0x%X from %s slave %d.\n",
200 id, i2cbus->adapter.name, i2cbus->slave_addr);
201 goto out;
202 }
203 tfp->quiet = false;
204 return true;
205out:
206 kfree(tfp);
207 return false;
208}
209
210static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
211{
212 enum drm_connector_status ret = connector_status_disconnected;
213 uint8_t ctl2;
214
215 if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
216 if (ctl2 & TFP410_CTL_2_HTPLG)
217 ret = connector_status_connected;
218 else
219 ret = connector_status_disconnected;
220 }
221
222 return ret;
223}
224
225static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
226 struct drm_display_mode *mode)
227{
228 return MODE_OK;
229}
230
231static void tfp410_mode_set(struct intel_dvo_device *dvo,
232 struct drm_display_mode *mode,
233 struct drm_display_mode *adjusted_mode)
234{
235 /* As long as the basics are set up, since we don't have clock dependencies
236 * in the mode setup, we can just leave the registers alone and everything
237 * will work fine.
238 */
239 /* don't do much */
240 return;
241}
242
243/* set the tfp410 power state */
244static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
245{
246 uint8_t ctl1;
247
248 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
249 return;
250
251 if (mode == DRM_MODE_DPMS_ON)
252 ctl1 |= TFP410_CTL_1_PD;
253 else
254 ctl1 &= ~TFP410_CTL_1_PD;
255
256 tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
257}
258
259static void tfp410_dump_regs(struct intel_dvo_device *dvo)
260{
261 uint8_t val, val2;
262
263 tfp410_readb(dvo, TFP410_REV, &val);
264 DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
265 tfp410_readb(dvo, TFP410_CTL_1, &val);
266 DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
267 tfp410_readb(dvo, TFP410_CTL_2, &val);
268 DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
269 tfp410_readb(dvo, TFP410_CTL_3, &val);
270 DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
271 tfp410_readb(dvo, TFP410_USERCFG, &val);
272 DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
273 tfp410_readb(dvo, TFP410_DE_DLY, &val);
274 DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
275 tfp410_readb(dvo, TFP410_DE_CTL, &val);
276 DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
277 tfp410_readb(dvo, TFP410_DE_TOP, &val);
278 DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
279 tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
280 tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
281 DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
282 tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
283 tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
284 DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
285 tfp410_readb(dvo, TFP410_H_RES_LO, &val);
286 tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
287 DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
288 tfp410_readb(dvo, TFP410_V_RES_LO, &val);
289 tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
290 DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
291}
292
293static void tfp410_save(struct intel_dvo_device *dvo)
294{
295 struct tfp410_priv *tfp = dvo->dev_priv;
296
297 if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
298 return;
299
300 if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
301 return;
302}
303
304static void tfp410_restore(struct intel_dvo_device *dvo)
305{
306 struct tfp410_priv *tfp = dvo->dev_priv;
307
308 /* Restore it powered down initially */
309 tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
310
311 tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
312 tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
313}
314
315static void tfp410_destroy(struct intel_dvo_device *dvo)
316{
317 struct tfp410_priv *tfp = dvo->dev_priv;
318
319 if (tfp) {
320 kfree(tfp);
321 dvo->dev_priv = NULL;
322 }
323}
324
325struct intel_dvo_dev_ops tfp410_ops = {
326 .init = tfp410_init,
327 .detect = tfp410_detect,
328 .mode_valid = tfp410_mode_valid,
329 .mode_set = tfp410_mode_set,
330 .dpms = tfp410_dpms,
331 .dump_regs = tfp410_dump_regs,
332 .save = tfp410_save,
333 .restore = tfp410_restore,
334 .destroy = tfp410_destroy,
335};
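Each of the five new files exports a struct intel_dvo_dev_ops table (ch7017_ops, ch7xxx_ops, ivch_ops, sil164_ops, tfp410_ops); the code that walks those tables is not part of this hunk. As a rough, hypothetical illustration of how such an ops table is typically consumed, the user-space analogue below probes each driver's init hook and keeps using the first one that claims the hardware. All of the types and names in it are invented for the example.

#include <stdbool.h>
#include <stdio.h>

/* Invented, user-space stand-in for the intel_dvo_dev_ops pattern:
 * a driver is a table of hooks, and the core tries each table's init
 * hook until one claims the device. */
struct demo_dvo_ops {
	const char *name;
	bool (*init)(void);
	void (*dpms)(int on);
};

static bool demo_init_absent(void)  { return false; }
static bool demo_init_present(void) { return true;  }
static void demo_dpms(int on)       { printf("dpms %s\n", on ? "on" : "off"); }

static const struct demo_dvo_ops demo_drivers[] = {
	{ "chip-a", demo_init_absent,  demo_dpms },
	{ "chip-b", demo_init_present, demo_dpms },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_drivers) / sizeof(demo_drivers[0]); i++) {
		if (demo_drivers[i].init()) {
			printf("probed %s\n", demo_drivers[i].name);
			demo_drivers[i].dpms(1);
			break;
		}
	}
	return 0;
}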
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index afa8a12cd009..3d7082af5b72 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -28,6 +28,8 @@
28 28
29#include "drmP.h" 29#include "drmP.h"
30#include "drm.h" 30#include "drm.h"
31#include "drm_crtc_helper.h"
32#include "intel_drv.h"
31#include "i915_drm.h" 33#include "i915_drm.h"
32#include "i915_drv.h" 34#include "i915_drv.h"
33 35
@@ -39,6 +41,7 @@
39int i915_wait_ring(struct drm_device * dev, int n, const char *caller) 41int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
40{ 42{
41 drm_i915_private_t *dev_priv = dev->dev_private; 43 drm_i915_private_t *dev_priv = dev->dev_private;
44 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
42 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 45 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
43 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 46 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
44 u32 last_acthd = I915_READ(acthd_reg); 47 u32 last_acthd = I915_READ(acthd_reg);
@@ -55,8 +58,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
55 if (ring->space >= n) 58 if (ring->space >= n)
56 return 0; 59 return 0;
57 60
58 if (dev_priv->sarea_priv) 61 if (master_priv->sarea_priv)
59 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 62 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
60 63
61 if (ring->head != last_head) 64 if (ring->head != last_head)
62 i = 0; 65 i = 0;
@@ -121,16 +124,28 @@ static void i915_free_hws(struct drm_device *dev)
121void i915_kernel_lost_context(struct drm_device * dev) 124void i915_kernel_lost_context(struct drm_device * dev)
122{ 125{
123 drm_i915_private_t *dev_priv = dev->dev_private; 126 drm_i915_private_t *dev_priv = dev->dev_private;
127 struct drm_i915_master_private *master_priv;
124 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 128 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
125 129
130 /*
131 * We should never lose context on the ring with modesetting
132 * as we don't expose it to userspace
133 */
134 if (drm_core_check_feature(dev, DRIVER_MODESET))
135 return;
136
126 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 137 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
127 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 138 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
128 ring->space = ring->head - (ring->tail + 8); 139 ring->space = ring->head - (ring->tail + 8);
129 if (ring->space < 0) 140 if (ring->space < 0)
130 ring->space += ring->Size; 141 ring->space += ring->Size;
131 142
132 if (ring->head == ring->tail && dev_priv->sarea_priv) 143 if (!dev->primary->master)
133 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; 144 return;
145
146 master_priv = dev->primary->master->driver_priv;
147 if (ring->head == ring->tail && master_priv->sarea_priv)
148 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
134} 149}
135 150
136static int i915_dma_cleanup(struct drm_device * dev) 151static int i915_dma_cleanup(struct drm_device * dev)
@@ -154,25 +169,13 @@ static int i915_dma_cleanup(struct drm_device * dev)
154 if (I915_NEED_GFX_HWS(dev)) 169 if (I915_NEED_GFX_HWS(dev))
155 i915_free_hws(dev); 170 i915_free_hws(dev);
156 171
157 dev_priv->sarea = NULL;
158 dev_priv->sarea_priv = NULL;
159
160 return 0; 172 return 0;
161} 173}
162 174
163static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) 175static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
164{ 176{
165 drm_i915_private_t *dev_priv = dev->dev_private; 177 drm_i915_private_t *dev_priv = dev->dev_private;
166 178 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
167 dev_priv->sarea = drm_getsarea(dev);
168 if (!dev_priv->sarea) {
169 DRM_ERROR("can not find sarea!\n");
170 i915_dma_cleanup(dev);
171 return -EINVAL;
172 }
173
174 dev_priv->sarea_priv = (drm_i915_sarea_t *)
175 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
176 179
177 if (init->ring_size != 0) { 180 if (init->ring_size != 0) {
178 if (dev_priv->ring.ring_obj != NULL) { 181 if (dev_priv->ring.ring_obj != NULL) {
@@ -207,7 +210,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
207 dev_priv->back_offset = init->back_offset; 210 dev_priv->back_offset = init->back_offset;
208 dev_priv->front_offset = init->front_offset; 211 dev_priv->front_offset = init->front_offset;
209 dev_priv->current_page = 0; 212 dev_priv->current_page = 0;
210 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 213 if (master_priv->sarea_priv)
214 master_priv->sarea_priv->pf_current_page = 0;
211 215
212 /* Allow hardware batchbuffers unless told otherwise. 216 /* Allow hardware batchbuffers unless told otherwise.
213 */ 217 */
@@ -222,11 +226,6 @@ static int i915_dma_resume(struct drm_device * dev)
222 226
223 DRM_DEBUG("%s\n", __func__); 227 DRM_DEBUG("%s\n", __func__);
224 228
225 if (!dev_priv->sarea) {
226 DRM_ERROR("can not find sarea!\n");
227 return -EINVAL;
228 }
229
230 if (dev_priv->ring.map.handle == NULL) { 229 if (dev_priv->ring.map.handle == NULL) {
231 DRM_ERROR("can not ioremap virtual address for" 230 DRM_ERROR("can not ioremap virtual address for"
232 " ring buffer\n"); 231 " ring buffer\n");
@@ -435,13 +434,14 @@ i915_emit_box(struct drm_device *dev,
435static void i915_emit_breadcrumb(struct drm_device *dev) 434static void i915_emit_breadcrumb(struct drm_device *dev)
436{ 435{
437 drm_i915_private_t *dev_priv = dev->dev_private; 436 drm_i915_private_t *dev_priv = dev->dev_private;
437 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
438 RING_LOCALS; 438 RING_LOCALS;
439 439
440 dev_priv->counter++; 440 dev_priv->counter++;
441 if (dev_priv->counter > 0x7FFFFFFFUL) 441 if (dev_priv->counter > 0x7FFFFFFFUL)
442 dev_priv->counter = 0; 442 dev_priv->counter = 0;
443 if (dev_priv->sarea_priv) 443 if (master_priv->sarea_priv)
444 dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 444 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
445 445
446 BEGIN_LP_RING(4); 446 BEGIN_LP_RING(4);
447 OUT_RING(MI_STORE_DWORD_INDEX); 447 OUT_RING(MI_STORE_DWORD_INDEX);
@@ -537,15 +537,17 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
537static int i915_dispatch_flip(struct drm_device * dev) 537static int i915_dispatch_flip(struct drm_device * dev)
538{ 538{
539 drm_i915_private_t *dev_priv = dev->dev_private; 539 drm_i915_private_t *dev_priv = dev->dev_private;
540 struct drm_i915_master_private *master_priv =
541 dev->primary->master->driver_priv;
540 RING_LOCALS; 542 RING_LOCALS;
541 543
542 if (!dev_priv->sarea_priv) 544 if (!master_priv->sarea_priv)
543 return -EINVAL; 545 return -EINVAL;
544 546
545 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 547 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
546 __func__, 548 __func__,
547 dev_priv->current_page, 549 dev_priv->current_page,
548 dev_priv->sarea_priv->pf_current_page); 550 master_priv->sarea_priv->pf_current_page);
549 551
550 i915_kernel_lost_context(dev); 552 i915_kernel_lost_context(dev);
551 553
@@ -572,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
572 OUT_RING(0); 574 OUT_RING(0);
573 ADVANCE_LP_RING(); 575 ADVANCE_LP_RING();
574 576
575 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; 577 master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
576 578
577 BEGIN_LP_RING(4); 579 BEGIN_LP_RING(4);
578 OUT_RING(MI_STORE_DWORD_INDEX); 580 OUT_RING(MI_STORE_DWORD_INDEX);
@@ -581,7 +583,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
581 OUT_RING(0); 583 OUT_RING(0);
582 ADVANCE_LP_RING(); 584 ADVANCE_LP_RING();
583 585
584 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 586 master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
585 return 0; 587 return 0;
586} 588}
587 589
@@ -611,8 +613,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
611 struct drm_file *file_priv) 613 struct drm_file *file_priv)
612{ 614{
613 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 615 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
616 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
614 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 617 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
615 dev_priv->sarea_priv; 618 master_priv->sarea_priv;
616 drm_i915_batchbuffer_t *batch = data; 619 drm_i915_batchbuffer_t *batch = data;
617 int ret; 620 int ret;
618 621
@@ -644,8 +647,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
644 struct drm_file *file_priv) 647 struct drm_file *file_priv)
645{ 648{
646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 649 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
650 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
647 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 651 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
648 dev_priv->sarea_priv; 652 master_priv->sarea_priv;
649 drm_i915_cmdbuffer_t *cmdbuf = data; 653 drm_i915_cmdbuffer_t *cmdbuf = data;
650 int ret; 654 int ret;
651 655
@@ -774,6 +778,11 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
774 return -EINVAL; 778 return -EINVAL;
775 } 779 }
776 780
781 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
782 WARN(1, "tried to set status page when mode setting active\n");
783 return 0;
784 }
785
777 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); 786 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
778 787
779 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); 788 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
@@ -802,6 +811,214 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
802 return 0; 811 return 0;
803} 812}
804 813
814/**
815 * i915_probe_agp - get AGP bootup configuration
 816 * @dev: DRM device
817 * @aperture_size: returns AGP aperture configured size
818 * @preallocated_size: returns size of BIOS preallocated AGP space
819 *
820 * Since Intel integrated graphics are UMA, the BIOS has to set aside
821 * some RAM for the framebuffer at early boot. This code figures out
822 * how much was set aside so we can use it for our own purposes.
823 */
824static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
825 unsigned long *preallocated_size)
826{
827 struct pci_dev *bridge_dev;
828 u16 tmp = 0;
829 unsigned long overhead;
830
831 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
832 if (!bridge_dev) {
833 DRM_ERROR("bridge device not found\n");
834 return -1;
835 }
836
837 /* Get the fb aperture size and "stolen" memory amount. */
838 pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
839 pci_dev_put(bridge_dev);
840
841 *aperture_size = 1024 * 1024;
842 *preallocated_size = 1024 * 1024;
843
844 switch (dev->pdev->device) {
845 case PCI_DEVICE_ID_INTEL_82830_CGC:
846 case PCI_DEVICE_ID_INTEL_82845G_IG:
847 case PCI_DEVICE_ID_INTEL_82855GM_IG:
848 case PCI_DEVICE_ID_INTEL_82865_IG:
849 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
850 *aperture_size *= 64;
851 else
852 *aperture_size *= 128;
853 break;
854 default:
855 /* 9xx supports large sizes, just look at the length */
856 *aperture_size = pci_resource_len(dev->pdev, 2);
857 break;
858 }
859
860 /*
861 * Some of the preallocated space is taken by the GTT
862 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
863 */
864 if (IS_G4X(dev))
865 overhead = 4096;
866 else
867 overhead = (*aperture_size / 1024) + 4096;
868
869 switch (tmp & INTEL_855_GMCH_GMS_MASK) {
870 case INTEL_855_GMCH_GMS_STOLEN_1M:
871 break; /* 1M already */
872 case INTEL_855_GMCH_GMS_STOLEN_4M:
873 *preallocated_size *= 4;
874 break;
875 case INTEL_855_GMCH_GMS_STOLEN_8M:
876 *preallocated_size *= 8;
877 break;
878 case INTEL_855_GMCH_GMS_STOLEN_16M:
879 *preallocated_size *= 16;
880 break;
881 case INTEL_855_GMCH_GMS_STOLEN_32M:
882 *preallocated_size *= 32;
883 break;
884 case INTEL_915G_GMCH_GMS_STOLEN_48M:
885 *preallocated_size *= 48;
886 break;
887 case INTEL_915G_GMCH_GMS_STOLEN_64M:
888 *preallocated_size *= 64;
889 break;
890 case INTEL_855_GMCH_GMS_DISABLED:
891 DRM_ERROR("video memory is disabled\n");
892 return -1;
893 default:
894 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
895 tmp & INTEL_855_GMCH_GMS_MASK);
896 return -1;
897 }
898 *preallocated_size -= overhead;
899
900 return 0;
901}
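/*
 * Worked example of the accounting above (an illustrative helper, not part
 * of the driver): the GTT costs 1KB per MB of aperture plus a 4KB popup
 * region, except on G4X where only the popup is carved out.  A 256MB
 * aperture on a pre-G4X part therefore loses 256KB + 4KB of the
 * BIOS-preallocated ("stolen") memory before GEM can use the rest.
 */
static unsigned long example_stolen_overhead(unsigned long aperture_size,
					     int is_g4x)
{
	if (is_g4x)
		return 4096;				/* popup only */
	return (aperture_size / 1024) + 4096;		/* GTT + popup */
}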
902
903static int i915_load_modeset_init(struct drm_device *dev)
904{
905 struct drm_i915_private *dev_priv = dev->dev_private;
906 unsigned long agp_size, prealloc_size;
907 int fb_bar = IS_I9XX(dev) ? 2 : 0;
908 int ret = 0;
909
910 dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
911 if (!dev->devname) {
912 ret = -ENOMEM;
913 goto out;
914 }
915
916 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
917 0xff000000;
918
919 DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base);
920
921 if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev)))
922 dev_priv->cursor_needs_physical = true;
923 else
924 dev_priv->cursor_needs_physical = false;
925
926 ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
927 if (ret)
928 goto kfree_devname;
929
930 /* Basic memrange allocator for stolen space (aka vram) */
931 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
932
 933 /* Let GEM manage from end of prealloc space to end of aperture */ 
934 i915_gem_do_init(dev, prealloc_size, agp_size);
935
936 ret = i915_gem_init_ringbuffer(dev);
937 if (ret)
938 goto kfree_devname;
939
940 dev_priv->mm.gtt_mapping =
941 io_mapping_create_wc(dev->agp->base,
942 dev->agp->agp_info.aper_size * 1024*1024);
943
944 /* Allow hardware batchbuffers unless told otherwise.
945 */
946 dev_priv->allow_batchbuffer = 1;
947
948 ret = intel_init_bios(dev);
949 if (ret)
950 DRM_INFO("failed to find VBIOS tables\n");
951
952 ret = drm_irq_install(dev);
953 if (ret)
954 goto destroy_ringbuffer;
955
956 /* FIXME: re-add hotplug support */
957#if 0
958 ret = drm_hotplug_init(dev);
959 if (ret)
960 goto destroy_ringbuffer;
961#endif
962
963 /* Always safe in the mode setting case. */
964 /* FIXME: do pre/post-mode set stuff in core KMS code */
965 dev->vblank_disable_allowed = 1;
966
967 /*
968 * Initialize the hardware status page IRQ location.
969 */
970
971 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
972
973 intel_modeset_init(dev);
974
975 drm_helper_initial_config(dev, false);
976
977 return 0;
978
979destroy_ringbuffer:
980 i915_gem_cleanup_ringbuffer(dev);
981kfree_devname:
982 kfree(dev->devname);
983out:
984 return ret;
985}
986
987int i915_master_create(struct drm_device *dev, struct drm_master *master)
988{
989 struct drm_i915_master_private *master_priv;
990
991 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
992 if (!master_priv)
993 return -ENOMEM;
994
995 master->driver_priv = master_priv;
996 return 0;
997}
998
999void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1000{
1001 struct drm_i915_master_private *master_priv = master->driver_priv;
1002
1003 if (!master_priv)
1004 return;
1005
1006 drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
1007
1008 master->driver_priv = NULL;
1009}
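/*
 * Minimal sketch of the per-master lookup the rest of this patch switches
 * to: the legacy SAREA now hangs off the current DRM master rather than
 * dev_priv, and may legitimately be NULL (KMS masters never map one).
 * The helper name is illustrative; the callers in this patch open-code it.
 */
static drm_i915_sarea_t *example_master_sarea(struct drm_device *dev)
{
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;

	return master_priv ? master_priv->sarea_priv : NULL;
}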
1010
1011/**
1012 * i915_driver_load - setup chip and create an initial config
1013 * @dev: DRM device
1014 * @flags: startup flags
1015 *
1016 * The driver load routine has to do several things:
1017 * - drive output discovery via intel_modeset_init()
1018 * - initialize the memory manager
1019 * - allocate initial config memory
1020 * - setup the DRM framebuffer with the allocated memory
1021 */
805int i915_driver_load(struct drm_device *dev, unsigned long flags) 1022int i915_driver_load(struct drm_device *dev, unsigned long flags)
806{ 1023{
807 struct drm_i915_private *dev_priv = dev->dev_private; 1024 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -829,6 +1046,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
829 size = drm_get_resource_len(dev, mmio_bar); 1046 size = drm_get_resource_len(dev, mmio_bar);
830 1047
831 dev_priv->regs = ioremap(base, size); 1048 dev_priv->regs = ioremap(base, size);
1049 if (!dev_priv->regs) {
1050 DRM_ERROR("failed to map registers\n");
1051 ret = -EIO;
1052 goto free_priv;
1053 }
832 1054
833#ifdef CONFIG_HIGHMEM64G 1055#ifdef CONFIG_HIGHMEM64G
834 /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ 1056 /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
@@ -844,7 +1066,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
844 if (!I915_NEED_GFX_HWS(dev)) { 1066 if (!I915_NEED_GFX_HWS(dev)) {
845 ret = i915_init_phys_hws(dev); 1067 ret = i915_init_phys_hws(dev);
846 if (ret != 0) 1068 if (ret != 0)
847 return ret; 1069 goto out_rmmap;
848 } 1070 }
849 1071
850 /* On the 945G/GM, the chipset reports the MSI capability on the 1072 /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -864,6 +1086,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
864 intel_opregion_init(dev); 1086 intel_opregion_init(dev);
865 1087
866 spin_lock_init(&dev_priv->user_irq_lock); 1088 spin_lock_init(&dev_priv->user_irq_lock);
1089 dev_priv->user_irq_refcount = 0;
867 1090
868 ret = drm_vblank_init(dev, I915_NUM_PIPE); 1091 ret = drm_vblank_init(dev, I915_NUM_PIPE);
869 1092
@@ -872,6 +1095,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
872 return ret; 1095 return ret;
873 } 1096 }
874 1097
1098 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1099 ret = i915_load_modeset_init(dev);
1100 if (ret < 0) {
1101 DRM_ERROR("failed to init modeset\n");
1102 goto out_rmmap;
1103 }
1104 }
1105
1106 return 0;
1107
1108out_rmmap:
1109 iounmap(dev_priv->regs);
1110free_priv:
1111 drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
875 return ret; 1112 return ret;
876} 1113}
877 1114
@@ -879,16 +1116,29 @@ int i915_driver_unload(struct drm_device *dev)
879{ 1116{
880 struct drm_i915_private *dev_priv = dev->dev_private; 1117 struct drm_i915_private *dev_priv = dev->dev_private;
881 1118
1119 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1120 io_mapping_free(dev_priv->mm.gtt_mapping);
1121 drm_irq_uninstall(dev);
1122 }
1123
882 if (dev->pdev->msi_enabled) 1124 if (dev->pdev->msi_enabled)
883 pci_disable_msi(dev->pdev); 1125 pci_disable_msi(dev->pdev);
884 1126
885 i915_free_hws(dev);
886
887 if (dev_priv->regs != NULL) 1127 if (dev_priv->regs != NULL)
888 iounmap(dev_priv->regs); 1128 iounmap(dev_priv->regs);
889 1129
890 intel_opregion_free(dev); 1130 intel_opregion_free(dev);
891 1131
1132 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1133 intel_modeset_cleanup(dev);
1134
1135 mutex_lock(&dev->struct_mutex);
1136 i915_gem_cleanup_ringbuffer(dev);
1137 mutex_unlock(&dev->struct_mutex);
1138 drm_mm_takedown(&dev_priv->vram);
1139 i915_gem_lastclose(dev);
1140 }
1141
892 drm_free(dev->dev_private, sizeof(drm_i915_private_t), 1142 drm_free(dev->dev_private, sizeof(drm_i915_private_t),
893 DRM_MEM_DRIVER); 1143 DRM_MEM_DRIVER);
894 1144
@@ -914,12 +1164,26 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
914 return 0; 1164 return 0;
915} 1165}
916 1166
1167/**
1168 * i915_driver_lastclose - clean up after all DRM clients have exited
1169 * @dev: DRM device
1170 *
1171 * Take care of cleaning up after all DRM clients have exited. In the
1172 * mode setting case, we want to restore the kernel's initial mode (just
1173 * in case the last client left us in a bad state).
1174 *
1175 * Additionally, in the non-mode setting case, we'll tear down the AGP
 1176 * and DMA structures, since the kernel won't be using them, and clean
1177 * up any GEM state.
1178 */
917void i915_driver_lastclose(struct drm_device * dev) 1179void i915_driver_lastclose(struct drm_device * dev)
918{ 1180{
919 drm_i915_private_t *dev_priv = dev->dev_private; 1181 drm_i915_private_t *dev_priv = dev->dev_private;
920 1182
921 if (!dev_priv) 1183 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1184 intelfb_restore();
922 return; 1185 return;
1186 }
923 1187
924 i915_gem_lastclose(dev); 1188 i915_gem_lastclose(dev);
925 1189
@@ -932,7 +1196,8 @@ void i915_driver_lastclose(struct drm_device * dev)
932void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1196void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
933{ 1197{
934 drm_i915_private_t *dev_priv = dev->dev_private; 1198 drm_i915_private_t *dev_priv = dev->dev_private;
935 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 1199 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1200 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
936} 1201}
937 1202
938void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) 1203void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -972,6 +1237,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
972 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), 1237 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
973 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), 1238 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
974 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), 1239 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
1240 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
975 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), 1241 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
976 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), 1242 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
977 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), 1243 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a80ead215282..f8b3df0926c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,11 +33,22 @@
33#include "i915_drv.h" 33#include "i915_drv.h"
34 34
35#include "drm_pciids.h" 35#include "drm_pciids.h"
36#include <linux/console.h>
37
38static unsigned int i915_modeset = -1;
39module_param_named(modeset, i915_modeset, int, 0400);
40
41unsigned int i915_fbpercrtc = 0;
42module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
36 43
37static struct pci_device_id pciidlist[] = { 44static struct pci_device_id pciidlist[] = {
38 i915_PCI_IDS 45 i915_PCI_IDS
39}; 46};
40 47
48#if defined(CONFIG_DRM_I915_KMS)
49MODULE_DEVICE_TABLE(pci, pciidlist);
50#endif
51
41static int i915_suspend(struct drm_device *dev, pm_message_t state) 52static int i915_suspend(struct drm_device *dev, pm_message_t state)
42{ 53{
43 struct drm_i915_private *dev_priv = dev->dev_private; 54 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -81,6 +92,10 @@ static int i915_resume(struct drm_device *dev)
81 return 0; 92 return 0;
82} 93}
83 94
95static struct vm_operations_struct i915_gem_vm_ops = {
96 .fault = i915_gem_fault,
97};
98
84static struct drm_driver driver = { 99static struct drm_driver driver = {
85 /* don't use mtrr's here, the Xserver or user space app should 100 /* don't use mtrr's here, the Xserver or user space app should
86 * deal with them for intel hardware. 101 * deal with them for intel hardware.
@@ -107,17 +122,20 @@ static struct drm_driver driver = {
107 .reclaim_buffers = drm_core_reclaim_buffers, 122 .reclaim_buffers = drm_core_reclaim_buffers,
108 .get_map_ofs = drm_core_get_map_ofs, 123 .get_map_ofs = drm_core_get_map_ofs,
109 .get_reg_ofs = drm_core_get_reg_ofs, 124 .get_reg_ofs = drm_core_get_reg_ofs,
125 .master_create = i915_master_create,
126 .master_destroy = i915_master_destroy,
110 .proc_init = i915_gem_proc_init, 127 .proc_init = i915_gem_proc_init,
111 .proc_cleanup = i915_gem_proc_cleanup, 128 .proc_cleanup = i915_gem_proc_cleanup,
112 .gem_init_object = i915_gem_init_object, 129 .gem_init_object = i915_gem_init_object,
113 .gem_free_object = i915_gem_free_object, 130 .gem_free_object = i915_gem_free_object,
131 .gem_vm_ops = &i915_gem_vm_ops,
114 .ioctls = i915_ioctls, 132 .ioctls = i915_ioctls,
115 .fops = { 133 .fops = {
116 .owner = THIS_MODULE, 134 .owner = THIS_MODULE,
117 .open = drm_open, 135 .open = drm_open,
118 .release = drm_release, 136 .release = drm_release,
119 .ioctl = drm_ioctl, 137 .ioctl = drm_ioctl,
120 .mmap = drm_mmap, 138 .mmap = drm_gem_mmap,
121 .poll = drm_poll, 139 .poll = drm_poll,
122 .fasync = drm_fasync, 140 .fasync = drm_fasync,
123#ifdef CONFIG_COMPAT 141#ifdef CONFIG_COMPAT
@@ -141,6 +159,28 @@ static struct drm_driver driver = {
141static int __init i915_init(void) 159static int __init i915_init(void)
142{ 160{
143 driver.num_ioctls = i915_max_ioctl; 161 driver.num_ioctls = i915_max_ioctl;
162
163 /*
164 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 165 * explicitly disabled with the module parameter.
166 *
167 * Otherwise, just follow the parameter (defaulting to off).
168 *
169 * Allow optional vga_text_mode_force boot option to override
170 * the default behavior.
171 */
172#if defined(CONFIG_DRM_I915_KMS)
173 if (i915_modeset != 0)
174 driver.driver_features |= DRIVER_MODESET;
175#endif
176 if (i915_modeset == 1)
177 driver.driver_features |= DRIVER_MODESET;
178
179#ifdef CONFIG_VGA_CONSOLE
180 if (vgacon_text_force() && i915_modeset == -1)
181 driver.driver_features &= ~DRIVER_MODESET;
182#endif
183
144 return drm_init(&driver); 184 return drm_init(&driver);
145} 185}
146 186
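/*
 * The effective "use KMS?" decision made in i915_init() above, restated as
 * a pure function for readability (illustrative only): modeset is the
 * module parameter (-1 auto, 0 off, 1 on), kms_config reflects
 * CONFIG_DRM_I915_KMS, and vga_text_force reflects vgacon_text_force().
 */
static int example_want_kms(int modeset, int kms_config, int vga_text_force)
{
	int kms = 0;

	if (kms_config && modeset != 0)
		kms = 1;
	if (modeset == 1)
		kms = 1;
	if (vga_text_force && modeset == -1)
		kms = 0;
	return kms;
}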
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b3cc4731aa7c..4756e5cd6b5e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
31#define _I915_DRV_H_ 31#define _I915_DRV_H_
32 32
33#include "i915_reg.h" 33#include "i915_reg.h"
34#include "intel_bios.h"
34#include <linux/io-mapping.h> 35#include <linux/io-mapping.h>
35 36
36/* General customization: 37/* General customization:
@@ -103,15 +104,23 @@ struct intel_opregion {
103 int enabled; 104 int enabled;
104}; 105};
105 106
107struct drm_i915_master_private {
108 drm_local_map_t *sarea;
109 struct _drm_i915_sarea *sarea_priv;
110};
111#define I915_FENCE_REG_NONE -1
112
113struct drm_i915_fence_reg {
114 struct drm_gem_object *obj;
115};
116
106typedef struct drm_i915_private { 117typedef struct drm_i915_private {
107 struct drm_device *dev; 118 struct drm_device *dev;
108 119
109 int has_gem; 120 int has_gem;
110 121
111 void __iomem *regs; 122 void __iomem *regs;
112 drm_local_map_t *sarea;
113 123
114 drm_i915_sarea_t *sarea_priv;
115 drm_i915_ring_buffer_t ring; 124 drm_i915_ring_buffer_t ring;
116 125
117 drm_dma_handle_t *status_page_dmah; 126 drm_dma_handle_t *status_page_dmah;
@@ -144,8 +153,30 @@ typedef struct drm_i915_private {
144 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 153 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
145 int vblank_pipe; 154 int vblank_pipe;
146 155
156 bool cursor_needs_physical;
157
158 struct drm_mm vram;
159
160 int irq_enabled;
161
147 struct intel_opregion opregion; 162 struct intel_opregion opregion;
148 163
164 /* LVDS info */
165 int backlight_duty_cycle; /* restore backlight to this value */
166 bool panel_wants_dither;
167 struct drm_display_mode *panel_fixed_mode;
168 struct drm_display_mode *vbt_mode; /* if any */
169
170 /* Feature bits from the VBIOS */
171 unsigned int int_tv_support:1;
172 unsigned int lvds_dither:1;
173 unsigned int lvds_vbt:1;
174 unsigned int int_crt_support:1;
175
176 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
177 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
178 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
179
149 /* Register state */ 180 /* Register state */
150 u8 saveLBB; 181 u8 saveLBB;
151 u32 saveDSPACNTR; 182 u32 saveDSPACNTR;
@@ -364,6 +395,21 @@ struct drm_i915_gem_object {
364 * This is the same as gtt_space->start 395 * This is the same as gtt_space->start
365 */ 396 */
366 uint32_t gtt_offset; 397 uint32_t gtt_offset;
398 /**
399 * Required alignment for the object
400 */
401 uint32_t gtt_alignment;
402 /**
403 * Fake offset for use by mmap(2)
404 */
405 uint64_t mmap_offset;
406
407 /**
408 * Fence register bits (if any) for this object. Will be set
409 * as needed when mapped into the GTT.
410 * Protected by dev->struct_mutex.
411 */
412 int fence_reg;
367 413
368 /** Boolean whether this object has a valid gtt offset. */ 414 /** Boolean whether this object has a valid gtt offset. */
369 int gtt_bound; 415 int gtt_bound;
@@ -376,6 +422,7 @@ struct drm_i915_gem_object {
376 422
377 /** Current tiling mode for the object. */ 423 /** Current tiling mode for the object. */
378 uint32_t tiling_mode; 424 uint32_t tiling_mode;
425 uint32_t stride;
379 426
380 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ 427 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
381 uint32_t agp_type; 428 uint32_t agp_type;
@@ -385,6 +432,10 @@ struct drm_i915_gem_object {
385 * flags which individual pages are valid. 432 * flags which individual pages are valid.
386 */ 433 */
387 uint8_t *page_cpu_valid; 434 uint8_t *page_cpu_valid;
435
436 /** User space pin count and filp owning the pin */
437 uint32_t user_pin_count;
438 struct drm_file *pin_filp;
388}; 439};
389 440
390/** 441/**
@@ -414,8 +465,19 @@ struct drm_i915_file_private {
414 } mm; 465 } mm;
415}; 466};
416 467
468enum intel_chip_family {
469 CHIP_I8XX = 0x01,
470 CHIP_I9XX = 0x02,
471 CHIP_I915 = 0x04,
472 CHIP_I965 = 0x08,
473};
474
417extern struct drm_ioctl_desc i915_ioctls[]; 475extern struct drm_ioctl_desc i915_ioctls[];
418extern int i915_max_ioctl; 476extern int i915_max_ioctl;
477extern unsigned int i915_fbpercrtc;
478
479extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
480extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
419 481
420 /* i915_dma.c */ 482 /* i915_dma.c */
421extern void i915_kernel_lost_context(struct drm_device * dev); 483extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -441,6 +503,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
441 struct drm_file *file_priv); 503 struct drm_file *file_priv);
442void i915_user_irq_get(struct drm_device *dev); 504void i915_user_irq_get(struct drm_device *dev);
443void i915_user_irq_put(struct drm_device *dev); 505void i915_user_irq_put(struct drm_device *dev);
506extern void i915_enable_interrupt (struct drm_device *dev);
444 507
445extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 508extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
446extern void i915_driver_irq_preinstall(struct drm_device * dev); 509extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -487,6 +550,8 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
487 struct drm_file *file_priv); 550 struct drm_file *file_priv);
488int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 551int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
489 struct drm_file *file_priv); 552 struct drm_file *file_priv);
553int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
554 struct drm_file *file_priv);
490int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 555int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
491 struct drm_file *file_priv); 556 struct drm_file *file_priv);
492int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 557int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -523,6 +588,16 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev);
523void i915_gem_retire_requests(struct drm_device *dev); 588void i915_gem_retire_requests(struct drm_device *dev);
524void i915_gem_retire_work_handler(struct work_struct *work); 589void i915_gem_retire_work_handler(struct work_struct *work);
525void i915_gem_clflush_object(struct drm_gem_object *obj); 590void i915_gem_clflush_object(struct drm_gem_object *obj);
591int i915_gem_object_set_domain(struct drm_gem_object *obj,
592 uint32_t read_domains,
593 uint32_t write_domain);
594int i915_gem_init_ringbuffer(struct drm_device *dev);
595void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
596int i915_gem_do_init(struct drm_device *dev, unsigned long start,
597 unsigned long end);
598int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
599int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
600 int write);
526 601
527/* i915_gem_tiling.c */ 602/* i915_gem_tiling.c */
528void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 603void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -561,6 +636,10 @@ static inline void opregion_asle_intr(struct drm_device *dev) { return; }
561static inline void opregion_enable_asle(struct drm_device *dev) { return; } 636static inline void opregion_enable_asle(struct drm_device *dev) { return; }
562#endif 637#endif
563 638
639/* modesetting */
640extern void intel_modeset_init(struct drm_device *dev);
641extern void intel_modeset_cleanup(struct drm_device *dev);
642
564/** 643/**
565 * Lock test for when it's just for synchronization of ring access. 644 * Lock test for when it's just for synchronization of ring access.
566 * 645 *
@@ -578,6 +657,13 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
578#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) 657#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
579#define I915_READ8(reg) readb(dev_priv->regs + (reg)) 658#define I915_READ8(reg) readb(dev_priv->regs + (reg))
580#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) 659#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
660#ifdef writeq
661#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
662#else
663#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \
664 writel(upper_32_bits(val), dev_priv->regs + \
665 (reg) + 4))
666#endif
581 667
582#define I915_VERBOSE 0 668#define I915_VERBOSE 0
583 669
@@ -660,7 +746,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
660 746
661#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ 747#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
662 (dev)->pci_device == 0x2E12 || \ 748 (dev)->pci_device == 0x2E12 || \
663 (dev)->pci_device == 0x2E22) 749 (dev)->pci_device == 0x2E22 || \
750 IS_GM45(dev))
664 751
665#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ 752#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
666 (dev)->pci_device == 0x29B2 || \ 753 (dev)->pci_device == 0x29B2 || \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24fe8c10b4b2..cc2ca5561feb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -30,6 +30,7 @@
30#include "i915_drm.h" 30#include "i915_drm.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include <linux/swap.h> 32#include <linux/swap.h>
33#include <linux/pci.h>
33 34
34#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 35#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
35 36
@@ -40,8 +41,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
40static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 41static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 42static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 43static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
43static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
44 int write);
45static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, 44static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
46 int write); 45 int write);
47static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, 46static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
@@ -51,34 +50,43 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
51static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 50static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
52static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 51static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
53static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 52static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
53static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
54 unsigned alignment);
55static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
56static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
57static int i915_gem_evict_something(struct drm_device *dev);
58
59int i915_gem_do_init(struct drm_device *dev, unsigned long start,
60 unsigned long end)
61{
62 drm_i915_private_t *dev_priv = dev->dev_private;
54 63
55static void 64 if (start >= end ||
56i915_gem_cleanup_ringbuffer(struct drm_device *dev); 65 (start & (PAGE_SIZE - 1)) != 0 ||
66 (end & (PAGE_SIZE - 1)) != 0) {
67 return -EINVAL;
68 }
69
70 drm_mm_init(&dev_priv->mm.gtt_space, start,
71 end - start);
72
73 dev->gtt_total = (uint32_t) (end - start);
74
75 return 0;
76}
57 77
58int 78int
59i915_gem_init_ioctl(struct drm_device *dev, void *data, 79i915_gem_init_ioctl(struct drm_device *dev, void *data,
60 struct drm_file *file_priv) 80 struct drm_file *file_priv)
61{ 81{
62 drm_i915_private_t *dev_priv = dev->dev_private;
63 struct drm_i915_gem_init *args = data; 82 struct drm_i915_gem_init *args = data;
83 int ret;
64 84
65 mutex_lock(&dev->struct_mutex); 85 mutex_lock(&dev->struct_mutex);
66 86 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
67 if (args->gtt_start >= args->gtt_end ||
68 (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
69 (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
70 mutex_unlock(&dev->struct_mutex);
71 return -EINVAL;
72 }
73
74 drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
75 args->gtt_end - args->gtt_start);
76
77 dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
78
79 mutex_unlock(&dev->struct_mutex); 87 mutex_unlock(&dev->struct_mutex);
80 88
81 return 0; 89 return ret;
82} 90}
83 91
84int 92int
@@ -529,6 +537,252 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
529 return 0; 537 return 0;
530} 538}
531 539
540/**
541 * i915_gem_fault - fault a page into the GTT
 542 * @vma: VMA in question
 543 * @vmf: fault info
544 *
 545 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
546 * from userspace. The fault handler takes care of binding the object to
547 * the GTT (if needed), allocating and programming a fence register (again,
548 * only if needed based on whether the old reg is still valid or the object
549 * is tiled) and inserting a new PTE into the faulting process.
550 *
551 * Note that the faulting process may involve evicting existing objects
552 * from the GTT and/or fence registers to make room. So performance may
553 * suffer if the GTT working set is large or there are few fence registers
554 * left.
555 */
556int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
557{
558 struct drm_gem_object *obj = vma->vm_private_data;
559 struct drm_device *dev = obj->dev;
560 struct drm_i915_private *dev_priv = dev->dev_private;
561 struct drm_i915_gem_object *obj_priv = obj->driver_private;
562 pgoff_t page_offset;
563 unsigned long pfn;
564 int ret = 0;
565
566 /* We don't use vmf->pgoff since that has the fake offset */
567 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
568 PAGE_SHIFT;
569
570 /* Now bind it into the GTT if needed */
571 mutex_lock(&dev->struct_mutex);
572 if (!obj_priv->gtt_space) {
573 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
574 if (ret) {
575 mutex_unlock(&dev->struct_mutex);
576 return VM_FAULT_SIGBUS;
577 }
578 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
579 }
580
581 /* Need a new fence register? */
582 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
583 obj_priv->tiling_mode != I915_TILING_NONE)
584 i915_gem_object_get_fence_reg(obj);
585
586 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
587 page_offset;
588
589 /* Finally, remap it using the new GTT offset */
590 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
591
592 mutex_unlock(&dev->struct_mutex);
593
594 switch (ret) {
595 case -ENOMEM:
596 case -EAGAIN:
597 return VM_FAULT_OOM;
598 case -EFAULT:
599 case -EBUSY:
600 DRM_ERROR("can't insert pfn?? fault or busy...\n");
601 return VM_FAULT_SIGBUS;
602 default:
603 return VM_FAULT_NOPAGE;
604 }
605}
606
607/**
608 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
609 * @obj: obj in question
610 *
611 * GEM memory mapping works by handing back to userspace a fake mmap offset
612 * it can use in a subsequent mmap(2) call. The DRM core code then looks
613 * up the object based on the offset and sets up the various memory mapping
614 * structures.
615 *
616 * This routine allocates and attaches a fake offset for @obj.
617 */
618static int
619i915_gem_create_mmap_offset(struct drm_gem_object *obj)
620{
621 struct drm_device *dev = obj->dev;
622 struct drm_gem_mm *mm = dev->mm_private;
623 struct drm_i915_gem_object *obj_priv = obj->driver_private;
624 struct drm_map_list *list;
625 struct drm_map *map;
626 int ret = 0;
627
628 /* Set the object up for mmap'ing */
629 list = &obj->map_list;
630 list->map = drm_calloc(1, sizeof(struct drm_map_list),
631 DRM_MEM_DRIVER);
632 if (!list->map)
633 return -ENOMEM;
634
635 map = list->map;
636 map->type = _DRM_GEM;
637 map->size = obj->size;
638 map->handle = obj;
639
640 /* Get a DRM GEM mmap offset allocated... */
641 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
642 obj->size / PAGE_SIZE, 0, 0);
643 if (!list->file_offset_node) {
644 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
645 ret = -ENOMEM;
646 goto out_free_list;
647 }
648
649 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
650 obj->size / PAGE_SIZE, 0);
651 if (!list->file_offset_node) {
652 ret = -ENOMEM;
653 goto out_free_list;
654 }
655
656 list->hash.key = list->file_offset_node->start;
657 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
658 DRM_ERROR("failed to add to map hash\n");
659 goto out_free_mm;
660 }
661
662 /* By now we should be all set, any drm_mmap request on the offset
663 * below will get to our mmap & fault handler */
664 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
665
666 return 0;
667
668out_free_mm:
669 drm_mm_put_block(list->file_offset_node);
670out_free_list:
671 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
672
673 return ret;
674}
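/*
 * Sketch of the offset arithmetic above (illustrative only): the DRM offset
 * manager hands back a page-granular key, and the value userspace later
 * passes to mmap(2) is simply that key scaled by the page size;
 * drm_gem_mmap() reverses the mapping by looking the page offset back up
 * in mm->offset_hash.
 */
static uint64_t example_fake_mmap_offset(unsigned long hash_key)
{
	return (uint64_t)hash_key << PAGE_SHIFT;
}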
675
676/**
677 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
678 * @obj: object to check
679 *
680 * Return the required GTT alignment for an object, taking into account
681 * potential fence register mapping if needed.
682 */
683static uint32_t
684i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
685{
686 struct drm_device *dev = obj->dev;
687 struct drm_i915_gem_object *obj_priv = obj->driver_private;
688 int start, i;
689
690 /*
691 * Minimum alignment is 4k (GTT page size), but might be greater
692 * if a fence register is needed for the object.
693 */
694 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
695 return 4096;
696
697 /*
698 * Previous chips need to be aligned to the size of the smallest
699 * fence register that can contain the object.
700 */
701 if (IS_I9XX(dev))
702 start = 1024*1024;
703 else
704 start = 512*1024;
705
706 for (i = start; i < obj->size; i <<= 1)
707 ;
708
709 return i;
710}
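/*
 * Quick check of the alignment rule above (illustrative only): tiled
 * objects on pre-965 parts must be aligned to the smallest power-of-two
 * fence size that covers them, starting at 1MB on 9xx and 512KB on older
 * parts.  A 3MB tiled object on a 9xx chip therefore needs 4MB alignment,
 * while untiled or 965 objects only need the 4KB GTT page size.
 */
static unsigned long example_fence_alignment(unsigned long obj_size,
					     unsigned long min_fence_size)
{
	unsigned long i;

	for (i = min_fence_size; i < obj_size; i <<= 1)
		;
	return i;
}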
711
712/**
713 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
714 * @dev: DRM device
715 * @data: GTT mapping ioctl data
716 * @file_priv: GEM object info
717 *
718 * Simply returns the fake offset to userspace so it can mmap it.
719 * The mmap call will end up in drm_gem_mmap(), which will set things
720 * up so we can get faults in the handler above.
721 *
722 * The fault handler will take care of binding the object into the GTT
723 * (since it may have been evicted to make room for something), allocating
724 * a fence register, and mapping the appropriate aperture address into
725 * userspace.
726 */
727int
728i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
729 struct drm_file *file_priv)
730{
731 struct drm_i915_gem_mmap_gtt *args = data;
732 struct drm_i915_private *dev_priv = dev->dev_private;
733 struct drm_gem_object *obj;
734 struct drm_i915_gem_object *obj_priv;
735 int ret;
736
737 if (!(dev->driver->driver_features & DRIVER_GEM))
738 return -ENODEV;
739
740 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
741 if (obj == NULL)
742 return -EBADF;
743
744 mutex_lock(&dev->struct_mutex);
745
746 obj_priv = obj->driver_private;
747
748 if (!obj_priv->mmap_offset) {
749 ret = i915_gem_create_mmap_offset(obj);
750 if (ret)
751 return ret;
752 }
753
754 args->offset = obj_priv->mmap_offset;
755
756 obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
757
758 /* Make sure the alignment is correct for fence regs etc */
759 if (obj_priv->agp_mem &&
760 (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
761 drm_gem_object_unreference(obj);
762 mutex_unlock(&dev->struct_mutex);
763 return -EINVAL;
764 }
765
766 /*
767 * Pull it into the GTT so that we have a page list (makes the
768 * initial fault faster and any subsequent flushing possible).
769 */
770 if (!obj_priv->agp_mem) {
771 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
772 if (ret) {
773 drm_gem_object_unreference(obj);
774 mutex_unlock(&dev->struct_mutex);
775 return ret;
776 }
777 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
778 }
779
780 drm_gem_object_unreference(obj);
781 mutex_unlock(&dev->struct_mutex);
782
783 return 0;
784}
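/*
 * A minimal userspace sketch of the new GTT-mapping path, assuming the
 * DRM_IOCTL_I915_GEM_MMAP_GTT wrapper and the struct drm_i915_gem_mmap_gtt
 * layout (handle in, fake offset out) that accompany this patch in
 * i915_drm.h; error handling is omitted.  'fd' is an open DRM device file
 * descriptor and 'handle'/'size' describe an existing GEM object.
 */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdint.h>
#include "i915_drm.h"

static void *example_map_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* Ask the kernel for the fake offset set up by the ioctl above. */
	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);

	/* mmap() that offset; faults land in i915_gem_fault(). */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}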
785
532static void 786static void
533i915_gem_object_free_page_list(struct drm_gem_object *obj) 787i915_gem_object_free_page_list(struct drm_gem_object *obj)
534{ 788{
@@ -726,6 +980,7 @@ i915_gem_retire_request(struct drm_device *dev,
726 */ 980 */
727 if (obj_priv->last_rendering_seqno != request->seqno) 981 if (obj_priv->last_rendering_seqno != request->seqno)
728 return; 982 return;
983
729#if WATCH_LRU 984#if WATCH_LRU
730 DRM_INFO("%s: retire %d moves to inactive list %p\n", 985 DRM_INFO("%s: retire %d moves to inactive list %p\n",
731 __func__, request->seqno, obj); 986 __func__, request->seqno, obj);
@@ -956,6 +1211,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
956{ 1211{
957 struct drm_device *dev = obj->dev; 1212 struct drm_device *dev = obj->dev;
958 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1213 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1214 loff_t offset;
959 int ret = 0; 1215 int ret = 0;
960 1216
961#if WATCH_BUF 1217#if WATCH_BUF
@@ -991,6 +1247,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
991 1247
992 BUG_ON(obj_priv->active); 1248 BUG_ON(obj_priv->active);
993 1249
1250 /* blow away mappings if mapped through GTT */
1251 offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
1252 if (dev->dev_mapping)
1253 unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
1254
1255 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1256 i915_gem_clear_fence_reg(obj);
1257
994 i915_gem_object_free_page_list(obj); 1258 i915_gem_object_free_page_list(obj);
995 1259
996 if (obj_priv->gtt_space) { 1260 if (obj_priv->gtt_space) {
@@ -1149,6 +1413,204 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1149 return 0; 1413 return 0;
1150} 1414}
1151 1415
1416static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
1417{
1418 struct drm_gem_object *obj = reg->obj;
1419 struct drm_device *dev = obj->dev;
1420 drm_i915_private_t *dev_priv = dev->dev_private;
1421 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1422 int regnum = obj_priv->fence_reg;
1423 uint64_t val;
1424
1425 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
1426 0xfffff000) << 32;
1427 val |= obj_priv->gtt_offset & 0xfffff000;
1428 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1429 if (obj_priv->tiling_mode == I915_TILING_Y)
1430 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1431 val |= I965_FENCE_REG_VALID;
1432
1433 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
1434}
1435
1436static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1437{
1438 struct drm_gem_object *obj = reg->obj;
1439 struct drm_device *dev = obj->dev;
1440 drm_i915_private_t *dev_priv = dev->dev_private;
1441 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1442 int regnum = obj_priv->fence_reg;
1443 uint32_t val;
1444 uint32_t pitch_val;
1445
1446 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1447 (obj_priv->gtt_offset & (obj->size - 1))) {
1448 WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
1449 return;
1450 }
1451
1452 if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
1453 IS_I945GM(dev) ||
1454 IS_G33(dev)))
1455 pitch_val = (obj_priv->stride / 128) - 1;
1456 else
1457 pitch_val = (obj_priv->stride / 512) - 1;
1458
1459 val = obj_priv->gtt_offset;
1460 if (obj_priv->tiling_mode == I915_TILING_Y)
1461 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1462 val |= I915_FENCE_SIZE_BITS(obj->size);
1463 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1464 val |= I830_FENCE_REG_VALID;
1465
1466 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
1467}
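/*
 * Worked example of the pre-965 pitch encoding above (illustrative only):
 * the register stores (stride / tile_width) - 1, where the tile width is
 * 128 bytes for Y tiling on 945G/945GM/G33 and 512 bytes otherwise.  A
 * 4096-byte stride therefore encodes as 7 for X tiling and as 31 for Y
 * tiling on those chips.
 */
static uint32_t example_fence_pitch_val(uint32_t stride, int tile_width)
{
	return (stride / tile_width) - 1;
}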
1468
1469static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1470{
1471 struct drm_gem_object *obj = reg->obj;
1472 struct drm_device *dev = obj->dev;
1473 drm_i915_private_t *dev_priv = dev->dev_private;
1474 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1475 int regnum = obj_priv->fence_reg;
1476 uint32_t val;
1477 uint32_t pitch_val;
1478
1479 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1480 (obj_priv->gtt_offset & (obj->size - 1))) {
1481 WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
1482 return;
1483 }
1484
1485 pitch_val = (obj_priv->stride / 128) - 1;
1486
1487 val = obj_priv->gtt_offset;
1488 if (obj_priv->tiling_mode == I915_TILING_Y)
1489 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1490 val |= I830_FENCE_SIZE_BITS(obj->size);
1491 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1492 val |= I830_FENCE_REG_VALID;
1493
1494 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
1495
1496}
1497
1498/**
1499 * i915_gem_object_get_fence_reg - set up a fence reg for an object
1500 * @obj: object to map through a fence reg
1501 *
1502 * When mapping objects through the GTT, userspace wants to be able to write
1503 * to them without having to worry about swizzling if the object is tiled.
1504 *
1505 * This function walks the fence regs looking for a free one for @obj,
1506 * stealing one if it can't find any.
1507 *
1508 * It then sets up the reg based on the object's properties: address, pitch
1509 * and tiling format.
1510 */
1511static void
1512i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
1513{
1514 struct drm_device *dev = obj->dev;
1515 struct drm_i915_private *dev_priv = dev->dev_private;
1516 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1517 struct drm_i915_fence_reg *reg = NULL;
1518 int i, ret;
1519
1520 switch (obj_priv->tiling_mode) {
1521 case I915_TILING_NONE:
1522 WARN(1, "allocating a fence for non-tiled object?\n");
1523 break;
1524 case I915_TILING_X:
1525 WARN(obj_priv->stride & (512 - 1),
1526 "object is X tiled but has non-512B pitch\n");
1527 break;
1528 case I915_TILING_Y:
1529 WARN(obj_priv->stride & (128 - 1),
1530 "object is Y tiled but has non-128B pitch\n");
1531 break;
1532 }
1533
1534 /* First try to find a free reg */
1535 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
1536 reg = &dev_priv->fence_regs[i];
1537 if (!reg->obj)
1538 break;
1539 }
1540
1541 /* None available, try to steal one or wait for a user to finish */
1542 if (i == dev_priv->num_fence_regs) {
1543 struct drm_i915_gem_object *old_obj_priv = NULL;
1544 loff_t offset;
1545
1546try_again:
1547 /* Could try to use LRU here instead... */
1548 for (i = dev_priv->fence_reg_start;
1549 i < dev_priv->num_fence_regs; i++) {
1550 reg = &dev_priv->fence_regs[i];
1551 old_obj_priv = reg->obj->driver_private;
1552 if (!old_obj_priv->pin_count)
1553 break;
1554 }
1555
1556 /*
1557 * Now things get ugly... we have to wait for one of the
1558 * objects to finish before trying again.
1559 */
1560 if (i == dev_priv->num_fence_regs) {
1561 ret = i915_gem_object_wait_rendering(reg->obj);
1562 if (ret) {
1563 WARN(ret, "wait_rendering failed: %d\n", ret);
1564 return;
1565 }
1566 goto try_again;
1567 }
1568
1569 /*
1570 * Zap this virtual mapping so we can set up a fence again
1571 * for this object next time we need it.
1572 */
1573 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
1574 if (dev->dev_mapping)
1575 unmap_mapping_range(dev->dev_mapping, offset,
1576 reg->obj->size, 1);
1577 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
1578 }
1579
1580 obj_priv->fence_reg = i;
1581 reg->obj = obj;
1582
1583 if (IS_I965G(dev))
1584 i965_write_fence_reg(reg);
1585 else if (IS_I9XX(dev))
1586 i915_write_fence_reg(reg);
1587 else
1588 i830_write_fence_reg(reg);
1589}
1590
1591/**
1592 * i915_gem_clear_fence_reg - clear out fence register info
1593 * @obj: object to clear
1594 *
1595 * Zeroes out the fence register itself and clears out the associated
1596 * data structures in dev_priv and obj_priv.
1597 */
1598static void
1599i915_gem_clear_fence_reg(struct drm_gem_object *obj)
1600{
1601 struct drm_device *dev = obj->dev;
1602 drm_i915_private_t *dev_priv = dev->dev_private;
1603 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1604
1605 if (IS_I965G(dev))
1606 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
1607 else
1608 I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
1609
1610 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
1611 obj_priv->fence_reg = I915_FENCE_REG_NONE;
1612}
1613
1152/** 1614/**
1153 * Finds free space in the GTT aperture and binds the object there. 1615 * Finds free space in the GTT aperture and binds the object there.
1154 */ 1616 */
@@ -1307,7 +1769,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
1307 * This function returns when the move is complete, including waiting on 1769 * This function returns when the move is complete, including waiting on
1308 * flushes to occur. 1770 * flushes to occur.
1309 */ 1771 */
1310static int 1772int
1311i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 1773i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1312{ 1774{
1313 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1775 struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -2029,13 +2491,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2029 2491
2030 /* error other than GTT full, or we've already tried again */ 2492 /* error other than GTT full, or we've already tried again */
2031 if (ret != -ENOMEM || pin_tries >= 1) { 2493 if (ret != -ENOMEM || pin_tries >= 1) {
2032 DRM_ERROR("Failed to pin buffers %d\n", ret); 2494 if (ret != -ERESTARTSYS)
2495 DRM_ERROR("Failed to pin buffers %d\n", ret);
2033 goto err; 2496 goto err;
2034 } 2497 }
2035 2498
2036 /* unpin all of our buffers */ 2499 /* unpin all of our buffers */
2037 for (i = 0; i < pinned; i++) 2500 for (i = 0; i < pinned; i++)
2038 i915_gem_object_unpin(object_list[i]); 2501 i915_gem_object_unpin(object_list[i]);
2502 pinned = 0;
2039 2503
2040 /* evict everyone we can from the aperture */ 2504 /* evict everyone we can from the aperture */
2041 ret = i915_gem_evict_everything(dev); 2505 ret = i915_gem_evict_everything(dev);
@@ -2149,13 +2613,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2149 "back to user (%d)\n", 2613 "back to user (%d)\n",
2150 args->buffer_count, ret); 2614 args->buffer_count, ret);
2151err: 2615err:
2152 if (object_list != NULL) { 2616 for (i = 0; i < pinned; i++)
2153 for (i = 0; i < pinned; i++) 2617 i915_gem_object_unpin(object_list[i]);
2154 i915_gem_object_unpin(object_list[i]); 2618
2619 for (i = 0; i < args->buffer_count; i++)
2620 drm_gem_object_unreference(object_list[i]);
2155 2621
2156 for (i = 0; i < args->buffer_count; i++)
2157 drm_gem_object_unreference(object_list[i]);
2158 }
2159 mutex_unlock(&dev->struct_mutex); 2622 mutex_unlock(&dev->struct_mutex);
2160 2623
2161pre_mutex_err: 2624pre_mutex_err:
@@ -2178,7 +2641,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2178 if (obj_priv->gtt_space == NULL) { 2641 if (obj_priv->gtt_space == NULL) {
2179 ret = i915_gem_object_bind_to_gtt(obj, alignment); 2642 ret = i915_gem_object_bind_to_gtt(obj, alignment);
2180 if (ret != 0) { 2643 if (ret != 0) {
2181 DRM_ERROR("Failure to bind: %d", ret); 2644 if (ret != -ERESTARTSYS)
2645 DRM_ERROR("Failure to bind: %d", ret);
2182 return ret; 2646 return ret;
2183 } 2647 }
2184 } 2648 }
@@ -2249,11 +2713,22 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2249 } 2713 }
2250 obj_priv = obj->driver_private; 2714 obj_priv = obj->driver_private;
2251 2715
2252 ret = i915_gem_object_pin(obj, args->alignment); 2716 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
2253 if (ret != 0) { 2717 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2254 drm_gem_object_unreference(obj); 2718 args->handle);
2255 mutex_unlock(&dev->struct_mutex); 2719 mutex_unlock(&dev->struct_mutex);
2256 return ret; 2720 return -EINVAL;
2721 }
2722
2723 obj_priv->user_pin_count++;
2724 obj_priv->pin_filp = file_priv;
2725 if (obj_priv->user_pin_count == 1) {
2726 ret = i915_gem_object_pin(obj, args->alignment);
2727 if (ret != 0) {
2728 drm_gem_object_unreference(obj);
2729 mutex_unlock(&dev->struct_mutex);
2730 return ret;
2731 }
2257 } 2732 }
2258 2733
2259 /* XXX - flush the CPU caches for pinned objects 2734 /* XXX - flush the CPU caches for pinned objects
@@ -2273,6 +2748,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2273{ 2748{
2274 struct drm_i915_gem_pin *args = data; 2749 struct drm_i915_gem_pin *args = data;
2275 struct drm_gem_object *obj; 2750 struct drm_gem_object *obj;
2751 struct drm_i915_gem_object *obj_priv;
2276 2752
2277 mutex_lock(&dev->struct_mutex); 2753 mutex_lock(&dev->struct_mutex);
2278 2754
@@ -2284,7 +2760,19 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2284 return -EBADF; 2760 return -EBADF;
2285 } 2761 }
2286 2762
2287 i915_gem_object_unpin(obj); 2763 obj_priv = obj->driver_private;
2764 if (obj_priv->pin_filp != file_priv) {
2765 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
2766 args->handle);
2767 drm_gem_object_unreference(obj);
2768 mutex_unlock(&dev->struct_mutex);
2769 return -EINVAL;
2770 }
2771 obj_priv->user_pin_count--;
2772 if (obj_priv->user_pin_count == 0) {
2773 obj_priv->pin_filp = NULL;
2774 i915_gem_object_unpin(obj);
2775 }
2288 2776
2289 drm_gem_object_unreference(obj); 2777 drm_gem_object_unreference(obj);
2290 mutex_unlock(&dev->struct_mutex); 2778 mutex_unlock(&dev->struct_mutex);
@@ -2351,12 +2839,18 @@ int i915_gem_init_object(struct drm_gem_object *obj)
2351 2839
2352 obj->driver_private = obj_priv; 2840 obj->driver_private = obj_priv;
2353 obj_priv->obj = obj; 2841 obj_priv->obj = obj;
2842 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2354 INIT_LIST_HEAD(&obj_priv->list); 2843 INIT_LIST_HEAD(&obj_priv->list);
2844
2355 return 0; 2845 return 0;
2356} 2846}
2357 2847
2358void i915_gem_free_object(struct drm_gem_object *obj) 2848void i915_gem_free_object(struct drm_gem_object *obj)
2359{ 2849{
2850 struct drm_device *dev = obj->dev;
2851 struct drm_gem_mm *mm = dev->mm_private;
2852 struct drm_map_list *list;
2853 struct drm_map *map;
2360 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2854 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2361 2855
2362 while (obj_priv->pin_count > 0) 2856 while (obj_priv->pin_count > 0)
@@ -2364,6 +2858,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
2364 2858
2365 i915_gem_object_unbind(obj); 2859 i915_gem_object_unbind(obj);
2366 2860
2861 list = &obj->map_list;
2862 drm_ht_remove_item(&mm->offset_hash, &list->hash);
2863
2864 if (list->file_offset_node) {
2865 drm_mm_put_block(list->file_offset_node);
2866 list->file_offset_node = NULL;
2867 }
2868
2869 map = list->map;
2870 if (map) {
2871 drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
2872 list->map = NULL;
2873 }
2874
2367 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 2875 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2368 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2876 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2369} 2877}
@@ -2432,8 +2940,7 @@ i915_gem_idle(struct drm_device *dev)
2432 */ 2940 */
2433 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), 2941 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2434 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2942 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2435 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | 2943 seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
2436 I915_GEM_DOMAIN_GTT));
2437 2944
2438 if (seqno == 0) { 2945 if (seqno == 0) {
2439 mutex_unlock(&dev->struct_mutex); 2946 mutex_unlock(&dev->struct_mutex);
@@ -2560,12 +3067,13 @@ i915_gem_init_hws(struct drm_device *dev)
2560 return 0; 3067 return 0;
2561} 3068}
2562 3069
2563static int 3070int
2564i915_gem_init_ringbuffer(struct drm_device *dev) 3071i915_gem_init_ringbuffer(struct drm_device *dev)
2565{ 3072{
2566 drm_i915_private_t *dev_priv = dev->dev_private; 3073 drm_i915_private_t *dev_priv = dev->dev_private;
2567 struct drm_gem_object *obj; 3074 struct drm_gem_object *obj;
2568 struct drm_i915_gem_object *obj_priv; 3075 struct drm_i915_gem_object *obj_priv;
3076 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
2569 int ret; 3077 int ret;
2570 u32 head; 3078 u32 head;
2571 3079
@@ -2587,24 +3095,24 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
2587 } 3095 }
2588 3096
2589 /* Set up the kernel mapping for the ring. */ 3097 /* Set up the kernel mapping for the ring. */
2590 dev_priv->ring.Size = obj->size; 3098 ring->Size = obj->size;
2591 dev_priv->ring.tail_mask = obj->size - 1; 3099 ring->tail_mask = obj->size - 1;
2592 3100
2593 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; 3101 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
2594 dev_priv->ring.map.size = obj->size; 3102 ring->map.size = obj->size;
2595 dev_priv->ring.map.type = 0; 3103 ring->map.type = 0;
2596 dev_priv->ring.map.flags = 0; 3104 ring->map.flags = 0;
2597 dev_priv->ring.map.mtrr = 0; 3105 ring->map.mtrr = 0;
2598 3106
2599 drm_core_ioremap_wc(&dev_priv->ring.map, dev); 3107 drm_core_ioremap_wc(&ring->map, dev);
2600 if (dev_priv->ring.map.handle == NULL) { 3108 if (ring->map.handle == NULL) {
2601 DRM_ERROR("Failed to map ringbuffer.\n"); 3109 DRM_ERROR("Failed to map ringbuffer.\n");
2602 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 3110 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2603 drm_gem_object_unreference(obj); 3111 drm_gem_object_unreference(obj);
2604 return -EINVAL; 3112 return -EINVAL;
2605 } 3113 }
2606 dev_priv->ring.ring_obj = obj; 3114 ring->ring_obj = obj;
2607 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 3115 ring->virtual_start = ring->map.handle;
2608 3116
2609 /* Stop the ring if it's running. */ 3117 /* Stop the ring if it's running. */
2610 I915_WRITE(PRB0_CTL, 0); 3118 I915_WRITE(PRB0_CTL, 0);
@@ -2652,12 +3160,20 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
2652 } 3160 }
2653 3161
2654 /* Update our cache of the ring state */ 3162 /* Update our cache of the ring state */
2655 i915_kernel_lost_context(dev); 3163 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3164 i915_kernel_lost_context(dev);
3165 else {
3166 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3167 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
3168 ring->space = ring->head - (ring->tail + 8);
3169 if (ring->space < 0)
3170 ring->space += ring->Size;
3171 }
2656 3172
2657 return 0; 3173 return 0;
2658} 3174}
2659 3175
2660static void 3176void
2661i915_gem_cleanup_ringbuffer(struct drm_device *dev) 3177i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2662{ 3178{
2663 drm_i915_private_t *dev_priv = dev->dev_private; 3179 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2695,6 +3211,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2695 drm_i915_private_t *dev_priv = dev->dev_private; 3211 drm_i915_private_t *dev_priv = dev->dev_private;
2696 int ret; 3212 int ret;
2697 3213
3214 if (drm_core_check_feature(dev, DRIVER_MODESET))
3215 return 0;
3216
2698 if (dev_priv->mm.wedged) { 3217 if (dev_priv->mm.wedged) {
2699 DRM_ERROR("Reenabling wedged hardware, good luck\n"); 3218 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2700 dev_priv->mm.wedged = 0; 3219 dev_priv->mm.wedged = 0;
@@ -2728,6 +3247,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2728 drm_i915_private_t *dev_priv = dev->dev_private; 3247 drm_i915_private_t *dev_priv = dev->dev_private;
2729 int ret; 3248 int ret;
2730 3249
3250 if (drm_core_check_feature(dev, DRIVER_MODESET))
3251 return 0;
3252
2731 ret = i915_gem_idle(dev); 3253 ret = i915_gem_idle(dev);
2732 drm_irq_uninstall(dev); 3254 drm_irq_uninstall(dev);
2733 3255
@@ -2758,5 +3280,13 @@ i915_gem_load(struct drm_device *dev)
2758 i915_gem_retire_work_handler); 3280 i915_gem_retire_work_handler);
2759 dev_priv->mm.next_gem_seqno = 1; 3281 dev_priv->mm.next_gem_seqno = 1;
2760 3282
3283 /* Old X drivers will take 0-2 for front, back, depth buffers */
3284 dev_priv->fence_reg_start = 3;
3285
3286 if (IS_I965G(dev))
3287 dev_priv->num_fence_regs = 16;
3288 else
3289 dev_priv->num_fence_regs = 8;
3290
2761 i915_gem_detect_bit_6_swizzle(dev); 3291 i915_gem_detect_bit_6_swizzle(dev);
2762} 3292}
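
[Editor's note, not part of the patch] The modeset branch added to i915_gem_init_ringbuffer() above reads PRB0_HEAD and PRB0_TAIL back from the hardware and recomputes the free space in the ring with a wrap-around correction. A rough standalone sketch of that arithmetic follows; ring_space() and the example values are stand-ins, not code from this patch.

#include <stdio.h>
#include <stdint.h>

/* Free space between tail and head of a circular ring buffer.
 * Mirrors the head/tail/space update in i915_gem_init_ringbuffer():
 * the extra 8 bytes keep the tail from ever catching the head exactly. */
static int ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
    int space = (int)head - (int)(tail + 8);
    if (space < 0)
        space += (int)size;
    return space;
}

int main(void)
{
    /* Example: 128 KiB ring with head behind tail, so the space wraps. */
    printf("space = %d bytes\n", ring_space(0x100, 0x1f000, 128 * 1024));
    return 0;
}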
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index e8d5abe1250e..4d1b9de0cd8b 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -250,6 +250,39 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
250 return len - offset; 250 return len - offset;
251} 251}
252 252
253static int i915_hws_info(char *buf, char **start, off_t offset,
254 int request, int *eof, void *data)
255{
256 struct drm_minor *minor = (struct drm_minor *) data;
257 struct drm_device *dev = minor->dev;
258 drm_i915_private_t *dev_priv = dev->dev_private;
259 int len = 0, i;
260 volatile u32 *hws;
261
262 if (offset > DRM_PROC_LIMIT) {
263 *eof = 1;
264 return 0;
265 }
266
267 hws = (volatile u32 *)dev_priv->hw_status_page;
268 if (hws == NULL) {
269 *eof = 1;
270 return 0;
271 }
272
273 *start = &buf[offset];
274 *eof = 0;
275 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
276 DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
277 i * 4,
278 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
279 }
280 if (len > request + offset)
281 return request;
282 *eof = 1;
283 return len - offset;
284}
285
253static struct drm_proc_list { 286static struct drm_proc_list {
254 /** file name */ 287 /** file name */
255 const char *name; 288 const char *name;
@@ -262,6 +295,7 @@ static struct drm_proc_list {
262 {"i915_gem_request", i915_gem_request_info}, 295 {"i915_gem_request", i915_gem_request_info},
263 {"i915_gem_seqno", i915_gem_seqno_info}, 296 {"i915_gem_seqno", i915_gem_seqno_info},
264 {"i915_gem_interrupt", i915_interrupt_info}, 297 {"i915_gem_interrupt", i915_interrupt_info},
298 {"i915_gem_hws", i915_hws_info},
265}; 299};
266 300
267#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) 301#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
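
[Editor's note, not part of the patch] The new i915_hws_info() /proc handler above dumps the hardware status page four dwords per row, each row prefixed by its byte offset. A minimal standalone sketch of that dump layout; dump_words() and the sample buffer are assumptions for illustration, not the driver's /proc plumbing.

#include <stdio.h>
#include <stdint.h>

/* Print a buffer of 32-bit words four per row, each row prefixed with
 * the byte offset of its first word -- the layout i915_hws_info() emits. */
static void dump_words(const uint32_t *buf, size_t count)
{
    size_t i;

    for (i = 0; i + 3 < count; i += 4)
        printf("0x%08lx: 0x%08x 0x%08x 0x%08x 0x%08x\n",
               (unsigned long)(i * 4),
               buf[i], buf[i + 1], buf[i + 2], buf[i + 3]);
}

int main(void)
{
    uint32_t page[16] = { 0xdeadbeef, 1, 2, 3 };

    dump_words(page, 16);
    return 0;
}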
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a8cb69469c64..241f39b7f460 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -208,6 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
208 } 208 }
209 } 209 }
210 obj_priv->tiling_mode = args->tiling_mode; 210 obj_priv->tiling_mode = args->tiling_mode;
211 obj_priv->stride = args->stride;
211 212
212 mutex_unlock(&dev->struct_mutex); 213 mutex_unlock(&dev->struct_mutex);
213 214
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 69b9a42da95e..0cadafbef411 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
30#include "drm.h" 30#include "drm.h"
31#include "i915_drm.h" 31#include "i915_drm.h"
32#include "i915_drv.h" 32#include "i915_drv.h"
33#include "intel_drv.h"
33 34
34#define MAX_NOPID ((u32)~0) 35#define MAX_NOPID ((u32)~0)
35 36
@@ -51,6 +52,15 @@
51#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \ 52#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
52 I915_INTERRUPT_ENABLE_VAR) 53 I915_INTERRUPT_ENABLE_VAR)
53 54
55#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
56 PIPE_VBLANK_INTERRUPT_STATUS)
57
58#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
59 PIPE_VBLANK_INTERRUPT_ENABLE)
60
61#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
62 DRM_I915_VBLANK_PIPE_B)
63
54void 64void
55i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 65i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
56{ 66{
@@ -168,6 +178,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
168{ 178{
169 struct drm_device *dev = (struct drm_device *) arg; 179 struct drm_device *dev = (struct drm_device *) arg;
170 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 180 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
181 struct drm_i915_master_private *master_priv;
171 u32 iir, new_iir; 182 u32 iir, new_iir;
172 u32 pipea_stats, pipeb_stats; 183 u32 pipea_stats, pipeb_stats;
173 u32 vblank_status; 184 u32 vblank_status;
@@ -200,6 +211,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
200 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 211 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
201 pipea_stats = I915_READ(PIPEASTAT); 212 pipea_stats = I915_READ(PIPEASTAT);
202 pipeb_stats = I915_READ(PIPEBSTAT); 213 pipeb_stats = I915_READ(PIPEBSTAT);
214
203 /* 215 /*
204 * Clear the PIPE(A|B)STAT regs before the IIR 216 * Clear the PIPE(A|B)STAT regs before the IIR
205 */ 217 */
@@ -222,9 +234,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
222 I915_WRITE(IIR, iir); 234 I915_WRITE(IIR, iir);
223 new_iir = I915_READ(IIR); /* Flush posted writes */ 235 new_iir = I915_READ(IIR); /* Flush posted writes */
224 236
225 if (dev_priv->sarea_priv) 237 if (dev->primary->master) {
226 dev_priv->sarea_priv->last_dispatch = 238 master_priv = dev->primary->master->driver_priv;
227 READ_BREADCRUMB(dev_priv); 239 if (master_priv->sarea_priv)
240 master_priv->sarea_priv->last_dispatch =
241 READ_BREADCRUMB(dev_priv);
242 }
228 243
229 if (iir & I915_USER_INTERRUPT) { 244 if (iir & I915_USER_INTERRUPT) {
230 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 245 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
@@ -269,6 +284,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
269static int i915_emit_irq(struct drm_device * dev) 284static int i915_emit_irq(struct drm_device * dev)
270{ 285{
271 drm_i915_private_t *dev_priv = dev->dev_private; 286 drm_i915_private_t *dev_priv = dev->dev_private;
287 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
272 RING_LOCALS; 288 RING_LOCALS;
273 289
274 i915_kernel_lost_context(dev); 290 i915_kernel_lost_context(dev);
@@ -278,8 +294,8 @@ static int i915_emit_irq(struct drm_device * dev)
278 dev_priv->counter++; 294 dev_priv->counter++;
279 if (dev_priv->counter > 0x7FFFFFFFUL) 295 if (dev_priv->counter > 0x7FFFFFFFUL)
280 dev_priv->counter = 1; 296 dev_priv->counter = 1;
281 if (dev_priv->sarea_priv) 297 if (master_priv->sarea_priv)
282 dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 298 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
283 299
284 BEGIN_LP_RING(4); 300 BEGIN_LP_RING(4);
285 OUT_RING(MI_STORE_DWORD_INDEX); 301 OUT_RING(MI_STORE_DWORD_INDEX);
@@ -317,21 +333,20 @@ void i915_user_irq_put(struct drm_device *dev)
317static int i915_wait_irq(struct drm_device * dev, int irq_nr) 333static int i915_wait_irq(struct drm_device * dev, int irq_nr)
318{ 334{
319 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 335 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
336 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
320 int ret = 0; 337 int ret = 0;
321 338
322 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, 339 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
323 READ_BREADCRUMB(dev_priv)); 340 READ_BREADCRUMB(dev_priv));
324 341
325 if (READ_BREADCRUMB(dev_priv) >= irq_nr) { 342 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
326 if (dev_priv->sarea_priv) { 343 if (master_priv->sarea_priv)
327 dev_priv->sarea_priv->last_dispatch = 344 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
328 READ_BREADCRUMB(dev_priv);
329 }
330 return 0; 345 return 0;
331 } 346 }
332 347
333 if (dev_priv->sarea_priv) 348 if (master_priv->sarea_priv)
334 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 349 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
335 350
336 i915_user_irq_get(dev); 351 i915_user_irq_get(dev);
337 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 352 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
@@ -343,10 +358,6 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
343 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 358 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
344 } 359 }
345 360
346 if (dev_priv->sarea_priv)
347 dev_priv->sarea_priv->last_dispatch =
348 READ_BREADCRUMB(dev_priv);
349
350 return ret; 361 return ret;
351} 362}
352 363
@@ -427,6 +438,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
427 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 438 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
428} 439}
429 440
441void i915_enable_interrupt (struct drm_device *dev)
442{
443 struct drm_i915_private *dev_priv = dev->dev_private;
444 opregion_enable_asle(dev);
445 dev_priv->irq_enabled = 1;
446}
447
448
430/* Set the vblank monitor pipe 449/* Set the vblank monitor pipe
431 */ 450 */
432int i915_vblank_pipe_set(struct drm_device *dev, void *data, 451int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -487,6 +506,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
487{ 506{
488 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 507 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
489 508
509 atomic_set(&dev_priv->irq_received, 0);
510
490 I915_WRITE(HWSTAM, 0xeffe); 511 I915_WRITE(HWSTAM, 0xeffe);
491 I915_WRITE(PIPEASTAT, 0); 512 I915_WRITE(PIPEASTAT, 0);
492 I915_WRITE(PIPEBSTAT, 0); 513 I915_WRITE(PIPEBSTAT, 0);
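
[Editor's note, not part of the patch] The hunks above move the SAREA bookkeeping to the per-master private data, but the breadcrumb protocol itself is unchanged: i915_emit_irq() hands out an increasing counter that wraps from 0x7FFFFFFF back to 1, and i915_wait_irq() treats a waiter as satisfied once READ_BREADCRUMB() has reached that value. A standalone sketch of the emit/compare pattern; the function names are stand-ins and no real interrupt waiting is done.

#include <stdio.h>
#include <stdint.h>

/* Hand out the next breadcrumb value, wrapping the way i915_emit_irq()
 * does so the counter never reaches 0 or overflows a signed 32-bit int. */
static uint32_t next_breadcrumb(uint32_t *counter)
{
    (*counter)++;
    if (*counter > 0x7FFFFFFFUL)
        *counter = 1;
    return *counter;
}

/* A waiter is satisfied once the breadcrumb written back by the hardware
 * has caught up with the value it was handed. */
static int breadcrumb_passed(uint32_t hw_breadcrumb, uint32_t wanted)
{
    return hw_breadcrumb >= wanted;
}

int main(void)
{
    uint32_t counter = 0x7FFFFFFE;
    uint32_t seq;

    seq = next_breadcrumb(&counter);   /* 0x7FFFFFFF */
    seq = next_breadcrumb(&counter);   /* wraps back to 1 */
    printf("seq=%u passed=%d\n", seq, breadcrumb_passed(2, seq));
    return 0;
}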
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
index 6126a60dc9cb..96e271986d2a 100644
--- a/drivers/gpu/drm/i915/i915_mem.c
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -46,7 +46,8 @@
46static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) 46static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
47{ 47{
48 drm_i915_private_t *dev_priv = dev->dev_private; 48 drm_i915_private_t *dev_priv = dev->dev_private;
49 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 49 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
50 drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
50 struct drm_tex_region *list; 51 struct drm_tex_region *list;
51 unsigned shift, nr; 52 unsigned shift, nr;
52 unsigned start; 53 unsigned start;
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 13ae731a33db..ff012835a386 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -257,8 +257,8 @@ void opregion_enable_asle(struct drm_device *dev)
257 257
258static struct intel_opregion *system_opregion; 258static struct intel_opregion *system_opregion;
259 259
260int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, 260static int intel_opregion_video_event(struct notifier_block *nb,
261 void *data) 261 unsigned long val, void *data)
262{ 262{
263 /* The only video events relevant to opregion are 0x80. These indicate 263 /* The only video events relevant to opregion are 0x80. These indicate
264 either a docking event, lid switch or display switch request. In 264 either a docking event, lid switch or display switch request. In
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d24aaeb8a45..47e6bafeb743 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -175,9 +175,26 @@
175#define DISPLAY_PLANE_B (1<<20) 175#define DISPLAY_PLANE_B (1<<20)
176 176
177/* 177/*
178 * Instruction and interrupt control regs 178 * Fence registers
179 */ 179 */
180#define FENCE_REG_830_0 0x2000
181#define I830_FENCE_START_MASK 0x07f80000
182#define I830_FENCE_TILING_Y_SHIFT 12
183#define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8)
184#define I830_FENCE_PITCH_SHIFT 4
185#define I830_FENCE_REG_VALID (1<<0)
186
187#define I915_FENCE_START_MASK 0x0ff00000
188#define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8)
180 189
190#define FENCE_REG_965_0 0x03000
191#define I965_FENCE_PITCH_SHIFT 2
192#define I965_FENCE_TILING_Y_SHIFT 1
193#define I965_FENCE_REG_VALID (1<<0)
194
195/*
196 * Instruction and interrupt control regs
197 */
181#define PRB0_TAIL 0x02030 198#define PRB0_TAIL 0x02030
182#define PRB0_HEAD 0x02034 199#define PRB0_HEAD 0x02034
183#define PRB0_START 0x02038 200#define PRB0_START 0x02038
@@ -245,6 +262,7 @@
245#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 262#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
246#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 263#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
247 264
265
248/* 266/*
249 * Framebuffer compression (915+ only) 267 * Framebuffer compression (915+ only)
250 */ 268 */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
new file mode 100644
index 000000000000..4ca82a025525
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright © 2006 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27#include "drmP.h"
28#include "drm.h"
29#include "i915_drm.h"
30#include "i915_drv.h"
31#include "intel_bios.h"
32
33
34static void *
35find_section(struct bdb_header *bdb, int section_id)
36{
37 u8 *base = (u8 *)bdb;
38 int index = 0;
39 u16 total, current_size;
40 u8 current_id;
41
42 /* skip to first section */
43 index += bdb->header_size;
44 total = bdb->bdb_size;
45
46 /* walk the sections looking for section_id */
47 while (index < total) {
48 current_id = *(base + index);
49 index++;
50 current_size = *((u16 *)(base + index));
51 index += 2;
52 if (current_id == section_id)
53 return base + index;
54 index += current_size;
55 }
56
57 return NULL;
58}
59
60/* Try to find panel data */
61static void
62parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
63{
64 struct bdb_lvds_options *lvds_options;
65 struct bdb_lvds_lfp_data *lvds_lfp_data;
66 struct bdb_lvds_lfp_data_entry *entry;
67 struct lvds_dvo_timing *dvo_timing;
68 struct drm_display_mode *panel_fixed_mode;
69
70 /* Defaults if we can't find VBT info */
71 dev_priv->lvds_dither = 0;
72 dev_priv->lvds_vbt = 0;
73
74 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
75 if (!lvds_options)
76 return;
77
78 dev_priv->lvds_dither = lvds_options->pixel_dither;
79 if (lvds_options->panel_type == 0xff)
80 return;
81
82 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
83 if (!lvds_lfp_data)
84 return;
85
86 dev_priv->lvds_vbt = 1;
87
88 entry = &lvds_lfp_data->data[lvds_options->panel_type];
89 dvo_timing = &entry->dvo_timing;
90
91 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
92 DRM_MEM_DRIVER);
93
94 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
95 dvo_timing->hactive_lo;
96 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
97 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
98 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
99 dvo_timing->hsync_pulse_width;
100 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
101 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
102
103 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
104 dvo_timing->vactive_lo;
105 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
106 dvo_timing->vsync_off;
107 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
108 dvo_timing->vsync_pulse_width;
109 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
110 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
111 panel_fixed_mode->clock = dvo_timing->clock * 10;
112 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
113
114 drm_mode_set_name(panel_fixed_mode);
115
116 dev_priv->vbt_mode = panel_fixed_mode;
117
118 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
119 drm_mode_debug_printmodeline(panel_fixed_mode);
120
121 return;
122}
123
124static void
125parse_general_features(struct drm_i915_private *dev_priv,
126 struct bdb_header *bdb)
127{
128 struct bdb_general_features *general;
129
130 /* Set sensible defaults in case we can't find the general block */
131 dev_priv->int_tv_support = 1;
132 dev_priv->int_crt_support = 1;
133
134 general = find_section(bdb, BDB_GENERAL_FEATURES);
135 if (general) {
136 dev_priv->int_tv_support = general->int_tv_support;
137 dev_priv->int_crt_support = general->int_crt_support;
138 }
139}
140
141/**
142 * intel_init_bios - initialize VBIOS settings & find VBT
143 * @dev: DRM device
144 *
145 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
146 * to appropriate values.
147 *
148 * VBT existence is a sanity check that is relied on by other i830_bios.c code.
149 * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
150 * feed an updated VBT back through that, compared to what we'll fetch using
151 * this method of groping around in the BIOS data.
152 *
153 * Returns 0 on success, nonzero on failure.
154 */
155bool
156intel_init_bios(struct drm_device *dev)
157{
158 struct drm_i915_private *dev_priv = dev->dev_private;
159 struct pci_dev *pdev = dev->pdev;
160 struct vbt_header *vbt = NULL;
161 struct bdb_header *bdb;
162 u8 __iomem *bios;
163 size_t size;
164 int i;
165
166 bios = pci_map_rom(pdev, &size);
167 if (!bios)
168 return -1;
169
170 /* Scour memory looking for the VBT signature */
171 for (i = 0; i + 4 < size; i++) {
172 if (!memcmp(bios + i, "$VBT", 4)) {
173 vbt = (struct vbt_header *)(bios + i);
174 break;
175 }
176 }
177
178 if (!vbt) {
179 DRM_ERROR("VBT signature missing\n");
180 pci_unmap_rom(pdev, bios);
181 return -1;
182 }
183
184 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
185
186 /* Grab useful general definitions */
187 parse_general_features(dev_priv, bdb);
188 parse_panel_data(dev_priv, bdb);
189
190 pci_unmap_rom(pdev, bios);
191
192 return 0;
193}
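
[Editor's note, not part of the patch] find_section() above walks the BIOS data blocks as a simple tagged stream: one block-ID byte, a little-endian 16-bit size, then the payload. A standalone sketch of the same walk over a plain byte buffer; find_block(), the fake stream and the block IDs used in main() are illustrative stand-ins.

#include <stdio.h>
#include <stdint.h>

/* Walk an ID(1 byte) + size(little-endian u16) + payload stream and return
 * a pointer to the payload of the first block matching id, or NULL if it
 * is not present.  Same shape as find_section() above. */
static const uint8_t *find_block(const uint8_t *buf, size_t len, uint8_t id)
{
    size_t index = 0;

    while (index + 3 <= len) {
        uint8_t  cur_id   = buf[index];
        uint16_t cur_size = (uint16_t)(buf[index + 1] | (buf[index + 2] << 8));

        index += 3;
        if (cur_id == id)
            return buf + index;
        index += cur_size;       /* skip this block's payload */
    }
    return NULL;
}

int main(void)
{
    /* Two fake blocks: id 1 with 2 payload bytes, then id 40 with 1 byte. */
    const uint8_t stream[] = { 1, 2, 0, 0xaa, 0xbb,  40, 1, 0, 0x07 };
    const uint8_t *lvds = find_block(stream, sizeof(stream), 40);

    printf("found=%d payload=0x%02x\n", lvds != NULL, lvds ? lvds[0] : 0);
    return 0;
}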
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
new file mode 100644
index 000000000000..5ea715ace3a0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -0,0 +1,405 @@
1/*
2 * Copyright © 2006 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#ifndef _I830_BIOS_H_
29#define _I830_BIOS_H_
30
31#include "drmP.h"
32
33struct vbt_header {
34 u8 signature[20]; /**< Always starts with 'VBT$' */
35 u16 version; /**< decimal */
36 u16 header_size; /**< in bytes */
37 u16 vbt_size; /**< in bytes */
38 u8 vbt_checksum;
39 u8 reserved0;
40 u32 bdb_offset; /**< from beginning of VBT */
41 u32 aim_offset[4]; /**< from beginning of VBT */
42} __attribute__((packed));
43
44struct bdb_header {
45 u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
46 u16 version; /**< decimal */
47 u16 header_size; /**< in bytes */
48 u16 bdb_size; /**< in bytes */
49};
50
51/* strictly speaking, this is a "skip" block, but it has interesting info */
52struct vbios_data {
53 u8 type; /* 0 == desktop, 1 == mobile */
54 u8 relstage;
55 u8 chipset;
56 u8 lvds_present:1;
57 u8 tv_present:1;
58 u8 rsvd2:6; /* finish byte */
59 u8 rsvd3[4];
60 u8 signon[155];
61 u8 copyright[61];
62 u16 code_segment;
63 u8 dos_boot_mode;
64 u8 bandwidth_percent;
65 u8 rsvd4; /* popup memory size */
66 u8 resize_pci_bios;
67 u8 rsvd5; /* is crt already on ddc2 */
68} __attribute__((packed));
69
70/*
71 * There are several types of BIOS data blocks (BDBs), each block has
72 * an ID and size in the first 3 bytes (ID in first, size in next 2).
73 * Known types are listed below.
74 */
75#define BDB_GENERAL_FEATURES 1
76#define BDB_GENERAL_DEFINITIONS 2
77#define BDB_OLD_TOGGLE_LIST 3
78#define BDB_MODE_SUPPORT_LIST 4
79#define BDB_GENERIC_MODE_TABLE 5
80#define BDB_EXT_MMIO_REGS 6
81#define BDB_SWF_IO 7
82#define BDB_SWF_MMIO 8
83#define BDB_DOT_CLOCK_TABLE 9
84#define BDB_MODE_REMOVAL_TABLE 10
85#define BDB_CHILD_DEVICE_TABLE 11
86#define BDB_DRIVER_FEATURES 12
87#define BDB_DRIVER_PERSISTENCE 13
88#define BDB_EXT_TABLE_PTRS 14
89#define BDB_DOT_CLOCK_OVERRIDE 15
90#define BDB_DISPLAY_SELECT 16
91/* 17 rsvd */
92#define BDB_DRIVER_ROTATION 18
93#define BDB_DISPLAY_REMOVE 19
94#define BDB_OEM_CUSTOM 20
95#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
96#define BDB_SDVO_LVDS_OPTIONS 22
97#define BDB_SDVO_PANEL_DTDS 23
98#define BDB_SDVO_LVDS_PNP_IDS 24
99#define BDB_SDVO_LVDS_POWER_SEQ 25
100#define BDB_TV_OPTIONS 26
101#define BDB_LVDS_OPTIONS 40
102#define BDB_LVDS_LFP_DATA_PTRS 41
103#define BDB_LVDS_LFP_DATA 42
104#define BDB_LVDS_BACKLIGHT 43
105#define BDB_LVDS_POWER 44
106#define BDB_SKIP 254 /* VBIOS private block, ignore */
107
108struct bdb_general_features {
109 /* bits 1 */
110 u8 panel_fitting:2;
111 u8 flexaim:1;
112 u8 msg_enable:1;
113 u8 clear_screen:3;
114 u8 color_flip:1;
115
116 /* bits 2 */
117 u8 download_ext_vbt:1;
118 u8 enable_ssc:1;
119 u8 ssc_freq:1;
120 u8 enable_lfp_on_override:1;
121 u8 disable_ssc_ddt:1;
122 u8 rsvd8:3; /* finish byte */
123
124 /* bits 3 */
125 u8 disable_smooth_vision:1;
126 u8 single_dvi:1;
127 u8 rsvd9:6; /* finish byte */
128
129 /* bits 4 */
130 u8 legacy_monitor_detect;
131
132 /* bits 5 */
133 u8 int_crt_support:1;
134 u8 int_tv_support:1;
135 u8 rsvd11:6; /* finish byte */
136} __attribute__((packed));
137
138struct bdb_general_definitions {
139 /* DDC GPIO */
140 u8 crt_ddc_gmbus_pin;
141
142 /* DPMS bits */
143 u8 dpms_acpi:1;
144 u8 skip_boot_crt_detect:1;
145 u8 dpms_aim:1;
146 u8 rsvd1:5; /* finish byte */
147
148 /* boot device bits */
149 u8 boot_display[2];
150 u8 child_dev_size;
151
152 /* device info */
153 u8 tv_or_lvds_info[33];
154 u8 dev1[33];
155 u8 dev2[33];
156 u8 dev3[33];
157 u8 dev4[33];
158 /* may be another device block here on some platforms */
159};
160
161struct bdb_lvds_options {
162 u8 panel_type;
163 u8 rsvd1;
164 /* LVDS capabilities, stored in a dword */
165 u8 rsvd2:1;
166 u8 lvds_edid:1;
167 u8 pixel_dither:1;
168 u8 pfit_ratio_auto:1;
169 u8 pfit_gfx_mode_enhanced:1;
170 u8 pfit_text_mode_enhanced:1;
171 u8 pfit_mode:2;
172 u8 rsvd4;
173} __attribute__((packed));
174
175/* LFP pointer table contains entries to the struct below */
176struct bdb_lvds_lfp_data_ptr {
177 u16 fp_timing_offset; /* offsets are from start of bdb */
178 u8 fp_table_size;
179 u16 dvo_timing_offset;
180 u8 dvo_table_size;
181 u16 panel_pnp_id_offset;
182 u8 pnp_table_size;
183} __attribute__((packed));
184
185struct bdb_lvds_lfp_data_ptrs {
186 u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
187 struct bdb_lvds_lfp_data_ptr ptr[16];
188} __attribute__((packed));
189
190/* LFP data has 3 blocks per entry */
191struct lvds_fp_timing {
192 u16 x_res;
193 u16 y_res;
194 u32 lvds_reg;
195 u32 lvds_reg_val;
196 u32 pp_on_reg;
197 u32 pp_on_reg_val;
198 u32 pp_off_reg;
199 u32 pp_off_reg_val;
200 u32 pp_cycle_reg;
201 u32 pp_cycle_reg_val;
202 u32 pfit_reg;
203 u32 pfit_reg_val;
204 u16 terminator;
205} __attribute__((packed));
206
207struct lvds_dvo_timing {
208 u16 clock; /**< In 10khz */
209 u8 hactive_lo;
210 u8 hblank_lo;
211 u8 hblank_hi:4;
212 u8 hactive_hi:4;
213 u8 vactive_lo;
214 u8 vblank_lo;
215 u8 vblank_hi:4;
216 u8 vactive_hi:4;
217 u8 hsync_off_lo;
218 u8 hsync_pulse_width;
219 u8 vsync_pulse_width:4;
220 u8 vsync_off:4;
221 u8 rsvd0:6;
222 u8 hsync_off_hi:2;
223 u8 h_image;
224 u8 v_image;
225 u8 max_hv;
226 u8 h_border;
227 u8 v_border;
228 u8 rsvd1:3;
229 u8 digital:2;
230 u8 vsync_positive:1;
231 u8 hsync_positive:1;
232 u8 rsvd2:1;
233} __attribute__((packed));
234
235struct lvds_pnp_id {
236 u16 mfg_name;
237 u16 product_code;
238 u32 serial;
239 u8 mfg_week;
240 u8 mfg_year;
241} __attribute__((packed));
242
243struct bdb_lvds_lfp_data_entry {
244 struct lvds_fp_timing fp_timing;
245 struct lvds_dvo_timing dvo_timing;
246 struct lvds_pnp_id pnp_id;
247} __attribute__((packed));
248
249struct bdb_lvds_lfp_data {
250 struct bdb_lvds_lfp_data_entry data[16];
251} __attribute__((packed));
252
253struct aimdb_header {
254 char signature[16];
255 char oem_device[20];
256 u16 aimdb_version;
257 u16 aimdb_header_size;
258 u16 aimdb_size;
259} __attribute__((packed));
260
261struct aimdb_block {
262 u8 aimdb_id;
263 u16 aimdb_size;
264} __attribute__((packed));
265
266struct vch_panel_data {
267 u16 fp_timing_offset;
268 u8 fp_timing_size;
269 u16 dvo_timing_offset;
270 u8 dvo_timing_size;
271 u16 text_fitting_offset;
272 u8 text_fitting_size;
273 u16 graphics_fitting_offset;
274 u8 graphics_fitting_size;
275} __attribute__((packed));
276
277struct vch_bdb_22 {
278 struct aimdb_block aimdb_block;
279 struct vch_panel_data panels[16];
280} __attribute__((packed));
281
282bool intel_init_bios(struct drm_device *dev);
283
284/*
285 * Driver<->VBIOS interaction occurs through scratch bits in
286 * GR18 & SWF*.
287 */
288
289/* GR18 bits are set on display switch and hotkey events */
290#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
291#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
292#define GR18_HK_NONE (0x0<<3)
293#define GR18_HK_LFP_STRETCH (0x1<<3)
294#define GR18_HK_TOGGLE_DISP (0x2<<3)
295#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
296#define GR18_HK_POPUP_DISABLED (0x6<<3)
297#define GR18_HK_POPUP_ENABLED (0x7<<3)
298#define GR18_HK_PFIT (0x8<<3)
299#define GR18_HK_APM_CHANGE (0xa<<3)
300#define GR18_HK_MULTIPLE (0xc<<3)
301#define GR18_USER_INT_EN (1<<2)
302#define GR18_A0000_FLUSH_EN (1<<1)
303#define GR18_SMM_EN (1<<0)
304
305/* Set by driver, cleared by VBIOS */
306#define SWF00_YRES_SHIFT 16
307#define SWF00_XRES_SHIFT 0
308#define SWF00_RES_MASK 0xffff
309
310/* Set by VBIOS at boot time and driver at runtime */
311#define SWF01_TV2_FORMAT_SHIFT 8
312#define SWF01_TV1_FORMAT_SHIFT 0
313#define SWF01_TV_FORMAT_MASK 0xffff
314
315#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
316#define SWF10_GTT_OVERRIDE_EN (1<<28)
317#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
318#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
319#define SWF10_OLD_TOGGLE 0x0
320#define SWF10_TOGGLE_LIST_1 0x1
321#define SWF10_TOGGLE_LIST_2 0x2
322#define SWF10_TOGGLE_LIST_3 0x3
323#define SWF10_TOGGLE_LIST_4 0x4
324#define SWF10_PANNING_EN (1<<23)
325#define SWF10_DRIVER_LOADED (1<<22)
326#define SWF10_EXTENDED_DESKTOP (1<<21)
327#define SWF10_EXCLUSIVE_MODE (1<<20)
328#define SWF10_OVERLAY_EN (1<<19)
329#define SWF10_PLANEB_HOLDOFF (1<<18)
330#define SWF10_PLANEA_HOLDOFF (1<<17)
331#define SWF10_VGA_HOLDOFF (1<<16)
332#define SWF10_ACTIVE_DISP_MASK 0xffff
333#define SWF10_PIPEB_LFP2 (1<<15)
334#define SWF10_PIPEB_EFP2 (1<<14)
335#define SWF10_PIPEB_TV2 (1<<13)
336#define SWF10_PIPEB_CRT2 (1<<12)
337#define SWF10_PIPEB_LFP (1<<11)
338#define SWF10_PIPEB_EFP (1<<10)
339#define SWF10_PIPEB_TV (1<<9)
340#define SWF10_PIPEB_CRT (1<<8)
341#define SWF10_PIPEA_LFP2 (1<<7)
342#define SWF10_PIPEA_EFP2 (1<<6)
343#define SWF10_PIPEA_TV2 (1<<5)
344#define SWF10_PIPEA_CRT2 (1<<4)
345#define SWF10_PIPEA_LFP (1<<3)
346#define SWF10_PIPEA_EFP (1<<2)
347#define SWF10_PIPEA_TV (1<<1)
348#define SWF10_PIPEA_CRT (1<<0)
349
350#define SWF11_MEMORY_SIZE_SHIFT 16
351#define SWF11_SV_TEST_EN (1<<15)
352#define SWF11_IS_AGP (1<<14)
353#define SWF11_DISPLAY_HOLDOFF (1<<13)
354#define SWF11_DPMS_REDUCED (1<<12)
355#define SWF11_IS_VBE_MODE (1<<11)
356#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
357#define SWF11_DPMS_MASK 0x07
358#define SWF11_DPMS_OFF (1<<2)
359#define SWF11_DPMS_SUSPEND (1<<1)
360#define SWF11_DPMS_STANDBY (1<<0)
361#define SWF11_DPMS_ON 0
362
363#define SWF14_GFX_PFIT_EN (1<<31)
364#define SWF14_TEXT_PFIT_EN (1<<30)
365#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
366#define SWF14_POPUP_EN (1<<28)
367#define SWF14_DISPLAY_HOLDOFF (1<<27)
368#define SWF14_DISP_DETECT_EN (1<<26)
369#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
370#define SWF14_DRIVER_STATUS (1<<24)
371#define SWF14_OS_TYPE_WIN9X (1<<23)
372#define SWF14_OS_TYPE_WINNT (1<<22)
373/* 21:19 rsvd */
374#define SWF14_PM_TYPE_MASK 0x00070000
375#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
376#define SWF14_PM_ACPI (0x3 << 16)
377#define SWF14_PM_APM_12 (0x2 << 16)
378#define SWF14_PM_APM_11 (0x1 << 16)
379#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
380 /* if GR18 indicates a display switch */
381#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
382#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
383#define SWF14_DS_PIPEB_TV2_EN (1<<13)
384#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
385#define SWF14_DS_PIPEB_LFP_EN (1<<11)
386#define SWF14_DS_PIPEB_EFP_EN (1<<10)
387#define SWF14_DS_PIPEB_TV_EN (1<<9)
388#define SWF14_DS_PIPEB_CRT_EN (1<<8)
389#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
390#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
391#define SWF14_DS_PIPEA_TV2_EN (1<<5)
392#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
393#define SWF14_DS_PIPEA_LFP_EN (1<<3)
394#define SWF14_DS_PIPEA_EFP_EN (1<<2)
395#define SWF14_DS_PIPEA_TV_EN (1<<1)
396#define SWF14_DS_PIPEA_CRT_EN (1<<0)
397 /* if GR18 indicates a panel fitting request */
398#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
399 /* if GR18 indicates an APM change request */
400#define SWF14_APM_HIBERNATE 0x4
401#define SWF14_APM_SUSPEND 0x3
402#define SWF14_APM_STANDBY 0x1
403#define SWF14_APM_RESTORE 0x0
404
405#endif /* _I830_BIOS_H_ */
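
[Editor's note, not part of the patch] struct lvds_dvo_timing above splits most timing values into a low byte plus a high nibble; parse_panel_data() in intel_bios.c reassembles them as (hi << 8) | lo and derives the sync and total values by adding offsets. A standalone sketch of that reassembly for the horizontal timings only; the cut-down struct and the numbers are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Just the horizontal fields of a DVO timing descriptor, split the way
 * struct lvds_dvo_timing splits them (low byte + high bits). */
struct dvo_h_timing {
    uint8_t hactive_lo, hactive_hi;
    uint8_t hblank_lo,  hblank_hi;
    uint8_t hsync_off_lo, hsync_off_hi;
    uint8_t hsync_pulse_width;
};

int main(void)
{
    /* 1280 active pixels, 160 pixel blank, sync starts 48 after active. */
    struct dvo_h_timing t = {
        .hactive_lo = 0x00, .hactive_hi = 0x5,
        .hblank_lo  = 0xa0, .hblank_hi  = 0x0,
        .hsync_off_lo = 48, .hsync_off_hi = 0,
        .hsync_pulse_width = 32,
    };
    int hdisplay    = (t.hactive_hi << 8) | t.hactive_lo;
    int hsync_start = hdisplay + ((t.hsync_off_hi << 8) | t.hsync_off_lo);
    int hsync_end   = hsync_start + t.hsync_pulse_width;
    int htotal      = hdisplay + ((t.hblank_hi << 8) | t.hblank_lo);

    printf("hdisplay=%d hsync=%d-%d htotal=%d\n",
           hdisplay, hsync_start, hsync_end, htotal);
    return 0;
}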
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
new file mode 100644
index 000000000000..dcaed3466e83
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -0,0 +1,284 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27#include <linux/i2c.h>
28#include "drmP.h"
29#include "drm.h"
30#include "drm_crtc.h"
31#include "drm_crtc_helper.h"
32#include "intel_drv.h"
33#include "i915_drm.h"
34#include "i915_drv.h"
35
36static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
37{
38 struct drm_device *dev = encoder->dev;
39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp;
41
42 temp = I915_READ(ADPA);
43 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
44 temp &= ~ADPA_DAC_ENABLE;
45
46 switch(mode) {
47 case DRM_MODE_DPMS_ON:
48 temp |= ADPA_DAC_ENABLE;
49 break;
50 case DRM_MODE_DPMS_STANDBY:
51 temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
52 break;
53 case DRM_MODE_DPMS_SUSPEND:
54 temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
55 break;
56 case DRM_MODE_DPMS_OFF:
57 temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
58 break;
59 }
60
61 I915_WRITE(ADPA, temp);
62}
63
64static int intel_crt_mode_valid(struct drm_connector *connector,
65 struct drm_display_mode *mode)
66{
67 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
68 return MODE_NO_DBLESCAN;
69
70 if (mode->clock > 400000 || mode->clock < 25000)
71 return MODE_CLOCK_RANGE;
72
73 return MODE_OK;
74}
75
76static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
77 struct drm_display_mode *mode,
78 struct drm_display_mode *adjusted_mode)
79{
80 return true;
81}
82
83static void intel_crt_mode_set(struct drm_encoder *encoder,
84 struct drm_display_mode *mode,
85 struct drm_display_mode *adjusted_mode)
86{
87
88 struct drm_device *dev = encoder->dev;
89 struct drm_crtc *crtc = encoder->crtc;
90 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
91 struct drm_i915_private *dev_priv = dev->dev_private;
92 int dpll_md_reg;
93 u32 adpa, dpll_md;
94
95 if (intel_crtc->pipe == 0)
96 dpll_md_reg = DPLL_A_MD;
97 else
98 dpll_md_reg = DPLL_B_MD;
99
100 /*
101 * Disable separate mode multiplier used when cloning SDVO to CRT
102 * XXX this needs to be adjusted when we really are cloning
103 */
104 if (IS_I965G(dev)) {
105 dpll_md = I915_READ(dpll_md_reg);
106 I915_WRITE(dpll_md_reg,
107 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
108 }
109
110 adpa = 0;
111 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
112 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
113 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
114 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
115
116 if (intel_crtc->pipe == 0)
117 adpa |= ADPA_PIPE_A_SELECT;
118 else
119 adpa |= ADPA_PIPE_B_SELECT;
120
121 I915_WRITE(ADPA, adpa);
122}
123
124/**
125 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
126 *
127 * Not for i915G/i915GM
128 *
129 * \return true if CRT is connected.
130 * \return false if CRT is disconnected.
131 */
132static bool intel_crt_detect_hotplug(struct drm_connector *connector)
133{
134 struct drm_device *dev = connector->dev;
135 struct drm_i915_private *dev_priv = dev->dev_private;
136 u32 temp;
137
138 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
139
140 temp = I915_READ(PORT_HOTPLUG_EN);
141
142 I915_WRITE(PORT_HOTPLUG_EN,
143 temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
144
145 do {
146 if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
147 break;
148 msleep(1);
149 } while (time_after(timeout, jiffies));
150
151 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
152 CRT_HOTPLUG_MONITOR_COLOR)
153 return true;
154
155 return false;
156}
157
158static bool intel_crt_detect_ddc(struct drm_connector *connector)
159{
160 struct intel_output *intel_output = to_intel_output(connector);
161
162 /* CRT should always be at 0, but check anyway */
163 if (intel_output->type != INTEL_OUTPUT_ANALOG)
164 return false;
165
166 return intel_ddc_probe(intel_output);
167}
168
169static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
170{
171 struct drm_device *dev = connector->dev;
172
173 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
174 if (intel_crt_detect_hotplug(connector))
175 return connector_status_connected;
176 else
177 return connector_status_disconnected;
178 }
179
180 if (intel_crt_detect_ddc(connector))
181 return connector_status_connected;
182
183 /* TODO use load detect */
184 return connector_status_unknown;
185}
186
187static void intel_crt_destroy(struct drm_connector *connector)
188{
189 struct intel_output *intel_output = to_intel_output(connector);
190
191 intel_i2c_destroy(intel_output->ddc_bus);
192 drm_sysfs_connector_remove(connector);
193 drm_connector_cleanup(connector);
194 kfree(connector);
195}
196
197static int intel_crt_get_modes(struct drm_connector *connector)
198{
199 struct intel_output *intel_output = to_intel_output(connector);
200 return intel_ddc_get_modes(intel_output);
201}
202
203static int intel_crt_set_property(struct drm_connector *connector,
204 struct drm_property *property,
205 uint64_t value)
206{
207 struct drm_device *dev = connector->dev;
208
209 if (property == dev->mode_config.dpms_property && connector->encoder)
210 intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf));
211
212 return 0;
213}
214
215/*
216 * Routines for controlling stuff on the analog port
217 */
218
219static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
220 .dpms = intel_crt_dpms,
221 .mode_fixup = intel_crt_mode_fixup,
222 .prepare = intel_encoder_prepare,
223 .commit = intel_encoder_commit,
224 .mode_set = intel_crt_mode_set,
225};
226
227static const struct drm_connector_funcs intel_crt_connector_funcs = {
228 .detect = intel_crt_detect,
229 .fill_modes = drm_helper_probe_single_connector_modes,
230 .destroy = intel_crt_destroy,
231 .set_property = intel_crt_set_property,
232};
233
234static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
235 .mode_valid = intel_crt_mode_valid,
236 .get_modes = intel_crt_get_modes,
237 .best_encoder = intel_best_encoder,
238};
239
240static void intel_crt_enc_destroy(struct drm_encoder *encoder)
241{
242 drm_encoder_cleanup(encoder);
243}
244
245static const struct drm_encoder_funcs intel_crt_enc_funcs = {
246 .destroy = intel_crt_enc_destroy,
247};
248
249void intel_crt_init(struct drm_device *dev)
250{
251 struct drm_connector *connector;
252 struct intel_output *intel_output;
253
254 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
255 if (!intel_output)
256 return;
257
258 connector = &intel_output->base;
259 drm_connector_init(dev, &intel_output->base,
260 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
261
262 drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
263 DRM_MODE_ENCODER_DAC);
264
265 drm_mode_connector_attach_encoder(&intel_output->base,
266 &intel_output->enc);
267
268 /* Set up the DDC bus. */
269 intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
270 if (!intel_output->ddc_bus) {
271 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
272 "failed.\n");
273 return;
274 }
275
276 intel_output->type = INTEL_OUTPUT_ANALOG;
277 connector->interlace_allowed = 0;
278 connector->doublescan_allowed = 0;
279
280 drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
281 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
282
283 drm_sysfs_connector_add(connector);
284}
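
[Editor's note, not part of the patch] intel_crt_detect_hotplug() above follows a common MMIO pattern: set a force-detect trigger bit, poll until the hardware clears it or roughly a second passes, and only then inspect the status bits. A standalone sketch of that trigger-and-poll loop against a simulated register; every name and value here is a stand-in, no real hardware access is involved.

#include <stdio.h>
#include <stdint.h>

#define FORCE_DETECT (1u << 3)      /* stand-in for CRT_HOTPLUG_FORCE_DETECT */

static uint32_t fake_reg;           /* simulated hotplug register */
static int hw_countdown = 3;        /* "hardware" finishes after 3 reads */

static uint32_t reg_read(void)
{
    if (hw_countdown > 0 && --hw_countdown == 0)
        fake_reg &= ~FORCE_DETECT;  /* detection cycle complete */
    return fake_reg;
}

int main(void)
{
    int polls = 0, done = 0;

    fake_reg |= FORCE_DETECT;       /* kick off a detection cycle */

    /* Poll until the trigger bit clears or we give up, mirroring the
     * one-second do/while loop in intel_crt_detect_hotplug(). */
    while (polls < 1000) {
        polls++;
        if (!(reg_read() & FORCE_DETECT)) {
            done = 1;
            break;
        }
    }

    printf("%s after %d polls\n", done ? "completed" : "timed out", polls);
    return 0;
}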
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
new file mode 100644
index 000000000000..e5c1c80d1f90
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -0,0 +1,1618 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27#include <linux/i2c.h>
28#include "drmP.h"
29#include "intel_drv.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32
33#include "drm_crtc_helper.h"
34
35bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
36
37typedef struct {
38 /* given values */
39 int n;
40 int m1, m2;
41 int p1, p2;
42 /* derived values */
43 int dot;
44 int vco;
45 int m;
46 int p;
47} intel_clock_t;
48
49typedef struct {
50 int min, max;
51} intel_range_t;
52
53typedef struct {
54 int dot_limit;
55 int p2_slow, p2_fast;
56} intel_p2_t;
57
58#define INTEL_P2_NUM 2
59
60typedef struct {
61 intel_range_t dot, vco, n, m, m1, m2, p, p1;
62 intel_p2_t p2;
63} intel_limit_t;
64
65#define I8XX_DOT_MIN 25000
66#define I8XX_DOT_MAX 350000
67#define I8XX_VCO_MIN 930000
68#define I8XX_VCO_MAX 1400000
69#define I8XX_N_MIN 3
70#define I8XX_N_MAX 16
71#define I8XX_M_MIN 96
72#define I8XX_M_MAX 140
73#define I8XX_M1_MIN 18
74#define I8XX_M1_MAX 26
75#define I8XX_M2_MIN 6
76#define I8XX_M2_MAX 16
77#define I8XX_P_MIN 4
78#define I8XX_P_MAX 128
79#define I8XX_P1_MIN 2
80#define I8XX_P1_MAX 33
81#define I8XX_P1_LVDS_MIN 1
82#define I8XX_P1_LVDS_MAX 6
83#define I8XX_P2_SLOW 4
84#define I8XX_P2_FAST 2
85#define I8XX_P2_LVDS_SLOW 14
86#define I8XX_P2_LVDS_FAST 14 /* No fast option */
87#define I8XX_P2_SLOW_LIMIT 165000
88
89#define I9XX_DOT_MIN 20000
90#define I9XX_DOT_MAX 400000
91#define I9XX_VCO_MIN 1400000
92#define I9XX_VCO_MAX 2800000
93#define I9XX_N_MIN 3
94#define I9XX_N_MAX 8
95#define I9XX_M_MIN 70
96#define I9XX_M_MAX 120
97#define I9XX_M1_MIN 10
98#define I9XX_M1_MAX 20
99#define I9XX_M2_MIN 5
100#define I9XX_M2_MAX 9
101#define I9XX_P_SDVO_DAC_MIN 5
102#define I9XX_P_SDVO_DAC_MAX 80
103#define I9XX_P_LVDS_MIN 7
104#define I9XX_P_LVDS_MAX 98
105#define I9XX_P1_MIN 1
106#define I9XX_P1_MAX 8
107#define I9XX_P2_SDVO_DAC_SLOW 10
108#define I9XX_P2_SDVO_DAC_FAST 5
109#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
110#define I9XX_P2_LVDS_SLOW 14
111#define I9XX_P2_LVDS_FAST 7
112#define I9XX_P2_LVDS_SLOW_LIMIT 112000
113
114#define INTEL_LIMIT_I8XX_DVO_DAC 0
115#define INTEL_LIMIT_I8XX_LVDS 1
116#define INTEL_LIMIT_I9XX_SDVO_DAC 2
117#define INTEL_LIMIT_I9XX_LVDS 3
118
119static const intel_limit_t intel_limits[] = {
120 { /* INTEL_LIMIT_I8XX_DVO_DAC */
121 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
122 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
123 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
124 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
125 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
126 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
127 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
128 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
129 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
130 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
131 },
132 { /* INTEL_LIMIT_I8XX_LVDS */
133 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
134 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
135 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
136 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
137 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
138 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
139 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
140 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
141 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
142 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
143 },
144 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
145 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
146 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
147 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
148 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
149 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
150 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
151 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
152 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
153 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
154 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
155 },
156 { /* INTEL_LIMIT_I9XX_LVDS */
157 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
158 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
159 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
160 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
161 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
162 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
163 .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX },
164 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
165 /* The single-channel range is 25-112Mhz, and dual-channel
166 * is 80-224Mhz. Prefer single channel as much as possible.
167 */
168 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
169 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
170 },
171};
172
173static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
174{
175 struct drm_device *dev = crtc->dev;
176 const intel_limit_t *limit;
177
178 if (IS_I9XX(dev)) {
179 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
180 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
181 else
182 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
183 } else {
184 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
185 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
186 else
187 limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
188 }
189 return limit;
190}
191
192/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
193
194static void i8xx_clock(int refclk, intel_clock_t *clock)
195{
196 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
197 clock->p = clock->p1 * clock->p2;
198 clock->vco = refclk * clock->m / (clock->n + 2);
199 clock->dot = clock->vco / clock->p;
200}
201
202/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
203
204static void i9xx_clock(int refclk, intel_clock_t *clock)
205{
206 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
207 clock->p = clock->p1 * clock->p2;
208 clock->vco = refclk * clock->m / (clock->n + 2);
209 clock->dot = clock->vco / clock->p;
210}
211
212static void intel_clock(struct drm_device *dev, int refclk,
213 intel_clock_t *clock)
214{
215 if (IS_I9XX(dev))
216 i9xx_clock (refclk, clock);
217 else
218 i8xx_clock (refclk, clock);
219}
220
221/**
222 * Returns whether any output on the specified pipe is of the specified type
223 */
224bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
225{
226 struct drm_device *dev = crtc->dev;
227 struct drm_mode_config *mode_config = &dev->mode_config;
228 struct drm_connector *l_entry;
229
230 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
231 if (l_entry->encoder &&
232 l_entry->encoder->crtc == crtc) {
233 struct intel_output *intel_output = to_intel_output(l_entry);
234 if (intel_output->type == type)
235 return true;
236 }
237 }
238 return false;
239}
240
241#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
242/**
243 * Returns whether the given set of divisors are valid for a given refclk with
244 * the given connectors.
245 */
246
247static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
248{
249 const intel_limit_t *limit = intel_limit (crtc);
250
251 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
252 INTELPllInvalid ("p1 out of range\n");
253 if (clock->p < limit->p.min || limit->p.max < clock->p)
254 INTELPllInvalid ("p out of range\n");
255 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
256 INTELPllInvalid ("m2 out of range\n");
257 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
258 INTELPllInvalid ("m1 out of range\n");
259 if (clock->m1 <= clock->m2)
260 INTELPllInvalid ("m1 <= m2\n");
261 if (clock->m < limit->m.min || limit->m.max < clock->m)
262 INTELPllInvalid ("m out of range\n");
263 if (clock->n < limit->n.min || limit->n.max < clock->n)
264 INTELPllInvalid ("n out of range\n");
265 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
266 INTELPllInvalid ("vco out of range\n");
267 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
268 * connector, etc., rather than just a single range.
269 */
270 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
271 INTELPllInvalid ("dot out of range\n");
272
273 return true;
274}
275
276/**
277 * Returns a set of divisors for the desired target clock with the given
278 * refclk, or FALSE. The returned values represent the clock equation:
279 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
280 */
281static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
282 int refclk, intel_clock_t *best_clock)
283{
284 struct drm_device *dev = crtc->dev;
285 struct drm_i915_private *dev_priv = dev->dev_private;
286 intel_clock_t clock;
287 const intel_limit_t *limit = intel_limit(crtc);
288 int err = target;
289
290 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
291 (I915_READ(LVDS) & LVDS_PORT_EN) != 0) {
292 /*
293 * For LVDS, if the panel is on, just rely on its current
294 * settings for dual-channel. We haven't figured out how to
295 * reliably set up different single/dual channel state, if we
296 * even can.
297 */
298 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
299 LVDS_CLKB_POWER_UP)
300 clock.p2 = limit->p2.p2_fast;
301 else
302 clock.p2 = limit->p2.p2_slow;
303 } else {
304 if (target < limit->p2.dot_limit)
305 clock.p2 = limit->p2.p2_slow;
306 else
307 clock.p2 = limit->p2.p2_fast;
308 }
309
310 memset (best_clock, 0, sizeof (*best_clock));
311
312 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
313 for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
314 clock.m2 <= limit->m2.max; clock.m2++) {
315 for (clock.n = limit->n.min; clock.n <= limit->n.max;
316 clock.n++) {
317 for (clock.p1 = limit->p1.min;
318 clock.p1 <= limit->p1.max; clock.p1++) {
319 int this_err;
320
321 intel_clock(dev, refclk, &clock);
322
323 if (!intel_PLL_is_valid(crtc, &clock))
324 continue;
325
326 this_err = abs(clock.dot - target);
327 if (this_err < err) {
328 *best_clock = clock;
329 err = this_err;
330 }
331 }
332 }
333 }
334 }
335
336 return (err != target);
337}
338
338
339void
340intel_wait_for_vblank(struct drm_device *dev)
341{
342 /* Wait for 20ms, i.e. one cycle at 50hz. */
343 udelay(20000);
344}
345
346static void
347intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
348 struct drm_framebuffer *old_fb)
349{
350 struct drm_device *dev = crtc->dev;
351 struct drm_i915_private *dev_priv = dev->dev_private;
352 struct drm_i915_master_private *master_priv;
353 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
354 struct intel_framebuffer *intel_fb;
355 struct drm_i915_gem_object *obj_priv;
356 struct drm_gem_object *obj;
357 int pipe = intel_crtc->pipe;
358 unsigned long Start, Offset;
359 int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
360 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
361 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
362 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
363 u32 dspcntr, alignment;
364
365 /* no fb bound */
366 if (!crtc->fb) {
367 DRM_DEBUG("No FB bound\n");
368 return;
369 }
370
371 intel_fb = to_intel_framebuffer(crtc->fb);
372 obj = intel_fb->obj;
373 obj_priv = obj->driver_private;
374
375 switch (obj_priv->tiling_mode) {
376 case I915_TILING_NONE:
377 alignment = 64 * 1024;
378 break;
379 case I915_TILING_X:
380 if (IS_I9XX(dev))
381 alignment = 1024 * 1024;
382 else
383 alignment = 512 * 1024;
384 break;
385 case I915_TILING_Y:
386 /* FIXME: Is this true? */
387 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
388 return;
389 default:
390 BUG();
391 }
392
393 if (i915_gem_object_pin(intel_fb->obj, alignment))
394 return;
395
396 i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
397
398 Start = obj_priv->gtt_offset;
399 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
400
401 I915_WRITE(dspstride, crtc->fb->pitch);
402
403 dspcntr = I915_READ(dspcntr_reg);
404 switch (crtc->fb->bits_per_pixel) {
405 case 8:
406 dspcntr |= DISPPLANE_8BPP;
407 break;
408 case 16:
409 if (crtc->fb->depth == 15)
410 dspcntr |= DISPPLANE_15_16BPP;
411 else
412 dspcntr |= DISPPLANE_16BPP;
413 break;
414 case 24:
415 case 32:
416 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
417 break;
418 default:
419 DRM_ERROR("Unknown color depth\n");
420 return;
421 }
422 I915_WRITE(dspcntr_reg, dspcntr);
423
424 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
425 if (IS_I965G(dev)) {
426 I915_WRITE(dspbase, Offset);
427 I915_READ(dspbase);
428 I915_WRITE(dspsurf, Start);
429 I915_READ(dspsurf);
430 } else {
431 I915_WRITE(dspbase, Start + Offset);
432 I915_READ(dspbase);
433 }
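	/*
	 * On 965-class hardware the plane takes a surface base (DSPASURF /
	 * DSPBSURF) plus a separate x/y byte offset register, while older
	 * parts take a single linear address (Start + Offset).  The reads of
	 * the just-written registers appear to act as posting reads that
	 * flush the writes out.
	 */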
434
435 intel_wait_for_vblank(dev);
436
437 if (old_fb) {
438 intel_fb = to_intel_framebuffer(old_fb);
439 i915_gem_object_unpin(intel_fb->obj);
440 }
441
442 if (!dev->primary->master)
443 return;
444
445 master_priv = dev->primary->master->driver_priv;
446 if (!master_priv->sarea_priv)
447 return;
448
449 switch (pipe) {
450 case 0:
451 master_priv->sarea_priv->pipeA_x = x;
452 master_priv->sarea_priv->pipeA_y = y;
453 break;
454 case 1:
455 master_priv->sarea_priv->pipeB_x = x;
456 master_priv->sarea_priv->pipeB_y = y;
457 break;
458 default:
459 DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
460 break;
461 }
462}
463
464
465
466/**
467 * Sets the power management mode of the pipe and plane.
468 *
469 * This code should probably grow support for turning the cursor off and back
470 * on appropriately at the same time as we're turning the pipe off/on.
471 */
472static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
473{
474 struct drm_device *dev = crtc->dev;
475 struct drm_i915_master_private *master_priv;
476 struct drm_i915_private *dev_priv = dev->dev_private;
477 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
478 int pipe = intel_crtc->pipe;
479 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
480 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
481 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
482 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
483 u32 temp;
484 bool enabled;
485
486 /* XXX: When our outputs are all unaware of DPMS modes other than off
487 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
488 */
489 switch (mode) {
490 case DRM_MODE_DPMS_ON:
491 case DRM_MODE_DPMS_STANDBY:
492 case DRM_MODE_DPMS_SUSPEND:
493 /* Enable the DPLL */
494 temp = I915_READ(dpll_reg);
495 if ((temp & DPLL_VCO_ENABLE) == 0) {
496 I915_WRITE(dpll_reg, temp);
497 I915_READ(dpll_reg);
498 /* Wait for the clocks to stabilize. */
499 udelay(150);
500 I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
501 I915_READ(dpll_reg);
502 /* Wait for the clocks to stabilize. */
503 udelay(150);
504 I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
505 I915_READ(dpll_reg);
506 /* Wait for the clocks to stabilize. */
507 udelay(150);
508 }
509
510 /* Enable the pipe */
511 temp = I915_READ(pipeconf_reg);
512 if ((temp & PIPEACONF_ENABLE) == 0)
513 I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
514
515 /* Enable the plane */
516 temp = I915_READ(dspcntr_reg);
517 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
518 I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
519 /* Flush the plane changes */
520 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
521 }
522
523 intel_crtc_load_lut(crtc);
524
525 /* Give the overlay scaler a chance to enable if it's on this pipe */
526 //intel_crtc_dpms_video(crtc, true); TODO
527 break;
528 case DRM_MODE_DPMS_OFF:
529 /* Give the overlay scaler a chance to disable if it's on this pipe */
530 //intel_crtc_dpms_video(crtc, FALSE); TODO
531
532 /* Disable the VGA plane that we never use */
533 I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
534
535 /* Disable display plane */
536 temp = I915_READ(dspcntr_reg);
537 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
538 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
539 /* Flush the plane changes */
540 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
541 I915_READ(dspbase_reg);
542 }
543
544 if (!IS_I9XX(dev)) {
545 /* Wait for vblank for the disable to take effect */
546 intel_wait_for_vblank(dev);
547 }
548
549 /* Next, disable display pipes */
550 temp = I915_READ(pipeconf_reg);
551 if ((temp & PIPEACONF_ENABLE) != 0) {
552 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
553 I915_READ(pipeconf_reg);
554 }
555
556 /* Wait for vblank for the disable to take effect. */
557 intel_wait_for_vblank(dev);
558
559 temp = I915_READ(dpll_reg);
560 if ((temp & DPLL_VCO_ENABLE) != 0) {
561 I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
562 I915_READ(dpll_reg);
563 }
564
565 /* Wait for the clocks to turn off. */
566 udelay(150);
567 break;
568 }
569
570 if (!dev->primary->master)
571 return;
572
573 master_priv = dev->primary->master->driver_priv;
574 if (!master_priv->sarea_priv)
575 return;
576
577 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
578
579 switch (pipe) {
580 case 0:
581 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
582 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
583 break;
584 case 1:
585 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
586 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
587 break;
588 default:
589 DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
590 break;
591 }
592
593 intel_crtc->dpms_mode = mode;
594}
595
596static void intel_crtc_prepare (struct drm_crtc *crtc)
597{
598 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
599 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
600}
601
602static void intel_crtc_commit (struct drm_crtc *crtc)
603{
604 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
605 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
606}
607
608void intel_encoder_prepare (struct drm_encoder *encoder)
609{
610 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
611	/* LVDS has its own version of prepare; see intel_lvds_prepare */
612 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
613}
614
615void intel_encoder_commit (struct drm_encoder *encoder)
616{
617 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
618	/* LVDS has its own version of commit; see intel_lvds_commit */
619 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
620}
621
622static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
623 struct drm_display_mode *mode,
624 struct drm_display_mode *adjusted_mode)
625{
626 return true;
627}
628
629
630/** Returns the core display clock speed for i830 - i945 */
631static int intel_get_core_clock_speed(struct drm_device *dev)
632{
633
634 /* Core clock values taken from the published datasheets.
635	 * The 830 may go up to 166 MHz, which we should check.
636 */
637 if (IS_I945G(dev))
638 return 400000;
639 else if (IS_I915G(dev))
640 return 333000;
641 else if (IS_I945GM(dev) || IS_845G(dev))
642 return 200000;
643 else if (IS_I915GM(dev)) {
644 u16 gcfgc = 0;
645
646 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
647
648 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
649 return 133000;
650 else {
651 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
652 case GC_DISPLAY_CLOCK_333_MHZ:
653 return 333000;
654 default:
655 case GC_DISPLAY_CLOCK_190_200_MHZ:
656 return 190000;
657 }
658 }
659 } else if (IS_I865G(dev))
660 return 266000;
661 else if (IS_I855(dev)) {
662 u16 hpllcc = 0;
663 /* Assume that the hardware is in the high speed state. This
664 * should be the default.
665 */
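		/* Note: hpllcc is never actually read back from the chipset
		 * here, so the switch below always sees 0 and takes whichever
		 * case that encoding happens to correspond to.
		 */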
666 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
667 case GC_CLOCK_133_200:
668 case GC_CLOCK_100_200:
669 return 200000;
670 case GC_CLOCK_166_250:
671 return 250000;
672 case GC_CLOCK_100_133:
673 return 133000;
674 }
675 } else /* 852, 830 */
676 return 133000;
677
678 return 0; /* Silence gcc warning */
679}
680
681
682/**
683 * Return the pipe currently connected to the panel fitter,
684 * or -1 if the panel fitter is not present or not in use
685 */
686static int intel_panel_fitter_pipe (struct drm_device *dev)
687{
688 struct drm_i915_private *dev_priv = dev->dev_private;
689 u32 pfit_control;
690
691 /* i830 doesn't have a panel fitter */
692 if (IS_I830(dev))
693 return -1;
694
695 pfit_control = I915_READ(PFIT_CONTROL);
696
697 /* See if the panel fitter is in use */
698 if ((pfit_control & PFIT_ENABLE) == 0)
699 return -1;
700
701 /* 965 can place panel fitter on either pipe */
702 if (IS_I965G(dev))
703 return (pfit_control >> 29) & 0x3;
704
705 /* older chips can only use pipe 1 */
706 return 1;
707}
708
709static void intel_crtc_mode_set(struct drm_crtc *crtc,
710 struct drm_display_mode *mode,
711 struct drm_display_mode *adjusted_mode,
712 int x, int y,
713 struct drm_framebuffer *old_fb)
714{
715 struct drm_device *dev = crtc->dev;
716 struct drm_i915_private *dev_priv = dev->dev_private;
717 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
718 int pipe = intel_crtc->pipe;
719 int fp_reg = (pipe == 0) ? FPA0 : FPB0;
720 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
721 int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
722 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
723 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
724 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
725 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
726 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
727 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
728 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
729 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
730 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
731 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
732 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
733 int refclk;
734 intel_clock_t clock;
735 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
736 bool ok, is_sdvo = false, is_dvo = false;
737 bool is_crt = false, is_lvds = false, is_tv = false;
738 struct drm_mode_config *mode_config = &dev->mode_config;
739 struct drm_connector *connector;
740
741 drm_vblank_pre_modeset(dev, pipe);
742
743 list_for_each_entry(connector, &mode_config->connector_list, head) {
744 struct intel_output *intel_output = to_intel_output(connector);
745
746 if (!connector->encoder || connector->encoder->crtc != crtc)
747 continue;
748
749 switch (intel_output->type) {
750 case INTEL_OUTPUT_LVDS:
751 is_lvds = true;
752 break;
753 case INTEL_OUTPUT_SDVO:
754 is_sdvo = true;
755 break;
756 case INTEL_OUTPUT_DVO:
757 is_dvo = true;
758 break;
759 case INTEL_OUTPUT_TVOUT:
760 is_tv = true;
761 break;
762 case INTEL_OUTPUT_ANALOG:
763 is_crt = true;
764 break;
765 }
766 }
767
768 if (IS_I9XX(dev)) {
769 refclk = 96000;
770 } else {
771 refclk = 48000;
772 }
773
774 ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
775 if (!ok) {
776 DRM_ERROR("Couldn't find PLL settings for mode!\n");
777 return;
778 }
779
780 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
781
782 dpll = DPLL_VGA_MODE_DIS;
783 if (IS_I9XX(dev)) {
784 if (is_lvds)
785 dpll |= DPLLB_MODE_LVDS;
786 else
787 dpll |= DPLLB_MODE_DAC_SERIAL;
788 if (is_sdvo) {
789 dpll |= DPLL_DVO_HIGH_SPEED;
790 if (IS_I945G(dev) || IS_I945GM(dev)) {
791 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
792 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
793 }
794 }
795
796 /* compute bitmask from p1 value */
797 dpll |= (1 << (clock.p1 - 1)) << 16;
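		/*
		 * Example: clock.p1 == 3 yields (1 << 2) << 16 == 0x40000,
		 * i.e. the P1 divisor is encoded one-hot starting at bit 16.
		 */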
798 switch (clock.p2) {
799 case 5:
800 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
801 break;
802 case 7:
803 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
804 break;
805 case 10:
806 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
807 break;
808 case 14:
809 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
810 break;
811 }
812 if (IS_I965G(dev))
813 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
814 } else {
815 if (is_lvds) {
816 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
817 } else {
818 if (clock.p1 == 2)
819 dpll |= PLL_P1_DIVIDE_BY_TWO;
820 else
821 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
822 if (clock.p2 == 4)
823 dpll |= PLL_P2_DIVIDE_BY_4;
824 }
825 }
826
827 if (is_tv) {
828 /* XXX: just matching BIOS for now */
829/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
830 dpll |= 3;
831 }
832 else
833 dpll |= PLL_REF_INPUT_DREFCLK;
834
835 /* setup pipeconf */
836 pipeconf = I915_READ(pipeconf_reg);
837
838 /* Set up the display plane register */
839 dspcntr = DISPPLANE_GAMMA_ENABLE;
840
841 if (pipe == 0)
842 dspcntr |= DISPPLANE_SEL_PIPE_A;
843 else
844 dspcntr |= DISPPLANE_SEL_PIPE_B;
845
846 if (pipe == 0 && !IS_I965G(dev)) {
847 /* Enable pixel doubling when the dot clock is > 90% of the (display)
848 * core speed.
849 *
850 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
851 * pipe == 0 check?
852 */
853 if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
854 pipeconf |= PIPEACONF_DOUBLE_WIDE;
855 else
856 pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
857 }
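	/*
	 * For example, with a 200 MHz core clock the threshold above works
	 * out to 180 MHz: any dot clock beyond that enables double-wide mode.
	 */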
858
859 dspcntr |= DISPLAY_PLANE_ENABLE;
860 pipeconf |= PIPEACONF_ENABLE;
861 dpll |= DPLL_VCO_ENABLE;
862
863
864 /* Disable the panel fitter if it was on our pipe */
865 if (intel_panel_fitter_pipe(dev) == pipe)
866 I915_WRITE(PFIT_CONTROL, 0);
867
868 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
869 drm_mode_debug_printmodeline(mode);
870
871
872 if (dpll & DPLL_VCO_ENABLE) {
873 I915_WRITE(fp_reg, fp);
874 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
875 I915_READ(dpll_reg);
876 udelay(150);
877 }
878
879 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
880 * This is an exception to the general rule that mode_set doesn't turn
881 * things on.
882 */
883 if (is_lvds) {
884 u32 lvds = I915_READ(LVDS);
885
886 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
887 /* Set the B0-B3 data pairs corresponding to whether we're going to
888 * set the DPLLs for dual-channel mode or not.
889 */
890 if (clock.p2 == 7)
891 lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
892 else
893 lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
894
895 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
896 * appropriately here, but we need to look more thoroughly into how
897 * panels behave in the two modes.
898 */
899
900 I915_WRITE(LVDS, lvds);
901 I915_READ(LVDS);
902 }
903
904 I915_WRITE(fp_reg, fp);
905 I915_WRITE(dpll_reg, dpll);
906 I915_READ(dpll_reg);
907 /* Wait for the clocks to stabilize. */
908 udelay(150);
909
910 if (IS_I965G(dev)) {
911 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
912 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
913 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
914 } else {
915 /* write it again -- the BIOS does, after all */
916 I915_WRITE(dpll_reg, dpll);
917 }
918 I915_READ(dpll_reg);
919 /* Wait for the clocks to stabilize. */
920 udelay(150);
921
922 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
923 ((adjusted_mode->crtc_htotal - 1) << 16));
924 I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
925 ((adjusted_mode->crtc_hblank_end - 1) << 16));
926 I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
927 ((adjusted_mode->crtc_hsync_end - 1) << 16));
928 I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
929 ((adjusted_mode->crtc_vtotal - 1) << 16));
930 I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
931 ((adjusted_mode->crtc_vblank_end - 1) << 16));
932 I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
933 ((adjusted_mode->crtc_vsync_end - 1) << 16));
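	/*
	 * The timing registers take two 16-bit fields, each programmed as the
	 * timing value minus one.  For a 640x480 mode with htotal 832 (such as
	 * the VESA load-detect mode defined later in this file), the htot_reg
	 * write above would be ((832 - 1) << 16) | (640 - 1) == 0x033f027f.
	 */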
934 /* pipesrc and dspsize control the size that is scaled from, which should
935 * always be the user's requested size.
936 */
937 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
938 I915_WRITE(dsppos_reg, 0);
939 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
940 I915_WRITE(pipeconf_reg, pipeconf);
941 I915_READ(pipeconf_reg);
942
943 intel_wait_for_vblank(dev);
944
945 I915_WRITE(dspcntr_reg, dspcntr);
946
947 /* Flush the plane changes */
948 intel_pipe_set_base(crtc, x, y, old_fb);
949
950 drm_vblank_post_modeset(dev, pipe);
951}
952
953/** Loads the palette/gamma unit for the CRTC with the prepared values */
954void intel_crtc_load_lut(struct drm_crtc *crtc)
955{
956 struct drm_device *dev = crtc->dev;
957 struct drm_i915_private *dev_priv = dev->dev_private;
958 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
959 int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
960 int i;
961
962 /* The clocks have to be on to load the palette. */
963 if (!crtc->enabled)
964 return;
965
966 for (i = 0; i < 256; i++) {
967 I915_WRITE(palreg + 4 * i,
968 (intel_crtc->lut_r[i] << 16) |
969 (intel_crtc->lut_g[i] << 8) |
970 intel_crtc->lut_b[i]);
971 }
972}
973
974static int intel_crtc_cursor_set(struct drm_crtc *crtc,
975 struct drm_file *file_priv,
976 uint32_t handle,
977 uint32_t width, uint32_t height)
978{
979 struct drm_device *dev = crtc->dev;
980 struct drm_i915_private *dev_priv = dev->dev_private;
981 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
982 struct drm_gem_object *bo;
983 struct drm_i915_gem_object *obj_priv;
984 int pipe = intel_crtc->pipe;
985 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
986 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
987 uint32_t temp;
988 size_t addr;
989
990 DRM_DEBUG("\n");
991
992 /* if we want to turn off the cursor ignore width and height */
993 if (!handle) {
994 DRM_DEBUG("cursor off\n");
995		/* turn off the cursor */
996 temp = 0;
997 temp |= CURSOR_MODE_DISABLE;
998
999 I915_WRITE(control, temp);
1000 I915_WRITE(base, 0);
1001 return 0;
1002 }
1003
1004 /* Currently we only support 64x64 cursors */
1005 if (width != 64 || height != 64) {
1006 DRM_ERROR("we currently only support 64x64 cursors\n");
1007 return -EINVAL;
1008 }
1009
1010 bo = drm_gem_object_lookup(dev, file_priv, handle);
1011 if (!bo)
1012 return -ENOENT;
1013
1014 obj_priv = bo->driver_private;
1015
1016 if (bo->size < width * height * 4) {
1017		DRM_ERROR("buffer is too small\n");
1018 drm_gem_object_unreference(bo);
1019 return -ENOMEM;
1020 }
1021
1022 if (dev_priv->cursor_needs_physical) {
1023 addr = dev->agp->base + obj_priv->gtt_offset;
1024 } else {
1025 addr = obj_priv->gtt_offset;
1026 }
1027
1028 intel_crtc->cursor_addr = addr;
1029 temp = 0;
1030 /* set the pipe for the cursor */
1031 temp |= (pipe << 28);
1032 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1033
1034 I915_WRITE(control, temp);
1035 I915_WRITE(base, addr);
1036
1037 return 0;
1038}
1039
1040static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1041{
1042 struct drm_device *dev = crtc->dev;
1043 struct drm_i915_private *dev_priv = dev->dev_private;
1044 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1045 int pipe = intel_crtc->pipe;
1046 uint32_t temp = 0;
1047 uint32_t adder;
1048
1049 if (x < 0) {
1050 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
1051 x = -x;
1052 }
1053 if (y < 0) {
1054 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
1055 y = -y;
1056 }
1057
1058 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
1059 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
1060
1061 adder = intel_crtc->cursor_addr;
1062 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
1063 I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
1064
1065 return 0;
1066}
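/*
 * Cursor positions are written in sign/magnitude form: a negative coordinate
 * sets CURSOR_POS_SIGN in that axis' field and stores the absolute value in
 * the position bits, so e.g. x = -5, y = 12 becomes (sign bit | 5) in the X
 * field and 12 in the Y field.
 */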
1067
1068/** Sets the color ramps on behalf of RandR */
1069void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
1070 u16 blue, int regno)
1071{
1072 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1073
1074 intel_crtc->lut_r[regno] = red >> 8;
1075 intel_crtc->lut_g[regno] = green >> 8;
1076 intel_crtc->lut_b[regno] = blue >> 8;
1077}
1078
1079static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
1080 u16 *blue, uint32_t size)
1081{
1082 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1083 int i;
1084
1085 if (size != 256)
1086 return;
1087
1088 for (i = 0; i < 256; i++) {
1089 intel_crtc->lut_r[i] = red[i] >> 8;
1090 intel_crtc->lut_g[i] = green[i] >> 8;
1091 intel_crtc->lut_b[i] = blue[i] >> 8;
1092 }
1093
1094 intel_crtc_load_lut(crtc);
1095}
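/*
 * DRM hands gamma ramps in as 16 bits per channel; the ">> 8" above simply
 * truncates them to the 8-bit-per-channel values the palette takes, and
 * intel_crtc_load_lut() then packs them as (R << 16) | (G << 8) | B.
 */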
1096
1097/**
1098 * Get a pipe with a simple mode set on it for doing load-based monitor
1099 * detection.
1100 *
1101 * It will be up to the load-detect code to adjust the pipe as appropriate for
1102 * its requirements. The pipe will be connected to no other outputs.
1103 *
1104 * Currently this code will only succeed if there is a pipe with no outputs
1105 * configured for it. In the future, it could choose to temporarily disable
1106 * some outputs to free up a pipe for its use.
1107 *
1108 * \return crtc, or NULL if no pipes are available.
1109 */
1110
1111/* VESA 640x480x72Hz mode to set on the pipe */
1112static struct drm_display_mode load_detect_mode = {
1113 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
1114 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
1115};
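/*
 * With these timings the refresh rate works out to roughly
 * 31500 kHz / (832 * 520), i.e. about 72.8 Hz, matching the standard VESA
 * 640x480@72 mode named above.
 */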
1116
1117struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
1118 struct drm_display_mode *mode,
1119 int *dpms_mode)
1120{
1121 struct intel_crtc *intel_crtc;
1122 struct drm_crtc *possible_crtc;
1123	struct drm_crtc *supported_crtc = NULL;
1124 struct drm_encoder *encoder = &intel_output->enc;
1125 struct drm_crtc *crtc = NULL;
1126 struct drm_device *dev = encoder->dev;
1127 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
1128 struct drm_crtc_helper_funcs *crtc_funcs;
1129 int i = -1;
1130
1131 /*
1132 * Algorithm gets a little messy:
1133 * - if the connector already has an assigned crtc, use it (but make
1134 * sure it's on first)
1135 * - try to find the first unused crtc that can drive this connector,
1136 * and use that if we find one
1137	 * - if there are no unused crtcs available, try to use the first
1138	 *   one we found that supports the connector (note: this last
	 *   fallback is not implemented below yet; supported_crtc is
	 *   collected but currently unused)
1139 */
1140
1141 /* See if we already have a CRTC for this connector */
1142 if (encoder->crtc) {
1143 crtc = encoder->crtc;
1144 /* Make sure the crtc and connector are running */
1145 intel_crtc = to_intel_crtc(crtc);
1146 *dpms_mode = intel_crtc->dpms_mode;
1147 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
1148 crtc_funcs = crtc->helper_private;
1149 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
1150 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
1151 }
1152 return crtc;
1153 }
1154
1155 /* Find an unused one (if possible) */
1156 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
1157 i++;
1158 if (!(encoder->possible_crtcs & (1 << i)))
1159 continue;
1160 if (!possible_crtc->enabled) {
1161 crtc = possible_crtc;
1162 break;
1163 }
1164 if (!supported_crtc)
1165 supported_crtc = possible_crtc;
1166 }
1167
1168 /*
1169 * If we didn't find an unused CRTC, don't use any.
1170 */
1171 if (!crtc) {
1172 return NULL;
1173 }
1174
1175 encoder->crtc = crtc;
1176 intel_output->load_detect_temp = true;
1177
1178 intel_crtc = to_intel_crtc(crtc);
1179 *dpms_mode = intel_crtc->dpms_mode;
1180
1181 if (!crtc->enabled) {
1182 if (!mode)
1183 mode = &load_detect_mode;
1184 drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
1185 } else {
1186 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
1187 crtc_funcs = crtc->helper_private;
1188 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
1189 }
1190
1191 /* Add this connector to the crtc */
1192 encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
1193 encoder_funcs->commit(encoder);
1194 }
1195 /* let the connector get through one full cycle before testing */
1196 intel_wait_for_vblank(dev);
1197
1198 return crtc;
1199}
1200
1201void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
1202{
1203 struct drm_encoder *encoder = &intel_output->enc;
1204 struct drm_device *dev = encoder->dev;
1205 struct drm_crtc *crtc = encoder->crtc;
1206 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
1207 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1208
1209 if (intel_output->load_detect_temp) {
1210 encoder->crtc = NULL;
1211 intel_output->load_detect_temp = false;
1212 crtc->enabled = drm_helper_crtc_in_use(crtc);
1213 drm_helper_disable_unused_functions(dev);
1214 }
1215
1216 /* Switch crtc and output back off if necessary */
1217 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
1218 if (encoder->crtc == crtc)
1219 encoder_funcs->dpms(encoder, dpms_mode);
1220 crtc_funcs->dpms(crtc, dpms_mode);
1221 }
1222}
1223
1224/* Returns the clock of the currently programmed mode of the given pipe. */
1225static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1226{
1227 struct drm_i915_private *dev_priv = dev->dev_private;
1228 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1229 int pipe = intel_crtc->pipe;
1230 u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
1231 u32 fp;
1232 intel_clock_t clock;
1233
1234 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
1235 fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
1236 else
1237 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
1238
1239 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1240 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1241 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1242 if (IS_I9XX(dev)) {
1243 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
1244 DPLL_FPA01_P1_POST_DIV_SHIFT);
1245
1246 switch (dpll & DPLL_MODE_MASK) {
1247 case DPLLB_MODE_DAC_SERIAL:
1248 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
1249 5 : 10;
1250 break;
1251 case DPLLB_MODE_LVDS:
1252 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
1253 7 : 14;
1254 break;
1255 default:
1256 DRM_DEBUG("Unknown DPLL mode %08x in programmed "
1257 "mode\n", (int)(dpll & DPLL_MODE_MASK));
1258 return 0;
1259 }
1260
1261		/* XXX: Handle the 100 MHz refclk */
1262 i9xx_clock(96000, &clock);
1263 } else {
1264 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
1265
1266 if (is_lvds) {
1267 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
1268 DPLL_FPA01_P1_POST_DIV_SHIFT);
1269 clock.p2 = 14;
1270
1271 if ((dpll & PLL_REF_INPUT_MASK) ==
1272 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1273 /* XXX: might not be 66MHz */
1274 i8xx_clock(66000, &clock);
1275 } else
1276 i8xx_clock(48000, &clock);
1277 } else {
1278 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1279 clock.p1 = 2;
1280 else {
1281 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
1282 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
1283 }
1284 if (dpll & PLL_P2_DIVIDE_BY_4)
1285 clock.p2 = 4;
1286 else
1287 clock.p2 = 2;
1288
1289 i8xx_clock(48000, &clock);
1290 }
1291 }
1292
1293 /* XXX: It would be nice to validate the clocks, but we can't reuse
1294 * i830PllIsValid() because it relies on the xf86_config connector
1295 * configuration being accurate, which it isn't necessarily.
1296 */
1297
1298 return clock.dot;
1299}
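/*
 * The function above works the PLL math backwards: it reads back the FP and
 * DPLL registers, decodes m1/m2/n/p1/p2, and lets i9xx_clock() or
 * i8xx_clock() recompute the dot clock from an assumed reference clock
 * (96 MHz for i9xx, 48 MHz otherwise, possibly 66 MHz for spread-spectrum
 * LVDS, as the XXX comments note).
 */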
1300
1301/** Returns the currently programmed mode of the given pipe. */
1302struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
1303 struct drm_crtc *crtc)
1304{
1305 struct drm_i915_private *dev_priv = dev->dev_private;
1306 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1307 int pipe = intel_crtc->pipe;
1308 struct drm_display_mode *mode;
1309 int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
1310 int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
1311 int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
1312 int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
1313
1314 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
1315 if (!mode)
1316 return NULL;
1317
1318 mode->clock = intel_crtc_clock_get(dev, crtc);
1319 mode->hdisplay = (htot & 0xffff) + 1;
1320 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
1321 mode->hsync_start = (hsync & 0xffff) + 1;
1322 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
1323 mode->vdisplay = (vtot & 0xffff) + 1;
1324 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
1325 mode->vsync_start = (vsync & 0xffff) + 1;
1326 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
1327
1328 drm_mode_set_name(mode);
1329 drm_mode_set_crtcinfo(mode, 0);
1330
1331 return mode;
1332}
1333
1334static void intel_crtc_destroy(struct drm_crtc *crtc)
1335{
1336 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1337
1338 drm_crtc_cleanup(crtc);
1339 kfree(intel_crtc);
1340}
1341
1342static const struct drm_crtc_helper_funcs intel_helper_funcs = {
1343 .dpms = intel_crtc_dpms,
1344 .mode_fixup = intel_crtc_mode_fixup,
1345 .mode_set = intel_crtc_mode_set,
1346 .mode_set_base = intel_pipe_set_base,
1347 .prepare = intel_crtc_prepare,
1348 .commit = intel_crtc_commit,
1349};
1350
1351static const struct drm_crtc_funcs intel_crtc_funcs = {
1352 .cursor_set = intel_crtc_cursor_set,
1353 .cursor_move = intel_crtc_cursor_move,
1354 .gamma_set = intel_crtc_gamma_set,
1355 .set_config = drm_crtc_helper_set_config,
1356 .destroy = intel_crtc_destroy,
1357};
1358
1359
1360static void intel_crtc_init(struct drm_device *dev, int pipe)
1361{
1362 struct intel_crtc *intel_crtc;
1363 int i;
1364
1365 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
1366 if (intel_crtc == NULL)
1367 return;
1368
1369 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
1370
1371 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
1372 intel_crtc->pipe = pipe;
1373 for (i = 0; i < 256; i++) {
1374 intel_crtc->lut_r[i] = i;
1375 intel_crtc->lut_g[i] = i;
1376 intel_crtc->lut_b[i] = i;
1377 }
1378
1379 intel_crtc->cursor_addr = 0;
1380 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
1381 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
1382
1383 intel_crtc->mode_set.crtc = &intel_crtc->base;
1384 intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1);
1385 intel_crtc->mode_set.num_connectors = 0;
1386
1387 if (i915_fbpercrtc) {
1388
1389
1390
1391 }
1392}
1393
1394struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
1395{
1396 struct drm_crtc *crtc = NULL;
1397
1398 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1399 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1400 if (intel_crtc->pipe == pipe)
1401 break;
1402 }
1403 return crtc;
1404}
1405
1406static int intel_connector_clones(struct drm_device *dev, int type_mask)
1407{
1408 int index_mask = 0;
1409 struct drm_connector *connector;
1410 int entry = 0;
1411
1412 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1413 struct intel_output *intel_output = to_intel_output(connector);
1414 if (type_mask & (1 << intel_output->type))
1415 index_mask |= (1 << entry);
1416 entry++;
1417 }
1418 return index_mask;
1419}
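/*
 * intel_connector_clones() converts an output-type mask into a mask of
 * positions in the connector list.  For example, if the list happened to hold
 * CRT, LVDS, SDVO in that order and type_mask allowed ANALOG and SDVO, the
 * result would be 0x5 (bits 0 and 2), which intel_setup_outputs() below
 * assigns to encoder->possible_clones.
 */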
1420
1421
1422static void intel_setup_outputs(struct drm_device *dev)
1423{
1424 struct drm_connector *connector;
1425
1426 intel_crt_init(dev);
1427
1428 /* Set up integrated LVDS */
1429 if (IS_MOBILE(dev) && !IS_I830(dev))
1430 intel_lvds_init(dev);
1431
1432 if (IS_I9XX(dev)) {
1433 intel_sdvo_init(dev, SDVOB);
1434 intel_sdvo_init(dev, SDVOC);
1435 } else
1436 intel_dvo_init(dev);
1437
1438 if (IS_I9XX(dev) && !IS_I915G(dev))
1439 intel_tv_init(dev);
1440
1441 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1442 struct intel_output *intel_output = to_intel_output(connector);
1443 struct drm_encoder *encoder = &intel_output->enc;
1444 int crtc_mask = 0, clone_mask = 0;
1445
1446 /* valid crtcs */
1447 switch(intel_output->type) {
1448 case INTEL_OUTPUT_DVO:
1449 case INTEL_OUTPUT_SDVO:
1450 crtc_mask = ((1 << 0)|
1451 (1 << 1));
1452 clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
1453 (1 << INTEL_OUTPUT_DVO) |
1454 (1 << INTEL_OUTPUT_SDVO));
1455 break;
1456 case INTEL_OUTPUT_ANALOG:
1457 crtc_mask = ((1 << 0)|
1458 (1 << 1));
1459 clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
1460 (1 << INTEL_OUTPUT_DVO) |
1461 (1 << INTEL_OUTPUT_SDVO));
1462 break;
1463 case INTEL_OUTPUT_LVDS:
1464 crtc_mask = (1 << 1);
1465 clone_mask = (1 << INTEL_OUTPUT_LVDS);
1466 break;
1467 case INTEL_OUTPUT_TVOUT:
1468 crtc_mask = ((1 << 0) |
1469 (1 << 1));
1470 clone_mask = (1 << INTEL_OUTPUT_TVOUT);
1471 break;
1472 }
1473 encoder->possible_crtcs = crtc_mask;
1474 encoder->possible_clones = intel_connector_clones(dev, clone_mask);
1475 }
1476}
1477
1478static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
1479{
1480 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1481 struct drm_device *dev = fb->dev;
1482
1483 if (fb->fbdev)
1484 intelfb_remove(dev, fb);
1485
1486 drm_framebuffer_cleanup(fb);
1487 mutex_lock(&dev->struct_mutex);
1488 drm_gem_object_unreference(intel_fb->obj);
1489 mutex_unlock(&dev->struct_mutex);
1490
1491 kfree(intel_fb);
1492}
1493
1494static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
1495 struct drm_file *file_priv,
1496 unsigned int *handle)
1497{
1498 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1499 struct drm_gem_object *object = intel_fb->obj;
1500
1501 return drm_gem_handle_create(file_priv, object, handle);
1502}
1503
1504static const struct drm_framebuffer_funcs intel_fb_funcs = {
1505 .destroy = intel_user_framebuffer_destroy,
1506 .create_handle = intel_user_framebuffer_create_handle,
1507};
1508
1509int intel_framebuffer_create(struct drm_device *dev,
1510 struct drm_mode_fb_cmd *mode_cmd,
1511 struct drm_framebuffer **fb,
1512 struct drm_gem_object *obj)
1513{
1514 struct intel_framebuffer *intel_fb;
1515 int ret;
1516
1517 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1518 if (!intel_fb)
1519 return -ENOMEM;
1520
1521 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
1522	if (ret) {
1523		DRM_ERROR("framebuffer init failed %d\n", ret);
		/* don't leak the wrapper on init failure */
		kfree(intel_fb);
1524		return ret;
1525	}
1526
1527 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
1528
1529 intel_fb->obj = obj;
1530
1531 *fb = &intel_fb->base;
1532
1533 return 0;
1534}
1535
1536
1537static struct drm_framebuffer *
1538intel_user_framebuffer_create(struct drm_device *dev,
1539 struct drm_file *filp,
1540 struct drm_mode_fb_cmd *mode_cmd)
1541{
1542 struct drm_gem_object *obj;
1543 struct drm_framebuffer *fb;
1544 int ret;
1545
1546 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
1547 if (!obj)
1548 return NULL;
1549
1550 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
1551 if (ret) {
1552 drm_gem_object_unreference(obj);
1553 return NULL;
1554 }
1555
1556 return fb;
1557}
1558
1559static const struct drm_mode_config_funcs intel_mode_funcs = {
1560 .fb_create = intel_user_framebuffer_create,
1561 .fb_changed = intelfb_probe,
1562};
1563
1564void intel_modeset_init(struct drm_device *dev)
1565{
1566 int num_pipe;
1567 int i;
1568
1569 drm_mode_config_init(dev);
1570
1571 dev->mode_config.min_width = 0;
1572 dev->mode_config.min_height = 0;
1573
1574 dev->mode_config.funcs = (void *)&intel_mode_funcs;
1575
1576 if (IS_I965G(dev)) {
1577 dev->mode_config.max_width = 8192;
1578 dev->mode_config.max_height = 8192;
1579 } else {
1580 dev->mode_config.max_width = 2048;
1581 dev->mode_config.max_height = 2048;
1582 }
1583
1584 /* set memory base */
1585 if (IS_I9XX(dev))
1586 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
1587 else
1588 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
1589
1590 if (IS_MOBILE(dev) || IS_I9XX(dev))
1591 num_pipe = 2;
1592 else
1593 num_pipe = 1;
1594 DRM_DEBUG("%d display pipe%s available.\n",
1595 num_pipe, num_pipe > 1 ? "s" : "");
1596
1597 for (i = 0; i < num_pipe; i++) {
1598 intel_crtc_init(dev, i);
1599 }
1600
1601 intel_setup_outputs(dev);
1602}
1603
1604void intel_modeset_cleanup(struct drm_device *dev)
1605{
1606 drm_mode_config_cleanup(dev);
1607}
1608
1609
1610/* current intel driver doesn't take advantage of encoders
1611 always give back the encoder for the connector
1612*/
1613struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
1614{
1615 struct intel_output *intel_output = to_intel_output(connector);
1616
1617 return &intel_output->enc;
1618}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
new file mode 100644
index 000000000000..407edd5bf582
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -0,0 +1,146 @@
1/*
2 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright (c) 2007-2008 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 */
25#ifndef __INTEL_DRV_H__
26#define __INTEL_DRV_H__
27
28#include <linux/i2c.h>
29#include <linux/i2c-id.h>
30#include <linux/i2c-algo-bit.h>
31#include "drm_crtc.h"
32
33#include "drm_crtc_helper.h"
34/*
35 * Display related stuff
36 */
37
38/* store information about an Ixxx DVO */
39/* The i830->i865 use multiple DVOs with multiple i2cs */
40/* the i915, i945 have a single sDVO i2c bus - which is different */
41#define MAX_OUTPUTS 6
42/* maximum connectors per crtcs in the mode set */
43#define INTELFB_CONN_LIMIT 4
44
45#define INTEL_I2C_BUS_DVO 1
46#define INTEL_I2C_BUS_SDVO 2
47
48/* these are outputs from the chip - integrated only
49 external chips are via DVO or SDVO output */
50#define INTEL_OUTPUT_UNUSED 0
51#define INTEL_OUTPUT_ANALOG 1
52#define INTEL_OUTPUT_DVO 2
53#define INTEL_OUTPUT_SDVO 3
54#define INTEL_OUTPUT_LVDS 4
55#define INTEL_OUTPUT_TVOUT 5
56
57#define INTEL_DVO_CHIP_NONE 0
58#define INTEL_DVO_CHIP_LVDS 1
59#define INTEL_DVO_CHIP_TMDS 2
60#define INTEL_DVO_CHIP_TVOUT 4
61
62struct intel_i2c_chan {
63 struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
64 u32 reg; /* GPIO reg */
65 struct i2c_adapter adapter;
66 struct i2c_algo_bit_data algo;
67 u8 slave_addr;
68};
69
70struct intel_framebuffer {
71 struct drm_framebuffer base;
72 struct drm_gem_object *obj;
73};
74
75
76struct intel_output {
77 struct drm_connector base;
78
79 struct drm_encoder enc;
80 int type;
81 struct intel_i2c_chan *i2c_bus; /* for control functions */
82 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
83 bool load_detect_temp;
84 void *dev_priv;
85};
86
87struct intel_crtc {
88 struct drm_crtc base;
89 int pipe;
90 int plane;
91 uint32_t cursor_addr;
92 u8 lut_r[256], lut_g[256], lut_b[256];
93 int dpms_mode;
94 struct intel_framebuffer *fbdev_fb;
95 /* a mode_set for fbdev users on this crtc */
96 struct drm_mode_set mode_set;
97};
98
99#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
100#define to_intel_output(x) container_of(x, struct intel_output, base)
101#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
102#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
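/* These helpers recover the wrapping driver structure from an embedded DRM
 * object via container_of(); e.g. to_intel_crtc() maps a struct drm_crtc
 * pointer back to the struct intel_crtc that contains it as .base.
 */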
103
104struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
105 const char *name);
106void intel_i2c_destroy(struct intel_i2c_chan *chan);
107int intel_ddc_get_modes(struct intel_output *intel_output);
108extern bool intel_ddc_probe(struct intel_output *intel_output);
109
110extern void intel_crt_init(struct drm_device *dev);
111extern void intel_sdvo_init(struct drm_device *dev, int output_device);
112extern void intel_dvo_init(struct drm_device *dev);
113extern void intel_tv_init(struct drm_device *dev);
114extern void intel_lvds_init(struct drm_device *dev);
115
116extern void intel_crtc_load_lut(struct drm_crtc *crtc);
117extern void intel_encoder_prepare (struct drm_encoder *encoder);
118extern void intel_encoder_commit (struct drm_encoder *encoder);
119
120extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
121
122extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
123 struct drm_crtc *crtc);
124extern void intel_wait_for_vblank(struct drm_device *dev);
125extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
126extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
127 struct drm_display_mode *mode,
128 int *dpms_mode);
129extern void intel_release_load_detect_pipe(struct intel_output *intel_output,
130 int dpms_mode);
131
132extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
133extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
134extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
135extern int intelfb_probe(struct drm_device *dev);
136extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
137extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
138extern void intelfb_restore(void);
139extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
140 u16 blue, int regno);
141
142extern int intel_framebuffer_create(struct drm_device *dev,
143 struct drm_mode_fb_cmd *mode_cmd,
144 struct drm_framebuffer **fb,
145 struct drm_gem_object *obj);
146#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
new file mode 100644
index 000000000000..8b8d6e65cd3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -0,0 +1,495 @@
1/*
2 * Copyright 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright © 2006-2007 Intel Corporation
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Eric Anholt <eric@anholt.net>
26 */
27#include <linux/i2c.h>
28#include "drmP.h"
29#include "drm.h"
30#include "drm_crtc.h"
31#include "intel_drv.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
34#include "dvo.h"
35
36#define SIL164_ADDR 0x38
37#define CH7xxx_ADDR 0x76
38#define TFP410_ADDR 0x38
39
40static struct intel_dvo_device intel_dvo_devices[] = {
41 {
42 .type = INTEL_DVO_CHIP_TMDS,
43 .name = "sil164",
44 .dvo_reg = DVOC,
45 .slave_addr = SIL164_ADDR,
46 .dev_ops = &sil164_ops,
47 },
48 {
49 .type = INTEL_DVO_CHIP_TMDS,
50 .name = "ch7xxx",
51 .dvo_reg = DVOC,
52 .slave_addr = CH7xxx_ADDR,
53 .dev_ops = &ch7xxx_ops,
54 },
55 {
56 .type = INTEL_DVO_CHIP_LVDS,
57 .name = "ivch",
58 .dvo_reg = DVOA,
59 .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
60 .dev_ops = &ivch_ops,
61 },
62 {
63 .type = INTEL_DVO_CHIP_TMDS,
64 .name = "tfp410",
65 .dvo_reg = DVOC,
66 .slave_addr = TFP410_ADDR,
67 .dev_ops = &tfp410_ops,
68 },
69 {
70 .type = INTEL_DVO_CHIP_LVDS,
71 .name = "ch7017",
72 .dvo_reg = DVOC,
73 .slave_addr = 0x75,
74 .gpio = GPIOE,
75 .dev_ops = &ch7017_ops,
76 }
77};
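/*
 * Each entry above describes one external DVO transmitter: its type (TMDS or
 * LVDS), the DVO port register it hangs off, its I2C slave address, and the
 * ops vtable used to drive it.  intel_dvo_init() below walks this table in
 * order and claims the first chip whose init callback succeeds.
 */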
78
79static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
80{
81 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
82 struct intel_output *intel_output = enc_to_intel_output(encoder);
83 struct intel_dvo_device *dvo = intel_output->dev_priv;
84 u32 dvo_reg = dvo->dvo_reg;
85 u32 temp = I915_READ(dvo_reg);
86
87 if (mode == DRM_MODE_DPMS_ON) {
88 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
89 I915_READ(dvo_reg);
90 dvo->dev_ops->dpms(dvo, mode);
91 } else {
92 dvo->dev_ops->dpms(dvo, mode);
93 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
94 I915_READ(dvo_reg);
95 }
96}
97
98static void intel_dvo_save(struct drm_connector *connector)
99{
100 struct drm_i915_private *dev_priv = connector->dev->dev_private;
101 struct intel_output *intel_output = to_intel_output(connector);
102 struct intel_dvo_device *dvo = intel_output->dev_priv;
103
104 /* Each output should probably just save the registers it touches,
105	 * but for now this just saves them all, which is overkill.
106 */
107 dev_priv->saveDVOA = I915_READ(DVOA);
108 dev_priv->saveDVOB = I915_READ(DVOB);
109 dev_priv->saveDVOC = I915_READ(DVOC);
110
111 dvo->dev_ops->save(dvo);
112}
113
114static void intel_dvo_restore(struct drm_connector *connector)
115{
116 struct drm_i915_private *dev_priv = connector->dev->dev_private;
117 struct intel_output *intel_output = to_intel_output(connector);
118 struct intel_dvo_device *dvo = intel_output->dev_priv;
119
120 dvo->dev_ops->restore(dvo);
121
122 I915_WRITE(DVOA, dev_priv->saveDVOA);
123 I915_WRITE(DVOB, dev_priv->saveDVOB);
124 I915_WRITE(DVOC, dev_priv->saveDVOC);
125}
126
127static int intel_dvo_mode_valid(struct drm_connector *connector,
128 struct drm_display_mode *mode)
129{
130 struct intel_output *intel_output = to_intel_output(connector);
131 struct intel_dvo_device *dvo = intel_output->dev_priv;
132
133 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
134 return MODE_NO_DBLESCAN;
135
136 /* XXX: Validate clock range */
137
138 if (dvo->panel_fixed_mode) {
139 if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
140 return MODE_PANEL;
141 if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
142 return MODE_PANEL;
143 }
144
145 return dvo->dev_ops->mode_valid(dvo, mode);
146}
147
148static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
149 struct drm_display_mode *mode,
150 struct drm_display_mode *adjusted_mode)
151{
152 struct intel_output *intel_output = enc_to_intel_output(encoder);
153 struct intel_dvo_device *dvo = intel_output->dev_priv;
154
155 /* If we have timings from the BIOS for the panel, put them in
156 * to the adjusted mode. The CRTC will be set up for this mode,
157 * with the panel scaling set up to source from the H/VDisplay
158 * of the original mode.
159 */
160 if (dvo->panel_fixed_mode != NULL) {
161#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
162 C(hdisplay);
163 C(hsync_start);
164 C(hsync_end);
165 C(htotal);
166 C(vdisplay);
167 C(vsync_start);
168 C(vsync_end);
169 C(vtotal);
170 C(clock);
171 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
172#undef C
173 }
174
175 if (dvo->dev_ops->mode_fixup)
176 return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
177
178 return true;
179}
180
181static void intel_dvo_mode_set(struct drm_encoder *encoder,
182 struct drm_display_mode *mode,
183 struct drm_display_mode *adjusted_mode)
184{
185 struct drm_device *dev = encoder->dev;
186 struct drm_i915_private *dev_priv = dev->dev_private;
187 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
188 struct intel_output *intel_output = enc_to_intel_output(encoder);
189 struct intel_dvo_device *dvo = intel_output->dev_priv;
190 int pipe = intel_crtc->pipe;
191 u32 dvo_val;
192 u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
193 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
194
195 switch (dvo_reg) {
196 case DVOA:
197 default:
198 dvo_srcdim_reg = DVOA_SRCDIM;
199 break;
200 case DVOB:
201 dvo_srcdim_reg = DVOB_SRCDIM;
202 break;
203 case DVOC:
204 dvo_srcdim_reg = DVOC_SRCDIM;
205 break;
206 }
207
208 dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
209
210 /* Save the data order, since I don't know what it should be set to. */
211 dvo_val = I915_READ(dvo_reg) &
212 (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
213 dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
214 DVO_BLANK_ACTIVE_HIGH;
215
216 if (pipe == 1)
217 dvo_val |= DVO_PIPE_B_SELECT;
218 dvo_val |= DVO_PIPE_STALL;
219 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
220 dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
221 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
222 dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
223
224 I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
225
226 /*I915_WRITE(DVOB_SRCDIM,
227 (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
228 (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
229 I915_WRITE(dvo_srcdim_reg,
230 (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
231 (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
232 /*I915_WRITE(DVOB, dvo_val);*/
233 I915_WRITE(dvo_reg, dvo_val);
234}
235
236/**
237 * Detect the output connection on our DVO device.
238 *
239 * Unimplemented.
240 */
241static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
242{
243 struct intel_output *intel_output = to_intel_output(connector);
244 struct intel_dvo_device *dvo = intel_output->dev_priv;
245
246 return dvo->dev_ops->detect(dvo);
247}
248
249static int intel_dvo_get_modes(struct drm_connector *connector)
250{
251 struct intel_output *intel_output = to_intel_output(connector);
252 struct intel_dvo_device *dvo = intel_output->dev_priv;
253
254 /* We should probably have an i2c driver get_modes function for those
255 * devices which will have a fixed set of modes determined by the chip
256 * (TV-out, for example), but for now with just TMDS and LVDS,
257 * that's not the case.
258 */
259 intel_ddc_get_modes(intel_output);
260 if (!list_empty(&connector->probed_modes))
261 return 1;
262
263
264 if (dvo->panel_fixed_mode != NULL) {
265 struct drm_display_mode *mode;
266 mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
267 if (mode) {
268 drm_mode_probed_add(connector, mode);
269 return 1;
270 }
271 }
272 return 0;
273}
274
275static void intel_dvo_destroy (struct drm_connector *connector)
276{
277 struct intel_output *intel_output = to_intel_output(connector);
278 struct intel_dvo_device *dvo = intel_output->dev_priv;
279
280 if (dvo) {
281 if (dvo->dev_ops->destroy)
282 dvo->dev_ops->destroy(dvo);
283 if (dvo->panel_fixed_mode)
284 kfree(dvo->panel_fixed_mode);
285		/* no need to free dvo itself; it points into the static
		 * intel_dvo_devices[] table */
286 //kfree(dvo);
287 }
288 if (intel_output->i2c_bus)
289 intel_i2c_destroy(intel_output->i2c_bus);
290 if (intel_output->ddc_bus)
291 intel_i2c_destroy(intel_output->ddc_bus);
292 drm_sysfs_connector_remove(connector);
293 drm_connector_cleanup(connector);
294 kfree(intel_output);
295}
296
297#ifdef RANDR_GET_CRTC_INTERFACE
298static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
299{
300 struct drm_device *dev = connector->dev;
301 struct drm_i915_private *dev_priv = dev->dev_private;
302 struct intel_output *intel_output = to_intel_output(connector);
303 struct intel_dvo_device *dvo = intel_output->dev_priv;
304 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
305
306 return intel_pipe_to_crtc(pScrn, pipe);
307}
308#endif
309
310static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
311 .dpms = intel_dvo_dpms,
312 .mode_fixup = intel_dvo_mode_fixup,
313 .prepare = intel_encoder_prepare,
314 .mode_set = intel_dvo_mode_set,
315 .commit = intel_encoder_commit,
316};
317
318static const struct drm_connector_funcs intel_dvo_connector_funcs = {
319 .save = intel_dvo_save,
320 .restore = intel_dvo_restore,
321 .detect = intel_dvo_detect,
322 .destroy = intel_dvo_destroy,
323 .fill_modes = drm_helper_probe_single_connector_modes,
324};
325
326static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
327 .mode_valid = intel_dvo_mode_valid,
328 .get_modes = intel_dvo_get_modes,
329 .best_encoder = intel_best_encoder,
330};
331
332static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
333{
334 drm_encoder_cleanup(encoder);
335}
336
337static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
338 .destroy = intel_dvo_enc_destroy,
339};
340
341
342/**
343 * Attempts to get a fixed panel timing for LVDS (currently only the i830).
344 *
345 * Other chips with DVO LVDS will need to extend this to deal with the LVDS
346 * chip being on DVOB/C and having multiple pipes.
347 */
348static struct drm_display_mode *
349intel_dvo_get_current_mode (struct drm_connector *connector)
350{
351 struct drm_device *dev = connector->dev;
352 struct drm_i915_private *dev_priv = dev->dev_private;
353 struct intel_output *intel_output = to_intel_output(connector);
354 struct intel_dvo_device *dvo = intel_output->dev_priv;
355 uint32_t dvo_reg = dvo->dvo_reg;
356 uint32_t dvo_val = I915_READ(dvo_reg);
357 struct drm_display_mode *mode = NULL;
358
359 /* If the DVO port is active, that'll be the LVDS, so we can pull out
360 * its timings to get how the BIOS set up the panel.
361 */
362 if (dvo_val & DVO_ENABLE) {
363 struct drm_crtc *crtc;
364 int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
365
366 crtc = intel_get_crtc_from_pipe(dev, pipe);
367 if (crtc) {
368 mode = intel_crtc_mode_get(dev, crtc);
369
370 if (mode) {
371 mode->type |= DRM_MODE_TYPE_PREFERRED;
372 if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
373 mode->flags |= DRM_MODE_FLAG_PHSYNC;
374 if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
375 mode->flags |= DRM_MODE_FLAG_PVSYNC;
376 }
377 }
378 }
379 return mode;
380}
381
382void intel_dvo_init(struct drm_device *dev)
383{
384 struct intel_output *intel_output;
385 struct intel_dvo_device *dvo;
386 struct intel_i2c_chan *i2cbus = NULL;
387 int ret = 0;
388 int i;
389 int gpio_inited = 0;
390 int encoder_type = DRM_MODE_ENCODER_NONE;
391 intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
392 if (!intel_output)
393 return;
394
395 /* Set up the DDC bus */
396 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
397 if (!intel_output->ddc_bus)
398 goto free_intel;
399
400 /* Now, try to find a controller */
401 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
402 struct drm_connector *connector = &intel_output->base;
403 int gpio;
404
405 dvo = &intel_dvo_devices[i];
406
407 /* Allow the I2C driver info to specify the GPIO to be used in
408 * special cases, but otherwise default to what's defined
409 * in the spec.
410 */
411 if (dvo->gpio != 0)
412 gpio = dvo->gpio;
413 else if (dvo->type == INTEL_DVO_CHIP_LVDS)
414 gpio = GPIOB;
415 else
416 gpio = GPIOE;
417
418 /* Set up the I2C bus necessary for the chip we're probing.
419 * It appears that everything is on GPIOE except for panels
420 * on i830 laptops, which are on GPIOB (DVOA).
421 */
422 if (gpio_inited != gpio) {
423 if (i2cbus != NULL)
424 intel_i2c_destroy(i2cbus);
425 if (!(i2cbus = intel_i2c_create(dev, gpio,
426 gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
427 continue;
428 }
429 gpio_inited = gpio;
430 }
431
432		if (dvo->dev_ops != NULL)
433 ret = dvo->dev_ops->init(dvo, i2cbus);
434 else
435 ret = false;
436
437 if (!ret)
438 continue;
439
440 intel_output->type = INTEL_OUTPUT_DVO;
441 switch (dvo->type) {
442 case INTEL_DVO_CHIP_TMDS:
443 drm_connector_init(dev, connector,
444 &intel_dvo_connector_funcs,
445 DRM_MODE_CONNECTOR_DVII);
446 encoder_type = DRM_MODE_ENCODER_TMDS;
447 break;
448 case INTEL_DVO_CHIP_LVDS:
449 drm_connector_init(dev, connector,
450 &intel_dvo_connector_funcs,
451 DRM_MODE_CONNECTOR_LVDS);
452 encoder_type = DRM_MODE_ENCODER_LVDS;
453 break;
454 }
455
456 drm_connector_helper_add(connector,
457 &intel_dvo_connector_helper_funcs);
458 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
459 connector->interlace_allowed = false;
460 connector->doublescan_allowed = false;
461
462 intel_output->dev_priv = dvo;
463 intel_output->i2c_bus = i2cbus;
464
465 drm_encoder_init(dev, &intel_output->enc,
466 &intel_dvo_enc_funcs, encoder_type);
467 drm_encoder_helper_add(&intel_output->enc,
468 &intel_dvo_helper_funcs);
469
470 drm_mode_connector_attach_encoder(&intel_output->base,
471 &intel_output->enc);
472 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
473 /* For our LVDS chipsets, we should hopefully be able
474 * to dig the fixed panel mode out of the BIOS data.
475 * However, it's in a different format from the BIOS
476 * data on chipsets with integrated LVDS (stored in AIM
477 * headers, likely), so for now, just get the current
478 * mode being output through DVO.
479 */
480 dvo->panel_fixed_mode =
481 intel_dvo_get_current_mode(connector);
482 dvo->panel_wants_dither = true;
483 }
484
485 drm_sysfs_connector_add(connector);
486 return;
487 }
488
489 intel_i2c_destroy(intel_output->ddc_bus);
490 /* Didn't find a chip, so tear down. */
491 if (i2cbus != NULL)
492 intel_i2c_destroy(i2cbus);
493free_intel:
494 kfree(intel_output);
495}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
new file mode 100644
index 000000000000..afd1217b8a02
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -0,0 +1,925 @@
1/*
2 * Copyright © 2007 David Airlie
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * David Airlie
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/errno.h>
30#include <linux/string.h>
31#include <linux/mm.h>
32#include <linux/tty.h>
33#include <linux/slab.h>
34#include <linux/sysrq.h>
35#include <linux/delay.h>
36#include <linux/fb.h>
37#include <linux/init.h>
38
39#include "drmP.h"
40#include "drm.h"
41#include "drm_crtc.h"
42#include "intel_drv.h"
43#include "i915_drm.h"
44#include "i915_drv.h"
45
46struct intelfb_par {
47 struct drm_device *dev;
48 struct drm_display_mode *our_mode;
49 struct intel_framebuffer *intel_fb;
50 int crtc_count;
51 /* crtc currently bound to this */
52 uint32_t crtc_ids[2];
53};
54
55static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
56 unsigned blue, unsigned transp,
57 struct fb_info *info)
58{
59 struct intelfb_par *par = info->par;
60 struct drm_device *dev = par->dev;
61 struct drm_crtc *crtc;
62 int i;
63
64 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
65 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
66 struct drm_mode_set *modeset = &intel_crtc->mode_set;
67 struct drm_framebuffer *fb = modeset->fb;
68
69 for (i = 0; i < par->crtc_count; i++)
70 if (crtc->base.id == par->crtc_ids[i])
71 break;
72
73 if (i == par->crtc_count)
74 continue;
75
76
77 if (regno > 255)
78 return 1;
79
80 if (fb->depth == 8) {
81 intel_crtc_fb_gamma_set(crtc, red, green, blue, regno);
82 return 0;
83 }
84
85 if (regno < 16) {
86 switch (fb->depth) {
87 case 15:
88 fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
89 ((green & 0xf800) >> 6) |
90 ((blue & 0xf800) >> 11);
91 break;
92 case 16:
93 fb->pseudo_palette[regno] = (red & 0xf800) |
94 ((green & 0xfc00) >> 5) |
95 ((blue & 0xf800) >> 11);
96 break;
97 case 24:
98 case 32:
99 fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
100 (green & 0xff00) |
101 ((blue & 0xff00) >> 8);
102 break;
103 }
104 }
105 }
106 return 0;
107}
108
109static int intelfb_check_var(struct fb_var_screeninfo *var,
110 struct fb_info *info)
111{
112 struct intelfb_par *par = info->par;
113 struct intel_framebuffer *intel_fb = par->intel_fb;
114 struct drm_framebuffer *fb = &intel_fb->base;
115 int depth;
116
117 if (var->pixclock == -1 || !var->pixclock)
118 return -EINVAL;
119
120 /* Need to resize the fb object !!! */
121 if (var->xres > fb->width || var->yres > fb->height) {
122		DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n", var->xres, var->yres, fb->width, fb->height);
123 DRM_ERROR("Need resizing code.\n");
124 return -EINVAL;
125 }
126
127 switch (var->bits_per_pixel) {
128 case 16:
129 depth = (var->green.length == 6) ? 16 : 15;
130 break;
131 case 32:
132 depth = (var->transp.length > 0) ? 32 : 24;
133 break;
134 default:
135 depth = var->bits_per_pixel;
136 break;
137 }
138
139 switch (depth) {
140 case 8:
141 var->red.offset = 0;
142 var->green.offset = 0;
143 var->blue.offset = 0;
144 var->red.length = 8;
145 var->green.length = 8;
146 var->blue.length = 8;
147 var->transp.length = 0;
148 var->transp.offset = 0;
149 break;
150 case 15:
151 var->red.offset = 10;
152 var->green.offset = 5;
153 var->blue.offset = 0;
154 var->red.length = 5;
155 var->green.length = 5;
156 var->blue.length = 5;
157 var->transp.length = 1;
158 var->transp.offset = 15;
159 break;
160 case 16:
161 var->red.offset = 11;
162 var->green.offset = 5;
163 var->blue.offset = 0;
164 var->red.length = 5;
165 var->green.length = 6;
166 var->blue.length = 5;
167 var->transp.length = 0;
168 var->transp.offset = 0;
169 break;
170 case 24:
171 var->red.offset = 16;
172 var->green.offset = 8;
173 var->blue.offset = 0;
174 var->red.length = 8;
175 var->green.length = 8;
176 var->blue.length = 8;
177 var->transp.length = 0;
178 var->transp.offset = 0;
179 break;
180 case 32:
181 var->red.offset = 16;
182 var->green.offset = 8;
183 var->blue.offset = 0;
184 var->red.length = 8;
185 var->green.length = 8;
186 var->blue.length = 8;
187 var->transp.length = 8;
188 var->transp.offset = 24;
189 break;
190 default:
191 return -EINVAL;
192 }
193
194 return 0;
195}
196
197/* this will let fbcon do the mode init */
198/* FIXME: take mode config lock? */
199static int intelfb_set_par(struct fb_info *info)
200{
201 struct intelfb_par *par = info->par;
202 struct drm_device *dev = par->dev;
203 struct fb_var_screeninfo *var = &info->var;
204 int i;
205
206 DRM_DEBUG("%d %d\n", var->xres, var->pixclock);
207
208 if (var->pixclock != -1) {
209
210		DRM_ERROR("PIXEL CLOCK SET\n");
211 return -EINVAL;
212 } else {
213 struct drm_crtc *crtc;
214 int ret;
215
216 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
217 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
218
219 for (i = 0; i < par->crtc_count; i++)
220 if (crtc->base.id == par->crtc_ids[i])
221 break;
222
223 if (i == par->crtc_count)
224 continue;
225
226 if (crtc->fb == intel_crtc->mode_set.fb) {
227 mutex_lock(&dev->mode_config.mutex);
228 ret = crtc->funcs->set_config(&intel_crtc->mode_set);
229 mutex_unlock(&dev->mode_config.mutex);
230 if (ret)
231 return ret;
232 }
233 }
234 return 0;
235 }
236}
237
238static int intelfb_pan_display(struct fb_var_screeninfo *var,
239 struct fb_info *info)
240{
241 struct intelfb_par *par = info->par;
242 struct drm_device *dev = par->dev;
243 struct drm_mode_set *modeset;
244 struct drm_crtc *crtc;
245 struct intel_crtc *intel_crtc;
246 int ret = 0;
247 int i;
248
249 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
250 for (i = 0; i < par->crtc_count; i++)
251 if (crtc->base.id == par->crtc_ids[i])
252 break;
253
254 if (i == par->crtc_count)
255 continue;
256
257 intel_crtc = to_intel_crtc(crtc);
258 modeset = &intel_crtc->mode_set;
259
260 modeset->x = var->xoffset;
261 modeset->y = var->yoffset;
262
263 if (modeset->num_connectors) {
264 mutex_lock(&dev->mode_config.mutex);
265 ret = crtc->funcs->set_config(modeset);
266 mutex_unlock(&dev->mode_config.mutex);
267 if (!ret) {
268 info->var.xoffset = var->xoffset;
269 info->var.yoffset = var->yoffset;
270 }
271 }
272 }
273
274 return ret;
275}
276
277static void intelfb_on(struct fb_info *info)
278{
279 struct intelfb_par *par = info->par;
280 struct drm_device *dev = par->dev;
281 struct drm_crtc *crtc;
282 struct drm_encoder *encoder;
283 int i;
284
285 /*
286	 * For each CRTC in this fb, turn the CRTC on, then turn on
287	 * all associated encoders.
288 */
289 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
290 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
291
292 for (i = 0; i < par->crtc_count; i++)
293 if (crtc->base.id == par->crtc_ids[i])
294 break;
295
296 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
297
298 /* Found a CRTC on this fb, now find encoders */
299 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
300 if (encoder->crtc == crtc) {
301 struct drm_encoder_helper_funcs *encoder_funcs;
302 encoder_funcs = encoder->helper_private;
303 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
304 }
305 }
306 }
307}
308
309static void intelfb_off(struct fb_info *info, int dpms_mode)
310{
311 struct intelfb_par *par = info->par;
312 struct drm_device *dev = par->dev;
313 struct drm_crtc *crtc;
314 struct drm_encoder *encoder;
315 int i;
316
317 /*
318 * For each CRTC in this fb, find all associated encoders
319 * and turn them off, then turn off the CRTC.
320 */
321 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
323
324 for (i = 0; i < par->crtc_count; i++)
325 if (crtc->base.id == par->crtc_ids[i])
326 break;
327
328 /* Found a CRTC on this fb, now find encoders */
329 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
330 if (encoder->crtc == crtc) {
331 struct drm_encoder_helper_funcs *encoder_funcs;
332 encoder_funcs = encoder->helper_private;
333 encoder_funcs->dpms(encoder, dpms_mode);
334 }
335 }
336 if (dpms_mode == DRM_MODE_DPMS_OFF)
337 crtc_funcs->dpms(crtc, dpms_mode);
338 }
339}
340
341static int intelfb_blank(int blank, struct fb_info *info)
342{
343 switch (blank) {
344 case FB_BLANK_UNBLANK:
345 intelfb_on(info);
346 break;
347 case FB_BLANK_NORMAL:
348 intelfb_off(info, DRM_MODE_DPMS_STANDBY);
349 break;
350 case FB_BLANK_HSYNC_SUSPEND:
351 intelfb_off(info, DRM_MODE_DPMS_STANDBY);
352 break;
353 case FB_BLANK_VSYNC_SUSPEND:
354 intelfb_off(info, DRM_MODE_DPMS_SUSPEND);
355 break;
356 case FB_BLANK_POWERDOWN:
357 intelfb_off(info, DRM_MODE_DPMS_OFF);
358 break;
359 }
360 return 0;
361}
362
363static struct fb_ops intelfb_ops = {
364 .owner = THIS_MODULE,
365 .fb_check_var = intelfb_check_var,
366 .fb_set_par = intelfb_set_par,
367 .fb_setcolreg = intelfb_setcolreg,
368 .fb_fillrect = cfb_fillrect,
369 .fb_copyarea = cfb_copyarea,
370 .fb_imageblit = cfb_imageblit,
371 .fb_pan_display = intelfb_pan_display,
372 .fb_blank = intelfb_blank,
373};
374
375/**
376 * Currently it is assumed that the old framebuffer is reused.
377 *
378 * LOCKING
379 * caller should hold the mode config lock.
380 *
381 */
382int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
383{
384 struct fb_info *info;
385 struct drm_framebuffer *fb;
386 struct drm_display_mode *mode = crtc->desired_mode;
387
388 fb = crtc->fb;
389 if (!fb)
390 return 1;
391
392 info = fb->fbdev;
393 if (!info)
394 return 1;
395
396 if (!mode)
397 return 1;
398
399 info->var.xres = mode->hdisplay;
400 info->var.right_margin = mode->hsync_start - mode->hdisplay;
401 info->var.hsync_len = mode->hsync_end - mode->hsync_start;
402 info->var.left_margin = mode->htotal - mode->hsync_end;
403 info->var.yres = mode->vdisplay;
404 info->var.lower_margin = mode->vsync_start - mode->vdisplay;
405 info->var.vsync_len = mode->vsync_end - mode->vsync_start;
406 info->var.upper_margin = mode->vtotal - mode->vsync_end;
407 info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
408 /* avoid overflow */
409 info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
410
411 return 0;
412}
413EXPORT_SYMBOL(intelfb_resize);
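The locking note above is the whole contract: intelfb_resize() updates info->var but takes no locks itself. As a hedged sketch (not part of this patch), a hypothetical caller would wrap it in the mode config mutex like this:

	/*
	 * Illustrative sketch only: intelfb_resize() expects the caller to
	 * hold the mode config lock, so a hypothetical helper takes it
	 * around the call. Returns 0 on success, 1 when the CRTC has no
	 * fb, fbdev info or desired mode bound to it.
	 */
	static int example_locked_resize(struct drm_device *dev,
					 struct drm_crtc *crtc)
	{
		int ret;

		mutex_lock(&dev->mode_config.mutex);
		ret = intelfb_resize(dev, crtc);
		mutex_unlock(&dev->mode_config.mutex);

		return ret;
	}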
414
415static struct drm_mode_set kernelfb_mode;
416
417static int intelfb_panic(struct notifier_block *n, unsigned long unused,
418 void *panic_str)
419{
420 DRM_ERROR("panic occurred, switching back to text console\n");
421
422 intelfb_restore();
423 return 0;
424}
425
426static struct notifier_block paniced = {
427 .notifier_call = intelfb_panic,
428};
429
430static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
431 uint32_t fb_height, uint32_t surface_width,
432 uint32_t surface_height,
433 struct intel_framebuffer **intel_fb_p)
434{
435 struct fb_info *info;
436 struct intelfb_par *par;
437 struct drm_framebuffer *fb;
438 struct intel_framebuffer *intel_fb;
439 struct drm_mode_fb_cmd mode_cmd;
440 struct drm_gem_object *fbo = NULL;
441 struct drm_i915_gem_object *obj_priv;
442 struct device *device = &dev->pdev->dev;
443 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
444
445 mode_cmd.width = surface_width;
446 mode_cmd.height = surface_height;
447
448 mode_cmd.bpp = 32;
449 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
450 mode_cmd.depth = 24;
451
452 size = mode_cmd.pitch * mode_cmd.height;
453 size = ALIGN(size, PAGE_SIZE);
454 fbo = drm_gem_object_alloc(dev, size);
455 if (!fbo) {
456 printk(KERN_ERR "failed to allocate framebuffer\n");
457 ret = -ENOMEM;
458 goto out;
459 }
460 obj_priv = fbo->driver_private;
461
462 mutex_lock(&dev->struct_mutex);
463
464 ret = i915_gem_object_pin(fbo, PAGE_SIZE);
465 if (ret) {
466 DRM_ERROR("failed to pin fb: %d\n", ret);
467 goto out_unref;
468 }
469
470 /* Flush everything out, we'll be doing GTT only from now on */
471 i915_gem_object_set_to_gtt_domain(fbo, 1);
472
473 ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
474 if (ret) {
475 DRM_ERROR("failed to allocate fb.\n");
476 goto out_unref;
477 }
478
479 list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
480
481 intel_fb = to_intel_framebuffer(fb);
482 *intel_fb_p = intel_fb;
483
484 info = framebuffer_alloc(sizeof(struct intelfb_par), device);
485 if (!info) {
486 ret = -ENOMEM;
487 goto out_unref;
488 }
489
490 par = info->par;
491
492 strcpy(info->fix.id, "inteldrmfb");
493 info->fix.type = FB_TYPE_PACKED_PIXELS;
494 info->fix.visual = FB_VISUAL_TRUECOLOR;
495 info->fix.type_aux = 0;
496 info->fix.xpanstep = 1; /* doing it in hw */
497 info->fix.ypanstep = 1; /* doing it in hw */
498 info->fix.ywrapstep = 0;
499 info->fix.accel = FB_ACCEL_I830;
500 info->fix.type_aux = 0;
501
502 info->flags = FBINFO_DEFAULT;
503
504 info->fbops = &intelfb_ops;
505
506 info->fix.line_length = fb->pitch;
507 info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
508 info->fix.smem_len = size;
509
510 info->flags = FBINFO_DEFAULT;
511
512 info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
513 size);
514 if (!info->screen_base) {
515 ret = -ENOSPC;
516 goto out_unref;
517 }
518 info->screen_size = size;
519
520// memset(info->screen_base, 0, size);
521
522 info->pseudo_palette = fb->pseudo_palette;
523 info->var.xres_virtual = fb->width;
524 info->var.yres_virtual = fb->height;
525 info->var.bits_per_pixel = fb->bits_per_pixel;
526 info->var.xoffset = 0;
527 info->var.yoffset = 0;
528 info->var.activate = FB_ACTIVATE_NOW;
529 info->var.height = -1;
530 info->var.width = -1;
531
532 info->var.xres = fb_width;
533 info->var.yres = fb_height;
534
535 /* FIXME: we really shouldn't expose mmio space at all */
536 info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
537 info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
538
539 info->pixmap.size = 64*1024;
540 info->pixmap.buf_align = 8;
541 info->pixmap.access_align = 32;
542 info->pixmap.flags = FB_PIXMAP_SYSTEM;
543 info->pixmap.scan_align = 1;
544
545	switch (fb->depth) {
546 case 8:
547 info->var.red.offset = 0;
548 info->var.green.offset = 0;
549 info->var.blue.offset = 0;
550 info->var.red.length = 8; /* 8bit DAC */
551 info->var.green.length = 8;
552 info->var.blue.length = 8;
553 info->var.transp.offset = 0;
554 info->var.transp.length = 0;
555 break;
556 case 15:
557 info->var.red.offset = 10;
558 info->var.green.offset = 5;
559 info->var.blue.offset = 0;
560 info->var.red.length = 5;
561 info->var.green.length = 5;
562 info->var.blue.length = 5;
563 info->var.transp.offset = 15;
564 info->var.transp.length = 1;
565 break;
566 case 16:
567 info->var.red.offset = 11;
568 info->var.green.offset = 5;
569 info->var.blue.offset = 0;
570 info->var.red.length = 5;
571 info->var.green.length = 6;
572 info->var.blue.length = 5;
573 info->var.transp.offset = 0;
574 break;
575 case 24:
576 info->var.red.offset = 16;
577 info->var.green.offset = 8;
578 info->var.blue.offset = 0;
579 info->var.red.length = 8;
580 info->var.green.length = 8;
581 info->var.blue.length = 8;
582 info->var.transp.offset = 0;
583 info->var.transp.length = 0;
584 break;
585 case 32:
586 info->var.red.offset = 16;
587 info->var.green.offset = 8;
588 info->var.blue.offset = 0;
589 info->var.red.length = 8;
590 info->var.green.length = 8;
591 info->var.blue.length = 8;
592 info->var.transp.offset = 24;
593 info->var.transp.length = 8;
594 break;
595 default:
596 break;
597 }
598
599 fb->fbdev = info;
600
601 par->intel_fb = intel_fb;
602 par->dev = dev;
603
604	/* To allow resizing without swapping buffers */
605 printk("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
606 intel_fb->base.height, obj_priv->gtt_offset, fbo);
607
608 mutex_unlock(&dev->struct_mutex);
609 return 0;
610
611out_unref:
612 drm_gem_object_unreference(fbo);
613 mutex_unlock(&dev->struct_mutex);
614out:
615 return ret;
616}
617
618static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc)
619{
620 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
621 struct intel_framebuffer *intel_fb;
622 struct drm_framebuffer *fb;
623 struct drm_connector *connector;
624 struct fb_info *info;
625 struct intelfb_par *par;
626 struct drm_mode_set *modeset;
627 unsigned int width, height;
628 int new_fb = 0;
629 int ret, i, conn_count;
630
631 if (!drm_helper_crtc_in_use(crtc))
632 return 0;
633
634 if (!crtc->desired_mode)
635 return 0;
636
637 width = crtc->desired_mode->hdisplay;
638 height = crtc->desired_mode->vdisplay;
639
640 /* is there an fb bound to this crtc already */
641 if (!intel_crtc->mode_set.fb) {
642 ret = intelfb_create(dev, width, height, width, height, &intel_fb);
643 if (ret)
644 return -EINVAL;
645 new_fb = 1;
646 } else {
647 fb = intel_crtc->mode_set.fb;
648 intel_fb = to_intel_framebuffer(fb);
649 if ((intel_fb->base.width < width) || (intel_fb->base.height < height))
650 return -EINVAL;
651 }
652
653 info = intel_fb->base.fbdev;
654 par = info->par;
655
656 modeset = &intel_crtc->mode_set;
657 modeset->fb = &intel_fb->base;
658 conn_count = 0;
659 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
660 if (connector->encoder)
661 if (connector->encoder->crtc == modeset->crtc) {
662 modeset->connectors[conn_count] = connector;
663 conn_count++;
664 if (conn_count > INTELFB_CONN_LIMIT)
665 BUG();
666 }
667 }
668
669 for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
670 modeset->connectors[i] = NULL;
671
672 par->crtc_ids[0] = crtc->base.id;
673
674 modeset->num_connectors = conn_count;
675 if (modeset->mode != modeset->crtc->desired_mode)
676 modeset->mode = modeset->crtc->desired_mode;
677
678 par->crtc_count = 1;
679
680 if (new_fb) {
681 info->var.pixclock = -1;
682 if (register_framebuffer(info) < 0)
683 return -EINVAL;
684 } else
685 intelfb_set_par(info);
686
687 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
688 info->fix.id);
689
690 /* Switch back to kernel console on panic */
691 kernelfb_mode = *modeset;
692 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
693 printk(KERN_INFO "registered panic notifier\n");
694
695 return 0;
696}
697
698static int intelfb_multi_fb_probe(struct drm_device *dev)
699{
700
701 struct drm_crtc *crtc;
702 int ret = 0;
703
704 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
705 ret = intelfb_multi_fb_probe_crtc(dev, crtc);
706 if (ret)
707 return ret;
708 }
709 return ret;
710}
711
712static int intelfb_single_fb_probe(struct drm_device *dev)
713{
714 struct drm_crtc *crtc;
715 struct drm_connector *connector;
716 unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
717 unsigned int surface_width = 0, surface_height = 0;
718 int new_fb = 0;
719 int crtc_count = 0;
720 int ret, i, conn_count = 0;
721 struct intel_framebuffer *intel_fb;
722 struct fb_info *info;
723 struct intelfb_par *par;
724 struct drm_mode_set *modeset = NULL;
725
726 DRM_DEBUG("\n");
727
728 /* Get a count of crtcs now in use and new min/maxes width/heights */
729 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
730 if (!drm_helper_crtc_in_use(crtc))
731 continue;
732
733 crtc_count++;
734 if (!crtc->desired_mode)
735 continue;
736
737 /* Smallest mode determines console size... */
738 if (crtc->desired_mode->hdisplay < fb_width)
739 fb_width = crtc->desired_mode->hdisplay;
740
741 if (crtc->desired_mode->vdisplay < fb_height)
742 fb_height = crtc->desired_mode->vdisplay;
743
744 /* ... but largest for memory allocation dimensions */
745 if (crtc->desired_mode->hdisplay > surface_width)
746 surface_width = crtc->desired_mode->hdisplay;
747
748 if (crtc->desired_mode->vdisplay > surface_height)
749 surface_height = crtc->desired_mode->vdisplay;
750 }
751
752 if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
753 /* hmm everyone went away - assume VGA cable just fell out
754 and will come back later. */
755 DRM_DEBUG("no CRTCs available?\n");
756 return 0;
757 }
758
759//fail
760 /* Find the fb for our new config */
761 if (list_empty(&dev->mode_config.fb_kernel_list)) {
762 DRM_DEBUG("creating new fb (console size %dx%d, "
763 "buffer size %dx%d)\n", fb_width, fb_height,
764 surface_width, surface_height);
765 ret = intelfb_create(dev, fb_width, fb_height, surface_width,
766 surface_height, &intel_fb);
767 if (ret)
768 return -EINVAL;
769 new_fb = 1;
770 } else {
771 struct drm_framebuffer *fb;
772
773 fb = list_first_entry(&dev->mode_config.fb_kernel_list,
774 struct drm_framebuffer, filp_head);
775 intel_fb = to_intel_framebuffer(fb);
776
777 /* if someone hotplugs something bigger than we have already
778 * allocated, we are pwned. As really we can't resize an
779 * fbdev that is in the wild currently due to fbdev not really
780 * being designed for the lower layers moving stuff around
781 * under it.
782 * - so in the grand style of things - punt.
783 */
784 if ((fb->width < surface_width) ||
785 (fb->height < surface_height)) {
786 DRM_ERROR("fb not large enough for console\n");
787 return -EINVAL;
788 }
789 }
790// fail
791
792 info = intel_fb->base.fbdev;
793 par = info->par;
794
795 crtc_count = 0;
796 /*
797 * For each CRTC, set up the connector list for the CRTC's mode
798 * set configuration.
799 */
800 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
801 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
802
803 modeset = &intel_crtc->mode_set;
804 modeset->fb = &intel_fb->base;
805 conn_count = 0;
806 list_for_each_entry(connector, &dev->mode_config.connector_list,
807 head) {
808 if (!connector->encoder)
809 continue;
810
811			if (connector->encoder->crtc == modeset->crtc) {
812 modeset->connectors[conn_count++] = connector;
813 if (conn_count > INTELFB_CONN_LIMIT)
814 BUG();
815 }
816 }
817
818 /* Zero out remaining connector pointers */
819 for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
820 modeset->connectors[i] = NULL;
821
822 par->crtc_ids[crtc_count++] = crtc->base.id;
823
824 modeset->num_connectors = conn_count;
825 if (modeset->mode != modeset->crtc->desired_mode)
826 modeset->mode = modeset->crtc->desired_mode;
827 }
828 par->crtc_count = crtc_count;
829
830 if (new_fb) {
831 info->var.pixclock = -1;
832 if (register_framebuffer(info) < 0)
833 return -EINVAL;
834 } else
835 intelfb_set_par(info);
836
837 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
838 info->fix.id);
839
840 /* Switch back to kernel console on panic */
841 kernelfb_mode = *modeset;
842 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
843 printk(KERN_INFO "registered panic notifier\n");
844
845 return 0;
846}
847
848/**
849 * intelfb_restore - restore the framebuffer console (kernel) config
850 *
851 * Restores the kernel's fbcon mode, used for lastclose & panic paths.
852 */
853void intelfb_restore(void)
854{
855 drm_crtc_helper_set_config(&kernelfb_mode);
856}
857
858static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
859{
860 intelfb_restore();
861}
862
863static struct sysrq_key_op sysrq_intelfb_restore_op = {
864 .handler = intelfb_sysrq,
865 .help_msg = "force fb",
866 .action_msg = "force restore of fb console",
867};
868
869int intelfb_probe(struct drm_device *dev)
870{
871 int ret;
872
873 DRM_DEBUG("\n");
874
875 /* something has changed in the lower levels of hell - deal with it
876 here */
877
878 /* two modes : a) 1 fb to rule all crtcs.
879 b) one fb per crtc.
880 two actions 1) new connected device
881 2) device removed.
882 case a/1 : if the fb surface isn't big enough - resize the surface fb.
883 if the fb size isn't big enough - resize fb into surface.
884 if everything big enough configure the new crtc/etc.
885 case a/2 : undo the configuration
886 possibly resize down the fb to fit the new configuration.
887 case b/1 : see if it is on a new crtc - setup a new fb and add it.
888 case b/2 : teardown the new fb.
889 */
890
891 /* mode a first */
892 /* search for an fb */
893 if (i915_fbpercrtc == 1) {
894 ret = intelfb_multi_fb_probe(dev);
895 } else {
896 ret = intelfb_single_fb_probe(dev);
897 }
898
899 register_sysrq_key('g', &sysrq_intelfb_restore_op);
900
901 return ret;
902}
903EXPORT_SYMBOL(intelfb_probe);
904
905int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
906{
907 struct fb_info *info;
908
909 if (!fb)
910 return -EINVAL;
911
912 info = fb->fbdev;
913
914 if (info) {
915 unregister_framebuffer(info);
916 iounmap(info->screen_base);
917 framebuffer_release(info);
918 }
919
920 atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
921 memset(&kernelfb_mode, 0, sizeof(struct drm_mode_set));
922 return 0;
923}
924EXPORT_SYMBOL(intelfb_remove);
925MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
new file mode 100644
index 000000000000..a5a2f5339e9e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright © 2006-2008 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Eric Anholt <eric@anholt.net>
27 */
28#include <linux/i2c.h>
29#include <linux/i2c-id.h>
30#include <linux/i2c-algo-bit.h>
31#include "drmP.h"
32#include "drm.h"
33#include "intel_drv.h"
34#include "i915_drm.h"
35#include "i915_drv.h"
36
37/*
38 * Intel GPIO access functions
39 */
40
41#define I2C_RISEFALL_TIME 20
42
43static int get_clock(void *data)
44{
45 struct intel_i2c_chan *chan = data;
46 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
47 u32 val;
48
49 val = I915_READ(chan->reg);
50 return ((val & GPIO_CLOCK_VAL_IN) != 0);
51}
52
53static int get_data(void *data)
54{
55 struct intel_i2c_chan *chan = data;
56 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
57 u32 val;
58
59 val = I915_READ(chan->reg);
60 return ((val & GPIO_DATA_VAL_IN) != 0);
61}
62
63static void set_clock(void *data, int state_high)
64{
65 struct intel_i2c_chan *chan = data;
66 struct drm_device *dev = chan->drm_dev;
67 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
68 u32 reserved = 0, clock_bits;
69
70 /* On most chips, these bits must be preserved in software. */
71 if (!IS_I830(dev) && !IS_845G(dev))
72 reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
73 GPIO_CLOCK_PULLUP_DISABLE);
74
75 if (state_high)
76 clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
77 else
78 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
79 GPIO_CLOCK_VAL_MASK;
80 I915_WRITE(chan->reg, reserved | clock_bits);
81 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
82}
83
84static void set_data(void *data, int state_high)
85{
86 struct intel_i2c_chan *chan = data;
87 struct drm_device *dev = chan->drm_dev;
88 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
89 u32 reserved = 0, data_bits;
90
91 /* On most chips, these bits must be preserved in software. */
92 if (!IS_I830(dev) && !IS_845G(dev))
93 reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
94 GPIO_CLOCK_PULLUP_DISABLE);
95
96 if (state_high)
97 data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
98 else
99 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
100 GPIO_DATA_VAL_MASK;
101
102 I915_WRITE(chan->reg, reserved | data_bits);
103 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
104}
105
106/**
107 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
108 * @dev: DRM device
109 * @output: driver specific output device
110 * @reg: GPIO reg to use
111 * @name: name for this bus
112 *
113 * Creates and registers a new i2c bus with the Linux i2c layer, for use
114 * in output probing and control (e.g. DDC or SDVO control functions).
115 *
116 * Possible values for @reg include:
117 * %GPIOA
118 * %GPIOB
119 * %GPIOC
120 * %GPIOD
121 * %GPIOE
122 * %GPIOF
123 * %GPIOG
124 * %GPIOH
125 * see PRM for details on how these different busses are used.
126 */
127struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
128 const char *name)
129{
130 struct intel_i2c_chan *chan;
131
132 chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
133 if (!chan)
134 goto out_free;
135
136 chan->drm_dev = dev;
137 chan->reg = reg;
138 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
139 chan->adapter.owner = THIS_MODULE;
140#ifndef I2C_HW_B_INTELFB
141#define I2C_HW_B_INTELFB I2C_HW_B_I810
142#endif
143 chan->adapter.id = I2C_HW_B_INTELFB;
144 chan->adapter.algo_data = &chan->algo;
145 chan->adapter.dev.parent = &dev->pdev->dev;
146 chan->algo.setsda = set_data;
147 chan->algo.setscl = set_clock;
148 chan->algo.getsda = get_data;
149 chan->algo.getscl = get_clock;
150 chan->algo.udelay = 20;
151 chan->algo.timeout = usecs_to_jiffies(2200);
152 chan->algo.data = chan;
153
154 i2c_set_adapdata(&chan->adapter, chan);
155
156	if (i2c_bit_add_bus(&chan->adapter))
157 goto out_free;
158
159 /* JJJ: raise SCL and SDA? */
160 set_data(chan, 1);
161 set_clock(chan, 1);
162 udelay(20);
163
164 return chan;
165
166out_free:
167 kfree(chan);
168 return NULL;
169}
170
171/**
172 * intel_i2c_destroy - unregister and free i2c bus resources
173 * @output: channel to free
174 *
175 * Unregister the adapter from the i2c layer, then free the structure.
176 */
177void intel_i2c_destroy(struct intel_i2c_chan *chan)
178{
179 if (!chan)
180 return;
181
182 i2c_del_adapter(&chan->adapter);
183 kfree(chan);
184}
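For reference, a minimal sketch (not part of this patch) of how intel_i2c_create() and intel_i2c_destroy() pair up; the GPIOC register and "LVDSDDC_C" name are simply the LVDS DDC values used elsewhere in this series:

	/*
	 * Illustrative sketch only: register a DDC bus on GPIOC, use its
	 * adapter for i2c transfers, then tear it down again.
	 */
	static void example_ddc_roundtrip(struct drm_device *dev)
	{
		struct intel_i2c_chan *ddc;

		ddc = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
		if (!ddc)
			return;

		/* ... probe via i2c_transfer(&ddc->adapter, ...) here ... */

		intel_i2c_destroy(ddc);
	}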
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
new file mode 100644
index 000000000000..ccecfaf6307b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -0,0 +1,525 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Eric Anholt <eric@anholt.net>
26 * Dave Airlie <airlied@linux.ie>
27 * Jesse Barnes <jesse.barnes@intel.com>
28 */
29
30#include <linux/i2c.h>
31#include "drmP.h"
32#include "drm.h"
33#include "drm_crtc.h"
34#include "drm_edid.h"
35#include "intel_drv.h"
36#include "i915_drm.h"
37#include "i915_drv.h"
38
39/**
40 * Sets the backlight level.
41 *
42 * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
43 */
44static void intel_lvds_set_backlight(struct drm_device *dev, int level)
45{
46 struct drm_i915_private *dev_priv = dev->dev_private;
47 u32 blc_pwm_ctl;
48
49 blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
50 I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
51 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
52}
53
54/**
55 * Returns the maximum level of the backlight duty cycle field.
56 */
57static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
58{
59 struct drm_i915_private *dev_priv = dev->dev_private;
60
61 return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
62 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
63}
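Together, the two helpers above define the backlight interface used in the rest of this file: levels run from 0 to intel_lvds_get_max_backlight(). A hedged example (illustrative only, not used by this patch) of driving the panel at roughly half brightness:

	/*
	 * Illustrative sketch only: set the backlight duty cycle to about
	 * half of the maximum reported by BLC_PWM_CTL.
	 */
	static void example_half_brightness(struct drm_device *dev)
	{
		u32 max = intel_lvds_get_max_backlight(dev);

		intel_lvds_set_backlight(dev, max / 2);
	}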
64
65/**
66 * Sets the power state for the panel.
67 */
68static void intel_lvds_set_power(struct drm_device *dev, bool on)
69{
70 struct drm_i915_private *dev_priv = dev->dev_private;
71 u32 pp_status;
72
73 if (on) {
74 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
75 POWER_TARGET_ON);
76 do {
77 pp_status = I915_READ(PP_STATUS);
78 } while ((pp_status & PP_ON) == 0);
79
80 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
81 } else {
82 intel_lvds_set_backlight(dev, 0);
83
84 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
85 ~POWER_TARGET_ON);
86 do {
87 pp_status = I915_READ(PP_STATUS);
88 } while (pp_status & PP_ON);
89 }
90}
91
92static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
93{
94 struct drm_device *dev = encoder->dev;
95
96 if (mode == DRM_MODE_DPMS_ON)
97 intel_lvds_set_power(dev, true);
98 else
99 intel_lvds_set_power(dev, false);
100
101 /* XXX: We never power down the LVDS pairs. */
102}
103
104static void intel_lvds_save(struct drm_connector *connector)
105{
106 struct drm_device *dev = connector->dev;
107 struct drm_i915_private *dev_priv = dev->dev_private;
108
109 dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS);
110 dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS);
111 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
112 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
113 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
114 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
115 BACKLIGHT_DUTY_CYCLE_MASK);
116
117 /*
118 * If the light is off at server startup, just make it full brightness
119 */
120 if (dev_priv->backlight_duty_cycle == 0)
121 dev_priv->backlight_duty_cycle =
122 intel_lvds_get_max_backlight(dev);
123}
124
125static void intel_lvds_restore(struct drm_connector *connector)
126{
127 struct drm_device *dev = connector->dev;
128 struct drm_i915_private *dev_priv = dev->dev_private;
129
130 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
131 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
132 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
133 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
134 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
135 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
136 intel_lvds_set_power(dev, true);
137 else
138 intel_lvds_set_power(dev, false);
139}
140
141static int intel_lvds_mode_valid(struct drm_connector *connector,
142 struct drm_display_mode *mode)
143{
144 struct drm_device *dev = connector->dev;
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
147
148 if (fixed_mode) {
149 if (mode->hdisplay > fixed_mode->hdisplay)
150 return MODE_PANEL;
151 if (mode->vdisplay > fixed_mode->vdisplay)
152 return MODE_PANEL;
153 }
154
155 return MODE_OK;
156}
157
158static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
159 struct drm_display_mode *mode,
160 struct drm_display_mode *adjusted_mode)
161{
162 struct drm_device *dev = encoder->dev;
163 struct drm_i915_private *dev_priv = dev->dev_private;
164 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
165 struct drm_encoder *tmp_encoder;
166
167 /* Should never happen!! */
168 if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
169 printk(KERN_ERR "Can't support LVDS on pipe A\n");
170 return false;
171 }
172
173 /* Should never happen!! */
174 list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
175 if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
176 printk(KERN_ERR "Can't enable LVDS and another "
177 "encoder on the same pipe\n");
178 return false;
179 }
180 }
181
182 /*
183 * If we have timings from the BIOS for the panel, put them in
184 * to the adjusted mode. The CRTC will be set up for this mode,
185 * with the panel scaling set up to source from the H/VDisplay
186 * of the original mode.
187 */
188 if (dev_priv->panel_fixed_mode != NULL) {
189 adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
190 adjusted_mode->hsync_start =
191 dev_priv->panel_fixed_mode->hsync_start;
192 adjusted_mode->hsync_end =
193 dev_priv->panel_fixed_mode->hsync_end;
194 adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
195 adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
196 adjusted_mode->vsync_start =
197 dev_priv->panel_fixed_mode->vsync_start;
198 adjusted_mode->vsync_end =
199 dev_priv->panel_fixed_mode->vsync_end;
200 adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
201 adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
202 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
203 }
204
205 /*
206 * XXX: It would be nice to support lower refresh rates on the
207 * panels to reduce power consumption, and perhaps match the
208 * user's requested refresh rate.
209 */
210
211 return true;
212}
213
214static void intel_lvds_prepare(struct drm_encoder *encoder)
215{
216 struct drm_device *dev = encoder->dev;
217 struct drm_i915_private *dev_priv = dev->dev_private;
218
219 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
220 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
221 BACKLIGHT_DUTY_CYCLE_MASK);
222
223 intel_lvds_set_power(dev, false);
224}
225
226static void intel_lvds_commit(struct drm_encoder *encoder)
227{
228 struct drm_device *dev = encoder->dev;
229 struct drm_i915_private *dev_priv = dev->dev_private;
230
231 if (dev_priv->backlight_duty_cycle == 0)
232 dev_priv->backlight_duty_cycle =
233 intel_lvds_get_max_backlight(dev);
234
235 intel_lvds_set_power(dev, true);
236}
237
238static void intel_lvds_mode_set(struct drm_encoder *encoder,
239 struct drm_display_mode *mode,
240 struct drm_display_mode *adjusted_mode)
241{
242 struct drm_device *dev = encoder->dev;
243 struct drm_i915_private *dev_priv = dev->dev_private;
244 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
245 u32 pfit_control;
246
247 /*
248 * The LVDS pin pair will already have been turned on in the
249 * intel_crtc_mode_set since it has a large impact on the DPLL
250 * settings.
251 */
252
253 /*
254 * Enable automatic panel scaling so that non-native modes fill the
255 * screen. Should be enabled before the pipe is enabled, according to
256 * register description and PRM.
257 */
258 if (mode->hdisplay != adjusted_mode->hdisplay ||
259 mode->vdisplay != adjusted_mode->vdisplay)
260 pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
261 HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
262 HORIZ_INTERP_BILINEAR);
263 else
264 pfit_control = 0;
265
266 if (!IS_I965G(dev)) {
267 if (dev_priv->panel_wants_dither)
268 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
269 }
270 else
271 pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
272
273 I915_WRITE(PFIT_CONTROL, pfit_control);
274}
275
276/**
277 * Detect the LVDS connection.
278 *
279 * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have
280 * been set up if the LVDS was actually connected anyway.
281 */
282static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
283{
284 return connector_status_connected;
285}
286
287/**
288 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
289 */
290static int intel_lvds_get_modes(struct drm_connector *connector)
291{
292 struct drm_device *dev = connector->dev;
293 struct intel_output *intel_output = to_intel_output(connector);
294 struct drm_i915_private *dev_priv = dev->dev_private;
295 int ret = 0;
296
297 ret = intel_ddc_get_modes(intel_output);
298
299 if (ret)
300 return ret;
301
302 /* Didn't get an EDID, so
303 * Set wide sync ranges so we get all modes
304 * handed to valid_mode for checking
305 */
306 connector->display_info.min_vfreq = 0;
307 connector->display_info.max_vfreq = 200;
308 connector->display_info.min_hfreq = 0;
309 connector->display_info.max_hfreq = 200;
310
311 if (dev_priv->panel_fixed_mode != NULL) {
312 struct drm_display_mode *mode;
313
314		mutex_lock(&dev->mode_config.mutex);
315 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
316 drm_mode_probed_add(connector, mode);
317 mutex_unlock(&dev->mode_config.mutex);
318
319 return 1;
320 }
321
322 return 0;
323}
324
325/**
326 * intel_lvds_destroy - unregister and free LVDS structures
327 * @connector: connector to free
328 *
329 * Unregister the DDC bus for this connector then free the driver private
330 * structure.
331 */
332static void intel_lvds_destroy(struct drm_connector *connector)
333{
334 struct intel_output *intel_output = to_intel_output(connector);
335
336 if (intel_output->ddc_bus)
337 intel_i2c_destroy(intel_output->ddc_bus);
338 drm_sysfs_connector_remove(connector);
339 drm_connector_cleanup(connector);
340 kfree(connector);
341}
342
343static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
344 .dpms = intel_lvds_dpms,
345 .mode_fixup = intel_lvds_mode_fixup,
346 .prepare = intel_lvds_prepare,
347 .mode_set = intel_lvds_mode_set,
348 .commit = intel_lvds_commit,
349};
350
351static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
352 .get_modes = intel_lvds_get_modes,
353 .mode_valid = intel_lvds_mode_valid,
354 .best_encoder = intel_best_encoder,
355};
356
357static const struct drm_connector_funcs intel_lvds_connector_funcs = {
358 .save = intel_lvds_save,
359 .restore = intel_lvds_restore,
360 .detect = intel_lvds_detect,
361 .fill_modes = drm_helper_probe_single_connector_modes,
362 .destroy = intel_lvds_destroy,
363};
364
365
366static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
367{
368 drm_encoder_cleanup(encoder);
369}
370
371static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
372 .destroy = intel_lvds_enc_destroy,
373};
374
375
376
377/**
378 * intel_lvds_init - setup LVDS connectors on this device
379 * @dev: drm device
380 *
381 * Create the connector, register the LVDS DDC bus, and try to figure out what
382 * modes we can display on the LVDS panel (if present).
383 */
384void intel_lvds_init(struct drm_device *dev)
385{
386 struct drm_i915_private *dev_priv = dev->dev_private;
387 struct intel_output *intel_output;
388 struct drm_connector *connector;
389 struct drm_encoder *encoder;
390 struct drm_display_mode *scan; /* *modes, *bios_mode; */
391 struct drm_crtc *crtc;
392 u32 lvds;
393 int pipe;
394
395 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
396 if (!intel_output) {
397 return;
398 }
399
400 connector = &intel_output->base;
401 encoder = &intel_output->enc;
402 drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
403 DRM_MODE_CONNECTOR_LVDS);
404
405 drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs,
406 DRM_MODE_ENCODER_LVDS);
407
408 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
409 intel_output->type = INTEL_OUTPUT_LVDS;
410
411 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
412 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
413 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
414 connector->interlace_allowed = false;
415 connector->doublescan_allowed = false;
416
417
418 /*
419 * LVDS discovery:
420 * 1) check for EDID on DDC
421 * 2) check for VBT data
422 * 3) check to see if LVDS is already on
423 * if none of the above, no panel
424 * 4) make sure lid is open
425 * if closed, act like it's not there for now
426 */
427
428 /* Set up the DDC bus. */
429 intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
430 if (!intel_output->ddc_bus) {
431 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
432 "failed.\n");
433 goto failed;
434 }
435
436 /*
437 * Attempt to get the fixed panel mode from DDC. Assume that the
438 * preferred mode is the right one.
439 */
440 intel_ddc_get_modes(intel_output);
441
442 list_for_each_entry(scan, &connector->probed_modes, head) {
443 mutex_lock(&dev->mode_config.mutex);
444 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
445 dev_priv->panel_fixed_mode =
446 drm_mode_duplicate(dev, scan);
447 mutex_unlock(&dev->mode_config.mutex);
448 goto out; /* FIXME: check for quirks */
449 }
450 mutex_unlock(&dev->mode_config.mutex);
451 }
452
453 /* Failed to get EDID, what about VBT? */
454 if (dev_priv->vbt_mode) {
455 mutex_lock(&dev->mode_config.mutex);
456 dev_priv->panel_fixed_mode =
457 drm_mode_duplicate(dev, dev_priv->vbt_mode);
458 mutex_unlock(&dev->mode_config.mutex);
459 }
460
461 /*
462 * If we didn't get EDID, try checking if the panel is already turned
463 * on. If so, assume that whatever is currently programmed is the
464 * correct mode.
465 */
466 lvds = I915_READ(LVDS);
467 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
468 crtc = intel_get_crtc_from_pipe(dev, pipe);
469
470 if (crtc && (lvds & LVDS_PORT_EN)) {
471 dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
472 if (dev_priv->panel_fixed_mode) {
473 dev_priv->panel_fixed_mode->type |=
474 DRM_MODE_TYPE_PREFERRED;
475 goto out; /* FIXME: check for quirks */
476 }
477 }
478
479 /* If we still don't have a mode after all that, give up. */
480 if (!dev_priv->panel_fixed_mode)
481 goto failed;
482
483 /* FIXME: detect aopen & mac mini type stuff automatically? */
484 /*
485 * Blacklist machines with BIOSes that list an LVDS panel without
486 * actually having one.
487 */
488 if (IS_I945GM(dev)) {
489 /* aopen mini pc */
490 if (dev->pdev->subsystem_vendor == 0xa0a0)
491 goto failed;
492
493 if ((dev->pdev->subsystem_vendor == 0x8086) &&
494 (dev->pdev->subsystem_device == 0x7270)) {
495 /* It's a Mac Mini or Macbook Pro.
496 *
497 * Apple hardware is out to get us. The macbook pro
498 * has a real LVDS panel, but the mac mini does not,
499 * and they have the same device IDs. We'll
500 * distinguish by panel size, on the assumption
501 * that Apple isn't about to make any machines with an
502 * 800x600 display.
503 */
504
505 if (dev_priv->panel_fixed_mode != NULL &&
506 dev_priv->panel_fixed_mode->hdisplay == 800 &&
507 dev_priv->panel_fixed_mode->vdisplay == 600) {
508 DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
509 goto failed;
510 }
511 }
512 }
513
514
515out:
516 drm_sysfs_connector_add(connector);
517 return;
518
519failed:
520 DRM_DEBUG("No LVDS modes found, disabling.\n");
521 if (intel_output->ddc_bus)
522 intel_i2c_destroy(intel_output->ddc_bus);
523 drm_connector_cleanup(connector);
524 kfree(connector);
525}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
new file mode 100644
index 000000000000..e42019e5d661
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
3 * Copyright (c) 2007 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/i2c.h>
27#include <linux/fb.h>
28#include "drmP.h"
29#include "intel_drv.h"
30
31/**
32 * intel_ddc_probe - probe for a DDC responder at the EDID address
33 * @intel_output: output whose DDC bus should be probed
34 */
35bool intel_ddc_probe(struct intel_output *intel_output)
36{
37 u8 out_buf[] = { 0x0, 0x0};
38 u8 buf[2];
39 int ret;
40 struct i2c_msg msgs[] = {
41 {
42 .addr = 0x50,
43 .flags = 0,
44 .len = 1,
45 .buf = out_buf,
46 },
47 {
48 .addr = 0x50,
49 .flags = I2C_M_RD,
50 .len = 1,
51 .buf = buf,
52 }
53 };
54
55 ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
56 if (ret == 2)
57 return true;
58
59 return false;
60}
61
62/**
63 * intel_ddc_get_modes - get modelist from monitor
64 * @connector: DRM connector device to use
65 *
66 * Fetch the EDID information from @connector using the DDC bus.
67 */
68int intel_ddc_get_modes(struct intel_output *intel_output)
69{
70 struct edid *edid;
71 int ret = 0;
72
73 edid = drm_get_edid(&intel_output->base,
74 &intel_output->ddc_bus->adapter);
75 if (edid) {
76 drm_mode_connector_update_edid_property(&intel_output->base,
77 edid);
78 ret = drm_add_edid_modes(&intel_output->base, edid);
79 kfree(edid);
80 }
81
82 return ret;
83}
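A hedged sketch (not part of this patch) of how the two helpers in this file combine: probe the DDC address first so the comparatively slow EDID fetch is skipped when nothing responds, then add whatever modes the EDID provides:

	/*
	 * Illustrative sketch only: returns the number of modes added, or 0
	 * if no DDC responder was found.
	 */
	static int example_probe_and_get_modes(struct intel_output *intel_output)
	{
		if (!intel_ddc_probe(intel_output))
			return 0;

		return intel_ddc_get_modes(intel_output);
	}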
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
new file mode 100644
index 000000000000..fbbaa4f414a0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -0,0 +1,1128 @@
1/*
2 * Copyright 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright © 2006-2007 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Eric Anholt <eric@anholt.net>
27 */
28#include <linux/i2c.h>
29#include <linux/delay.h>
30#include "drmP.h"
31#include "drm.h"
32#include "drm_crtc.h"
33#include "intel_drv.h"
34#include "i915_drm.h"
35#include "i915_drv.h"
36#include "intel_sdvo_regs.h"
37
38#undef SDVO_DEBUG
39
40struct intel_sdvo_priv {
41 struct intel_i2c_chan *i2c_bus;
42 int slaveaddr;
43 int output_device;
44
45 u16 active_outputs;
46
47 struct intel_sdvo_caps caps;
48 int pixel_clock_min, pixel_clock_max;
49
50 int save_sdvo_mult;
51 u16 save_active_outputs;
52 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
53 struct intel_sdvo_dtd save_output_dtd[16];
54 u32 save_SDVOX;
55};
56
57/**
58 * Writes the SDVOB or SDVOC with the given value, but always writes both
59 * SDVOB and SDVOC to work around apparent hardware issues (according to
60 * comments in the BIOS).
61 */
62static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
63{
64 struct drm_device *dev = intel_output->base.dev;
65 struct drm_i915_private *dev_priv = dev->dev_private;
66 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
67 u32 bval = val, cval = val;
68 int i;
69
70 if (sdvo_priv->output_device == SDVOB) {
71 cval = I915_READ(SDVOC);
72 } else {
73 bval = I915_READ(SDVOB);
74 }
75 /*
76 * Write the registers twice for luck. Sometimes,
77 * writing them only once doesn't appear to 'stick'.
78 * The BIOS does this too. Yay, magic
79 */
80 for (i = 0; i < 2; i++)
81 {
82 I915_WRITE(SDVOB, bval);
83 I915_READ(SDVOB);
84 I915_WRITE(SDVOC, cval);
85 I915_READ(SDVOC);
86 }
87}
88
89static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
90 u8 *ch)
91{
92 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
93 u8 out_buf[2];
94 u8 buf[2];
95 int ret;
96
97 struct i2c_msg msgs[] = {
98 {
99 .addr = sdvo_priv->i2c_bus->slave_addr,
100 .flags = 0,
101 .len = 1,
102 .buf = out_buf,
103 },
104 {
105 .addr = sdvo_priv->i2c_bus->slave_addr,
106 .flags = I2C_M_RD,
107 .len = 1,
108 .buf = buf,
109 }
110 };
111
112 out_buf[0] = addr;
113 out_buf[1] = 0;
114
115 if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
116 {
117 *ch = buf[0];
118 return true;
119 }
120
121 DRM_DEBUG("i2c transfer returned %d\n", ret);
122 return false;
123}
124
125static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
126 u8 ch)
127{
128 u8 out_buf[2];
129 struct i2c_msg msgs[] = {
130 {
131 .addr = intel_output->i2c_bus->slave_addr,
132 .flags = 0,
133 .len = 2,
134 .buf = out_buf,
135 }
136 };
137
138 out_buf[0] = addr;
139 out_buf[1] = ch;
140
141 if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
142 {
143 return true;
144 }
145 return false;
146}
147
148#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
149/** Mapping of command numbers to names, for debug output */
150static const struct _sdvo_cmd_name {
151 u8 cmd;
152 char *name;
153} sdvo_cmd_names[] = {
154 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
155 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
156 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
157 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
158 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
159 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
160 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
161 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
162 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
163 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
164 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
165 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
166 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
167 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
168 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
169 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
170 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
171 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
172 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
173 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
174 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
175 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
176 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
177 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
178 SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
179 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
180 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
181 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
182 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
183 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
184 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
185 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
186 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
187 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
188 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
189 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
190 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
191};
192
193#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
194#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
195
196#ifdef SDVO_DEBUG
197static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
198 void *args, int args_len)
199{
200 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
201 int i;
202
203 DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
204 for (i = 0; i < args_len; i++)
205 printk("%02X ", ((u8 *)args)[i]);
206 for (; i < 8; i++)
207 printk(" ");
208 for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
209 if (cmd == sdvo_cmd_names[i].cmd) {
210 printk("(%s)", sdvo_cmd_names[i].name);
211 break;
212 }
213 }
214 if (i == ARRAY_SIZE(sdvo_cmd_names))
215 printk("(%02X)", cmd);
216 printk("\n");
217}
218#else
219#define intel_sdvo_debug_write(o, c, a, l)
220#endif
221
222static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
223 void *args, int args_len)
224{
225 int i;
226
227 intel_sdvo_debug_write(intel_output, cmd, args, args_len);
228
229 for (i = 0; i < args_len; i++) {
230 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i,
231 ((u8*)args)[i]);
232 }
233
234 intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
235}
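/*
 * Illustrative note (editor addition, not part of the original driver):
 * given the register map in intel_sdvo_regs.h, a two-byte command such as
 * SDVO_CMD_SET_TARGET_OUTPUT goes out as three register writes, e.g.
 *
 *	u16 outputs = SDVO_OUTPUT_TMDS0;
 *	intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
 *			     &outputs, sizeof(outputs));
 *
 * writes ((u8 *)&outputs)[0] to SDVO_I2C_ARG_0 (0x07), ((u8 *)&outputs)[1]
 * to SDVO_I2C_ARG_1 (0x06), and finally the opcode 0x11 to SDVO_I2C_OPCODE
 * (0x08); since the opcode is written last, it presumably acts as the
 * execute trigger.
 */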
236
237#ifdef SDVO_DEBUG
238static const char *cmd_status_names[] = {
239 "Power on",
240 "Success",
241 "Not supported",
242 "Invalid arg",
243 "Pending",
244 "Target not specified",
245 "Scaling not supported"
246};
247
248static void intel_sdvo_debug_response(struct intel_output *intel_output,
249 void *response, int response_len,
250 u8 status)
251{
252 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
 int i;
253
254 DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
255 for (i = 0; i < response_len; i++)
256 printk("%02X ", ((u8 *)response)[i]);
257 for (; i < 8; i++)
258 printk(" ");
259 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
260 printk("(%s)", cmd_status_names[status]);
261 else
262 printk("(??? %d)", status);
263 printk("\n");
264}
265#else
266#define intel_sdvo_debug_response(o, r, l, s)
267#endif
268
269static u8 intel_sdvo_read_response(struct intel_output *intel_output,
270 void *response, int response_len)
271{
272 int i;
273 u8 status;
274 u8 retry = 50;
275
276 while (retry--) {
277 /* Read the command response */
278 for (i = 0; i < response_len; i++) {
279 intel_sdvo_read_byte(intel_output,
280 SDVO_I2C_RETURN_0 + i,
281 &((u8 *)response)[i]);
282 }
283
284 /* read the return status */
285 intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS,
286 &status);
287
288 intel_sdvo_debug_response(intel_output, response, response_len,
289 status);
290 if (status != SDVO_CMD_STATUS_PENDING)
291 return status;
292
293 mdelay(50);
294 }
295
296 return status;
297}
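/*
 * Illustrative note (editor addition, not part of the original driver):
 * with retry = 50 and mdelay(50), a command that keeps answering
 * SDVO_CMD_STATUS_PENDING is polled for roughly 50 * 50 ms = 2.5 seconds
 * before the last (still pending) status is handed back to the caller.
 */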
298
299static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
300{
301 if (mode->clock >= 100000)
302 return 1;
303 else if (mode->clock >= 50000)
304 return 2;
305 else
306 return 4;
307}
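/*
 * Illustrative note (editor addition, not part of the original driver):
 * for a mode with a 65,000 kHz dot clock (a common 1024x768@60 timing) the
 * function above returns 2, so intel_sdvo_mode_fixup() doubles
 * adjusted_mode->clock to 130,000 kHz for the CRTC while
 * intel_sdvo_mode_set() tells the device SDVO_CLOCK_RATE_MULT_2X and still
 * programs the original 65,000 kHz timing into the DTD.
 */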
308
309/**
310 * Don't check the status code after this command: reading the status
311 * switches the bus back to the SDVO chip, which defeats the purpose of
312 * doing the bus switch in the first place.
313 */
314static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
315 u8 target)
316{
317 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
318}
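/*
 * Descriptive note (editor addition): the only caller in this file appears
 * to be intel_sdvo_get_modes(), which switches the multiplexer to
 * SDVO_CONTROL_BUS_DDC2 right before probing EDID via intel_ddc_get_modes();
 * per the comment above, the status of the switch command itself is
 * deliberately never read back.
 */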
319
320static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
321{
322 struct intel_sdvo_set_target_input_args targets = {0};
323 u8 status;
324
325 if (target_0 && target_1)
326 return false; /* cannot target both inputs at once */
327
328 if (target_1)
329 targets.target_1 = 1;
330
331 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets,
332 sizeof(targets));
333
334 status = intel_sdvo_read_response(intel_output, NULL, 0);
335
336 return (status == SDVO_CMD_STATUS_SUCCESS);
337}
338
339/**
340 * Return whether each input is trained.
341 *
342 * This function is making an assumption about the layout of the response,
343 * which should be checked against the docs.
344 */
345static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2)
346{
347 struct intel_sdvo_get_trained_inputs_response response;
348 u8 status;
349
350 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
351 status = intel_sdvo_read_response(intel_output, &response, sizeof(response));
352 if (status != SDVO_CMD_STATUS_SUCCESS)
353 return false;
354
355 *input_1 = response.input0_trained;
356 *input_2 = response.input1_trained;
357 return true;
358}
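/*
 * Descriptive note (editor addition): the layout assumed above corresponds
 * to struct intel_sdvo_get_trained_inputs_response in intel_sdvo_regs.h,
 * i.e. a single byte whose two low-order bits (on the usual little-endian
 * bitfield layout) carry input0_trained and input1_trained, with six bits
 * of padding.
 */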
359
360static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output,
361 u16 *outputs)
362{
363 u8 status;
364
365 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
366 status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs));
367
368 return (status == SDVO_CMD_STATUS_SUCCESS);
369}
370
371static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output,
372 u16 outputs)
373{
374 u8 status;
375
376 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
377 sizeof(outputs));
378 status = intel_sdvo_read_response(intel_output, NULL, 0);
379 return (status == SDVO_CMD_STATUS_SUCCESS);
380}
381
382static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output,
383 int mode)
384{
385 u8 status, state = SDVO_ENCODER_STATE_ON;
386
387 switch (mode) {
388 case DRM_MODE_DPMS_ON:
389 state = SDVO_ENCODER_STATE_ON;
390 break;
391 case DRM_MODE_DPMS_STANDBY:
392 state = SDVO_ENCODER_STATE_STANDBY;
393 break;
394 case DRM_MODE_DPMS_SUSPEND:
395 state = SDVO_ENCODER_STATE_SUSPEND;
396 break;
397 case DRM_MODE_DPMS_OFF:
398 state = SDVO_ENCODER_STATE_OFF;
399 break;
400 }
401
402 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
403 sizeof(state));
404 status = intel_sdvo_read_response(intel_output, NULL, 0);
405
406 return (status == SDVO_CMD_STATUS_SUCCESS);
407}
408
409static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output,
410 int *clock_min,
411 int *clock_max)
412{
413 struct intel_sdvo_pixel_clock_range clocks;
414 u8 status;
415
416 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
417 NULL, 0);
418
419 status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks));
420
421 if (status != SDVO_CMD_STATUS_SUCCESS)
422 return false;
423
424 /* Convert the values from units of 10 kHz to kHz. */
425 *clock_min = clocks.min * 10;
426 *clock_max = clocks.max * 10;
427
428 return true;
429}
430
431static bool intel_sdvo_set_target_output(struct intel_output *intel_output,
432 u16 outputs)
433{
434 u8 status;
435
436 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
437 sizeof(outputs));
438
439 status = intel_sdvo_read_response(intel_output, NULL, 0);
440 return (status == SDVO_CMD_STATUS_SUCCESS);
441}
442
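/*
 * Descriptive note (editor addition): intel_sdvo_get_timing() and
 * intel_sdvo_set_timing() below issue the PART1 opcode they are given and
 * then opcode + 1 for PART2. This relies on the command numbering in
 * intel_sdvo_regs.h, where each *_TIMINGS_PART2 opcode immediately follows
 * its *_TIMINGS_PART1 counterpart (e.g. 0x12/0x13 for GET_INPUT and
 * 0x16/0x17 for SET_OUTPUT).
 */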
443static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
444 struct intel_sdvo_dtd *dtd)
445{
446 u8 status;
447
448 intel_sdvo_write_cmd(intel_output, cmd, NULL, 0);
449 status = intel_sdvo_read_response(intel_output, &dtd->part1,
450 sizeof(dtd->part1));
451 if (status != SDVO_CMD_STATUS_SUCCESS)
452 return false;
453
454 intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0);
455 status = intel_sdvo_read_response(intel_output, &dtd->part2,
456 sizeof(dtd->part2));
457 if (status != SDVO_CMD_STATUS_SUCCESS)
458 return false;
459
460 return true;
461}
462
463static bool intel_sdvo_get_input_timing(struct intel_output *intel_output,
464 struct intel_sdvo_dtd *dtd)
465{
466 return intel_sdvo_get_timing(intel_output,
467 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
468}
469
470static bool intel_sdvo_get_output_timing(struct intel_output *intel_output,
471 struct intel_sdvo_dtd *dtd)
472{
473 return intel_sdvo_get_timing(intel_output,
474 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
475}
476
477static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd,
478 struct intel_sdvo_dtd *dtd)
479{
480 u8 status;
481
482 intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1));
483 status = intel_sdvo_read_response(intel_output, NULL, 0);
484 if (status != SDVO_CMD_STATUS_SUCCESS)
485 return false;
486
487 intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
488 status = intel_sdvo_read_response(intel_output, NULL, 0);
489 if (status != SDVO_CMD_STATUS_SUCCESS)
490 return false;
491
492 return true;
493}
494
495static bool intel_sdvo_set_input_timing(struct intel_output *intel_output,
496 struct intel_sdvo_dtd *dtd)
497{
498 return intel_sdvo_set_timing(intel_output,
499 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
500}
501
502static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
503 struct intel_sdvo_dtd *dtd)
504{
505 return intel_sdvo_set_timing(intel_output,
506 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
507}
508
509
510static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
511{
512 u8 response, status;
513
514 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
515 status = intel_sdvo_read_response(intel_output, &response, 1);
516
517 if (status != SDVO_CMD_STATUS_SUCCESS) {
518 DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
519 return SDVO_CLOCK_RATE_MULT_1X;
520 } else {
521 DRM_DEBUG("Current clock rate multiplier: %d\n", response);
522 }
523
524 return response;
525}
526
527static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val)
528{
529 u8 status;
530
531 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
532 status = intel_sdvo_read_response(intel_output, NULL, 0);
533 if (status != SDVO_CMD_STATUS_SUCCESS)
534 return false;
535
536 return true;
537}
538
539static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
540 struct drm_display_mode *mode,
541 struct drm_display_mode *adjusted_mode)
542{
543 /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
544 * device will be told of the multiplier during mode_set.
545 */
546 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
547 return true;
548}
549
550static void intel_sdvo_mode_set(struct drm_encoder *encoder,
551 struct drm_display_mode *mode,
552 struct drm_display_mode *adjusted_mode)
553{
554 struct drm_device *dev = encoder->dev;
555 struct drm_i915_private *dev_priv = dev->dev_private;
556 struct drm_crtc *crtc = encoder->crtc;
557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
558 struct intel_output *intel_output = enc_to_intel_output(encoder);
559 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
560 u16 width, height;
561 u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
562 u16 h_sync_offset, v_sync_offset;
563 u32 sdvox;
564 struct intel_sdvo_dtd output_dtd;
565 int sdvo_pixel_multiply;
566
567 if (!mode)
568 return;
569
570 width = mode->crtc_hdisplay;
571 height = mode->crtc_vdisplay;
572
573 /* do some mode translations */
574 h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
575 h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
576
577 v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
578 v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
579
580 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
581 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
582
583 output_dtd.part1.clock = mode->clock / 10;
584 output_dtd.part1.h_active = width & 0xff;
585 output_dtd.part1.h_blank = h_blank_len & 0xff;
586 output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
587 ((h_blank_len >> 8) & 0xf);
588 output_dtd.part1.v_active = height & 0xff;
589 output_dtd.part1.v_blank = v_blank_len & 0xff;
590 output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
591 ((v_blank_len >> 8) & 0xf);
592
593 output_dtd.part2.h_sync_off = h_sync_offset;
594 output_dtd.part2.h_sync_width = h_sync_len & 0xff;
595 output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
596 (v_sync_len & 0xf);
597 output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
598 ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
599 ((v_sync_len & 0x30) >> 4);
600
601 output_dtd.part2.dtd_flags = 0x18;
602 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
603 output_dtd.part2.dtd_flags |= 0x2;
604 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
605 output_dtd.part2.dtd_flags |= 0x4;
606
607 output_dtd.part2.sdvo_flags = 0;
608 output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
609 output_dtd.part2.reserved = 0;
610
611 /* Set the output timing to the screen */
612 intel_sdvo_set_target_output(intel_output, sdvo_priv->active_outputs);
613 intel_sdvo_set_output_timing(intel_output, &output_dtd);
614
615 /* Set the input timing to the screen. Assume always input 0. */
616 intel_sdvo_set_target_input(intel_output, true, false);
617
618 /* We would like to use i830_sdvo_create_preferred_input_timing() to
619 * provide the device with a timing it can support, if it supports that
620 * feature. However, presumably we would need to adjust the CRTC to
621 * output the preferred timing, and we don't support that currently.
622 */
623 intel_sdvo_set_input_timing(intel_output, &output_dtd);
624
625 switch (intel_sdvo_get_pixel_multiplier(mode)) {
626 case 1:
627 intel_sdvo_set_clock_rate_mult(intel_output,
628 SDVO_CLOCK_RATE_MULT_1X);
629 break;
630 case 2:
631 intel_sdvo_set_clock_rate_mult(intel_output,
632 SDVO_CLOCK_RATE_MULT_2X);
633 break;
634 case 4:
635 intel_sdvo_set_clock_rate_mult(intel_output,
636 SDVO_CLOCK_RATE_MULT_4X);
637 break;
638 }
639
640 /* Set the SDVO control regs. */
641 if (0/*IS_I965GM(dev)*/) {
642 sdvox = SDVO_BORDER_ENABLE;
643 } else {
644 sdvox = I915_READ(sdvo_priv->output_device);
645 switch (sdvo_priv->output_device) {
646 case SDVOB:
647 sdvox &= SDVOB_PRESERVE_MASK;
648 break;
649 case SDVOC:
650 sdvox &= SDVOC_PRESERVE_MASK;
651 break;
652 }
653 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
654 }
655 if (intel_crtc->pipe == 1)
656 sdvox |= SDVO_PIPE_B_SELECT;
657
658 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
659 if (IS_I965G(dev)) {
660 /* done in crtc_mode_set as the dpll_md reg must be written
661 early */
662 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
663 /* done in crtc_mode_set as it lives inside the
664 dpll register */
665 } else {
666 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
667 }
668
669 intel_sdvo_write_sdvox(intel_output, sdvox);
670}
671
672static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
673{
674 struct drm_device *dev = encoder->dev;
675 struct drm_i915_private *dev_priv = dev->dev_private;
676 struct intel_output *intel_output = enc_to_intel_output(encoder);
677 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
678 u32 temp;
679
680 if (mode != DRM_MODE_DPMS_ON) {
681 intel_sdvo_set_active_outputs(intel_output, 0);
682 if (0)
683 intel_sdvo_set_encoder_power_state(intel_output, mode);
684
685 if (mode == DRM_MODE_DPMS_OFF) {
686 temp = I915_READ(sdvo_priv->output_device);
687 if ((temp & SDVO_ENABLE) != 0) {
688 intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE);
689 }
690 }
691 } else {
692 bool input1, input2;
693 int i;
694 u8 status;
695
696 temp = I915_READ(sdvo_priv->output_device);
697 if ((temp & SDVO_ENABLE) == 0)
698 intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE);
699 for (i = 0; i < 2; i++)
700 intel_wait_for_vblank(dev);
701
702 status = intel_sdvo_get_trained_inputs(intel_output, &input1,
703 &input2);
704
705
706 /* Warn if the device reported failure to sync.
707 * A lot of SDVO devices fail to notify of sync, but it's
708 * a given that if the status is a success, we succeeded.
709 */
710 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
711 DRM_DEBUG("First %s output reported failure to sync\n",
712 SDVO_NAME(sdvo_priv));
713 }
714
715 if (0)
716 intel_sdvo_set_encoder_power_state(intel_output, mode);
717 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->active_outputs);
718 }
719 return;
720}
721
722static void intel_sdvo_save(struct drm_connector *connector)
723{
724 struct drm_device *dev = connector->dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
726 struct intel_output *intel_output = to_intel_output(connector);
727 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
728 int o;
729
730 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output);
731 intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs);
732
733 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
734 intel_sdvo_set_target_input(intel_output, true, false);
735 intel_sdvo_get_input_timing(intel_output,
736 &sdvo_priv->save_input_dtd_1);
737 }
738
739 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
740 intel_sdvo_set_target_input(intel_output, false, true);
741 intel_sdvo_get_input_timing(intel_output,
742 &sdvo_priv->save_input_dtd_2);
743 }
744
745 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
746 {
747 u16 this_output = (1 << o);
748 if (sdvo_priv->caps.output_flags & this_output)
749 {
750 intel_sdvo_set_target_output(intel_output, this_output);
751 intel_sdvo_get_output_timing(intel_output,
752 &sdvo_priv->save_output_dtd[o]);
753 }
754 }
755
756 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
757}
758
759static void intel_sdvo_restore(struct drm_connector *connector)
760{
761 struct drm_device *dev = connector->dev;
762 struct drm_i915_private *dev_priv = dev->dev_private;
763 struct intel_output *intel_output = to_intel_output(connector);
764 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
765 int o;
766 int i;
767 bool input1, input2;
768 u8 status;
769
770 intel_sdvo_set_active_outputs(intel_output, 0);
771
772 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
773 {
774 u16 this_output = (1 << o);
775 if (sdvo_priv->caps.output_flags & this_output) {
776 intel_sdvo_set_target_output(intel_output, this_output);
777 intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]);
778 }
779 }
780
781 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
782 intel_sdvo_set_target_input(intel_output, true, false);
783 intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1);
784 }
785
786 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
787 intel_sdvo_set_target_input(intel_output, false, true);
788 intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2);
789 }
790
791 intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
792
793 I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
794
795 if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
796 {
797 for (i = 0; i < 2; i++)
798 intel_wait_for_vblank(dev);
799 status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
800 if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
801 DRM_DEBUG("First %s output reported failure to sync\n",
802 SDVO_NAME(sdvo_priv));
803 }
804
805 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
806}
807
808static int intel_sdvo_mode_valid(struct drm_connector *connector,
809 struct drm_display_mode *mode)
810{
811 struct intel_output *intel_output = to_intel_output(connector);
812 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
813
814 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
815 return MODE_NO_DBLESCAN;
816
817 if (sdvo_priv->pixel_clock_min > mode->clock)
818 return MODE_CLOCK_LOW;
819
820 if (sdvo_priv->pixel_clock_max < mode->clock)
821 return MODE_CLOCK_HIGH;
822
823 return MODE_OK;
824}
825
826static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps)
827{
828 u8 status;
829
830 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
831 status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps));
832 if (status != SDVO_CMD_STATUS_SUCCESS)
833 return false;
834
835 return true;
836}
837
838struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
839{
840 struct drm_connector *connector = NULL;
841 struct intel_output *iout = NULL;
842 struct intel_sdvo_priv *sdvo;
843
844 /* find the sdvo connector */
845 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
846 iout = to_intel_output(connector);
847
848 if (iout->type != INTEL_OUTPUT_SDVO)
849 continue;
850
851 sdvo = iout->dev_priv;
852
853 if (sdvo->output_device == SDVOB && sdvoB)
854 return connector;
855
856 if (sdvo->output_device == SDVOC && !sdvoB)
857 return connector;
858
859 }
860
861 return NULL;
862}
863
864int intel_sdvo_supports_hotplug(struct drm_connector *connector)
865{
866 u8 response[2];
867 u8 status;
868 struct intel_output *intel_output;
869 DRM_DEBUG("\n");
870
871 if (!connector)
872 return 0;
873
874 intel_output = to_intel_output(connector);
875
876 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
877 status = intel_sdvo_read_response(intel_output, &response, 2);
878
879 if (response[0] != 0)
880 return 1;
881
882 return 0;
883}
884
885void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
886{
887 u8 response[2];
888 u8 status;
889 struct intel_output *intel_output = to_intel_output(connector);
890
891 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
892 intel_sdvo_read_response(intel_output, &response, 2);
893
894 if (on) {
895 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
896 status = intel_sdvo_read_response(intel_output, &response, 2);
897
898 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
899 } else {
900 response[0] = 0;
901 response[1] = 0;
902 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
903 }
904
905 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
906 intel_sdvo_read_response(intel_output, &response, 2);
907}
908
909static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
910{
911 u8 response[2];
912 u8 status;
913 struct intel_output *intel_output = to_intel_output(connector);
914
915 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
916 status = intel_sdvo_read_response(intel_output, &response, 2);
917
918 DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
919 if ((response[0] != 0) || (response[1] != 0))
920 return connector_status_connected;
921 else
922 return connector_status_disconnected;
923}
924
925static int intel_sdvo_get_modes(struct drm_connector *connector)
926{
927 struct intel_output *intel_output = to_intel_output(connector);
928
929 /* set the bus switch and get the modes */
930 intel_sdvo_set_control_bus_switch(intel_output, SDVO_CONTROL_BUS_DDC2);
931 intel_ddc_get_modes(intel_output);
932
933 if (list_empty(&connector->probed_modes))
934 return 0;
935 return 1;
936}
937
938static void intel_sdvo_destroy(struct drm_connector *connector)
939{
940 struct intel_output *intel_output = to_intel_output(connector);
941
942 if (intel_output->i2c_bus)
943 intel_i2c_destroy(intel_output->i2c_bus);
944 drm_sysfs_connector_remove(connector);
945 drm_connector_cleanup(connector);
946 kfree(intel_output);
947}
948
949static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
950 .dpms = intel_sdvo_dpms,
951 .mode_fixup = intel_sdvo_mode_fixup,
952 .prepare = intel_encoder_prepare,
953 .mode_set = intel_sdvo_mode_set,
954 .commit = intel_encoder_commit,
955};
956
957static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
958 .save = intel_sdvo_save,
959 .restore = intel_sdvo_restore,
960 .detect = intel_sdvo_detect,
961 .fill_modes = drm_helper_probe_single_connector_modes,
962 .destroy = intel_sdvo_destroy,
963};
964
965static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
966 .get_modes = intel_sdvo_get_modes,
967 .mode_valid = intel_sdvo_mode_valid,
968 .best_encoder = intel_best_encoder,
969};
970
971static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
972{
973 drm_encoder_cleanup(encoder);
974}
975
976static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
977 .destroy = intel_sdvo_enc_destroy,
978};
979
980
981void intel_sdvo_init(struct drm_device *dev, int output_device)
982{
983 struct drm_connector *connector;
984 struct intel_output *intel_output;
985 struct intel_sdvo_priv *sdvo_priv;
986 struct intel_i2c_chan *i2cbus = NULL;
987 int connector_type;
988 u8 ch[0x40];
989 int i;
990 int encoder_type, output_id;
991
992 intel_output = kzalloc(sizeof(struct intel_output) + sizeof(struct intel_sdvo_priv), GFP_KERNEL);
993 if (!intel_output) {
994 return;
995 }
996
997 connector = &intel_output->base;
998
999 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
1000 DRM_MODE_CONNECTOR_Unknown);
1001 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
1002 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
1003 intel_output->type = INTEL_OUTPUT_SDVO;
1004
1005 connector->interlace_allowed = 0;
1006 connector->doublescan_allowed = 0;
1007
1008 /* setup the DDC bus. */
1009 if (output_device == SDVOB)
1010 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
1011 else
1012 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
1013
1014 if (!i2cbus)
1015 goto err_connector;
1016
1017 sdvo_priv->i2c_bus = i2cbus;
1018
1019 if (output_device == SDVOB) {
1020 output_id = 1;
1021 sdvo_priv->i2c_bus->slave_addr = 0x38;
1022 } else {
1023 output_id = 2;
1024 sdvo_priv->i2c_bus->slave_addr = 0x39;
1025 }
1026
1027 sdvo_priv->output_device = output_device;
1028 intel_output->i2c_bus = i2cbus;
1029 intel_output->dev_priv = sdvo_priv;
1030
1031
1032 /* Read the regs to test if we can talk to the device */
1033 for (i = 0; i < 0x40; i++) {
1034 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
1035 DRM_DEBUG("No SDVO device found on SDVO%c\n",
1036 output_device == SDVOB ? 'B' : 'C');
1037 goto err_i2c;
1038 }
1039 }
1040
1041 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
1042
1043 memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs));
1044
1045 /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
1046 if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
1047 {
1048 sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
1049 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1050 encoder_type = DRM_MODE_ENCODER_DAC;
1051 connector_type = DRM_MODE_CONNECTOR_VGA;
1052 }
1053 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
1054 {
1055 sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
1056 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1057 encoder_type = DRM_MODE_ENCODER_DAC;
1058 connector_type = DRM_MODE_CONNECTOR_VGA;
1059 }
1060 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
1061 {
1062 sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
1063 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1064 encoder_type = DRM_MODE_ENCODER_TMDS;
1065 connector_type = DRM_MODE_CONNECTOR_DVID;
1066 }
1067 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1)
1068 {
1069 sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
1070 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1071 encoder_type = DRM_MODE_ENCODER_TMDS;
1072 connector_type = DRM_MODE_CONNECTOR_DVID;
1073 }
1074 else
1075 {
1076 unsigned char bytes[2];
1077
1078 memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
1079 DRM_DEBUG("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
1080 SDVO_NAME(sdvo_priv),
1081 bytes[0], bytes[1]);
1082 goto err_i2c;
1083 }
1084
1085 drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type);
1086 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
1087 connector->connector_type = connector_type;
1088
1089 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1090 drm_sysfs_connector_add(connector);
1091
1092 /* Set the input timing to the screen. Assume always input 0. */
1093 intel_sdvo_set_target_input(intel_output, true, false);
1094
1095 intel_sdvo_get_input_pixel_clock_range(intel_output,
1096 &sdvo_priv->pixel_clock_min,
1097 &sdvo_priv->pixel_clock_max);
1098
1099
1100 DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
1101 "clock range %dMHz - %dMHz, "
1102 "input 1: %c, input 2: %c, "
1103 "output 1: %c, output 2: %c\n",
1104 SDVO_NAME(sdvo_priv),
1105 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
1106 sdvo_priv->caps.device_rev_id,
1107 sdvo_priv->pixel_clock_min / 1000,
1108 sdvo_priv->pixel_clock_max / 1000,
1109 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
1110 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
1111 /* check currently supported outputs */
1112 sdvo_priv->caps.output_flags &
1113 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
1114 sdvo_priv->caps.output_flags &
1115 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
1116
1117 intel_output->ddc_bus = i2cbus;
1118
1119 return;
1120
1121err_i2c:
1122 intel_i2c_destroy(intel_output->i2c_bus);
1123err_connector:
1124 drm_connector_cleanup(connector);
1125 kfree(intel_output);
1126
1127 return;
1128}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
new file mode 100644
index 000000000000..861a43f8693c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -0,0 +1,327 @@
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27/**
28 * @file SDVO command definitions and structures.
29 */
30
31#define SDVO_OUTPUT_FIRST (0)
32#define SDVO_OUTPUT_TMDS0 (1 << 0)
33#define SDVO_OUTPUT_RGB0 (1 << 1)
34#define SDVO_OUTPUT_CVBS0 (1 << 2)
35#define SDVO_OUTPUT_SVID0 (1 << 3)
36#define SDVO_OUTPUT_YPRPB0 (1 << 4)
37#define SDVO_OUTPUT_SCART0 (1 << 5)
38#define SDVO_OUTPUT_LVDS0 (1 << 6)
39#define SDVO_OUTPUT_TMDS1 (1 << 8)
40#define SDVO_OUTPUT_RGB1 (1 << 9)
41#define SDVO_OUTPUT_CVBS1 (1 << 10)
42#define SDVO_OUTPUT_SVID1 (1 << 11)
43#define SDVO_OUTPUT_YPRPB1 (1 << 12)
44#define SDVO_OUTPUT_SCART1 (1 << 13)
45#define SDVO_OUTPUT_LVDS1 (1 << 14)
46#define SDVO_OUTPUT_LAST (14)
47
48struct intel_sdvo_caps {
49 u8 vendor_id;
50 u8 device_id;
51 u8 device_rev_id;
52 u8 sdvo_version_major;
53 u8 sdvo_version_minor;
54 unsigned int sdvo_inputs_mask:2;
55 unsigned int smooth_scaling:1;
56 unsigned int sharp_scaling:1;
57 unsigned int up_scaling:1;
58 unsigned int down_scaling:1;
59 unsigned int stall_support:1;
60 unsigned int pad:1;
61 u16 output_flags;
62} __attribute__((packed));
63
64/** This matches the EDID DTD structure, more or less */
65struct intel_sdvo_dtd {
66 struct {
67 u16 clock; /**< pixel clock, in 10kHz units */
68 u8 h_active; /**< lower 8 bits (pixels) */
69 u8 h_blank; /**< lower 8 bits (pixels) */
70 u8 h_high; /**< upper 4 bits each h_active, h_blank */
71 u8 v_active; /**< lower 8 bits (lines) */
72 u8 v_blank; /**< lower 8 bits (lines) */
73 u8 v_high; /**< upper 4 bits each v_active, v_blank */
74 } part1;
75
76 struct {
77 u8 h_sync_off; /**< lower 8 bits, from hblank start */
78 u8 h_sync_width; /**< lower 8 bits (pixels) */
79 /** lower 4 bits each vsync offset, vsync width */
80 u8 v_sync_off_width;
81 /**
82 * 2 high bits of hsync offset, 2 high bits of hsync width,
83 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
84 */
85 u8 sync_off_width_high;
86 u8 dtd_flags;
87 u8 sdvo_flags;
88 /** bits 6-7 of vsync offset, stored in bits 6-7 of this byte */
89 u8 v_sync_off_high;
90 u8 reserved;
91 } part2;
92} __attribute__((packed));
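/*
 * Illustrative note (editor addition, not part of the original header):
 * for a 1024x768 mode, intel_sdvo_mode_set() in intel_sdvo.c packs
 * h_active = 1024 & 0xff = 0x00 and stores the remaining bits,
 * (1024 >> 8) & 0xf = 0x4, in the upper nibble of h_high; the lower nibble
 * of h_high holds the corresponding high bits of h_blank.
 */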
93
94struct intel_sdvo_pixel_clock_range {
95 u16 min; /**< pixel clock, in 10kHz units */
96 u16 max; /**< pixel clock, in 10kHz units */
97} __attribute__((packed));
98
99struct intel_sdvo_preferred_input_timing_args {
100 u16 clock;
101 u16 width;
102 u16 height;
103} __attribute__((packed));
104
105/* I2C registers for SDVO */
106#define SDVO_I2C_ARG_0 0x07
107#define SDVO_I2C_ARG_1 0x06
108#define SDVO_I2C_ARG_2 0x05
109#define SDVO_I2C_ARG_3 0x04
110#define SDVO_I2C_ARG_4 0x03
111#define SDVO_I2C_ARG_5 0x02
112#define SDVO_I2C_ARG_6 0x01
113#define SDVO_I2C_ARG_7 0x00
114#define SDVO_I2C_OPCODE 0x08
115#define SDVO_I2C_CMD_STATUS 0x09
116#define SDVO_I2C_RETURN_0 0x0a
117#define SDVO_I2C_RETURN_1 0x0b
118#define SDVO_I2C_RETURN_2 0x0c
119#define SDVO_I2C_RETURN_3 0x0d
120#define SDVO_I2C_RETURN_4 0x0e
121#define SDVO_I2C_RETURN_5 0x0f
122#define SDVO_I2C_RETURN_6 0x10
123#define SDVO_I2C_RETURN_7 0x11
124#define SDVO_I2C_VENDOR_BEGIN 0x20
125
126/* Status results */
127#define SDVO_CMD_STATUS_POWER_ON 0x0
128#define SDVO_CMD_STATUS_SUCCESS 0x1
129#define SDVO_CMD_STATUS_NOTSUPP 0x2
130#define SDVO_CMD_STATUS_INVALID_ARG 0x3
131#define SDVO_CMD_STATUS_PENDING 0x4
132#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
133#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
134
135/* SDVO commands, argument/result registers */
136
137#define SDVO_CMD_RESET 0x01
138
139/** Returns a struct intel_sdvo_caps */
140#define SDVO_CMD_GET_DEVICE_CAPS 0x02
141
142#define SDVO_CMD_GET_FIRMWARE_REV 0x86
143# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
144# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
145# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
146
147/**
148 * Reports which inputs are trained (managed to sync).
149 *
150 * Devices must have trained within 2 vsyncs of a mode change.
151 */
152#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
153struct intel_sdvo_get_trained_inputs_response {
154 unsigned int input0_trained:1;
155 unsigned int input1_trained:1;
156 unsigned int pad:6;
157} __attribute__((packed));
158
159/** Returns a struct intel_sdvo_output_flags of active outputs. */
160#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
161
162/**
163 * Sets the current set of active outputs.
164 *
165 * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
166 * on multi-output devices.
167 */
168#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
169
170/**
171 * Returns the current mapping of SDVO inputs to outputs on the device.
172 *
173 * Returns two struct intel_sdvo_output_flags structures.
174 */
175#define SDVO_CMD_GET_IN_OUT_MAP 0x06
176
177/**
178 * Sets the current mapping of SDVO inputs to outputs on the device.
179 *
180 * Takes two struct intel_sdvo_output_flags structures.
181 */
182#define SDVO_CMD_SET_IN_OUT_MAP 0x07
183
184/**
185 * Returns a struct intel_sdvo_output_flags of attached displays.
186 */
187#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
188
189/**
190 * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
191 */
192#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
193
194/**
195 * Takes a struct intel_sdvo_output_flags.
196 */
197#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
198
199/**
200 * Returns a struct intel_sdvo_output_flags of displays with hot plug
201 * interrupts enabled.
202 */
203#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
204
205#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
206struct intel_sdvo_get_interrupt_event_source_response {
207 u16 interrupt_status;
208 unsigned int ambient_light_interrupt:1;
209 unsigned int pad:7;
210} __attribute__((packed));
211
212/**
213 * Selects which input is affected by future input commands.
214 *
215 * Commands affected include SET_INPUT_TIMINGS_PART[12],
216 * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
217 * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
218 */
219#define SDVO_CMD_SET_TARGET_INPUT 0x10
220struct intel_sdvo_set_target_input_args {
221 unsigned int target_1:1;
222 unsigned int pad:7;
223} __attribute__((packed));
224
225/**
226 * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
227 * future output commands.
228 *
229 * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
230 * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
231 */
232#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
233
234#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
235#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
236#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
237#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
238#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
239#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
240#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
241#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
242/* Part 1 */
243# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
244# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
245# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
246# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
247# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
248# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
249# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
250# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
251/* Part 2 */
252# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
253# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
254# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
255# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
256# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
257# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
258# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
259# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
260# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
261# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
262# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
263# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
264# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
265# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
266# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
267# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
268# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
269# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
270
271/**
272 * Generates a DTD based on the given width, height, and flags.
273 *
274 * This will be supported by any device supporting scaling or interlaced
275 * modes.
276 */
277#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
278# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
279# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
280# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
281# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
282# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
283# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
284# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
285# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
286# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
287
288#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
289#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
290
291/** Returns a struct intel_sdvo_pixel_clock_range */
292#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
293/** Returns a struct intel_sdvo_pixel_clock_range */
294#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
295
296/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
297#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
298
299/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
300#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
301/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
302#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
303# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
304# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
305# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
306
307#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
308
309#define SDVO_CMD_GET_TV_FORMAT 0x28
310
311#define SDVO_CMD_SET_TV_FORMAT 0x29
312
313#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
314#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
315#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
316# define SDVO_ENCODER_STATE_ON (1 << 0)
317# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
318# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
319# define SDVO_ENCODER_STATE_OFF (1 << 3)
320
321#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
322
323#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
324# define SDVO_CONTROL_BUS_PROM 0x0
325# define SDVO_CONTROL_BUS_DDC1 0x1
326# define SDVO_CONTROL_BUS_DDC2 0x2
327# define SDVO_CONTROL_BUS_DDC3 0x3
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
new file mode 100644
index 000000000000..fbb35dc56f5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -0,0 +1,1725 @@
1/*
2 * Copyright © 2006-2008 Intel Corporation
3 * Jesse Barnes <jesse.barnes@intel.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Eric Anholt <eric@anholt.net>
26 *
27 */
28
29/** @file
30 * Integrated TV-out support for the 915GM and 945GM.
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "drm_crtc.h"
36#include "drm_edid.h"
37#include "intel_drv.h"
38#include "i915_drm.h"
39#include "i915_drv.h"
40
41enum tv_margin {
42 TV_MARGIN_LEFT, TV_MARGIN_TOP,
43 TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
44};
45
46/** Private structure for the integrated TV support */
47struct intel_tv_priv {
48 int type;
49 char *tv_format;
50 int margin[4];
51 u32 save_TV_H_CTL_1;
52 u32 save_TV_H_CTL_2;
53 u32 save_TV_H_CTL_3;
54 u32 save_TV_V_CTL_1;
55 u32 save_TV_V_CTL_2;
56 u32 save_TV_V_CTL_3;
57 u32 save_TV_V_CTL_4;
58 u32 save_TV_V_CTL_5;
59 u32 save_TV_V_CTL_6;
60 u32 save_TV_V_CTL_7;
61 u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
62
63 u32 save_TV_CSC_Y;
64 u32 save_TV_CSC_Y2;
65 u32 save_TV_CSC_U;
66 u32 save_TV_CSC_U2;
67 u32 save_TV_CSC_V;
68 u32 save_TV_CSC_V2;
69 u32 save_TV_CLR_KNOBS;
70 u32 save_TV_CLR_LEVEL;
71 u32 save_TV_WIN_POS;
72 u32 save_TV_WIN_SIZE;
73 u32 save_TV_FILTER_CTL_1;
74 u32 save_TV_FILTER_CTL_2;
75 u32 save_TV_FILTER_CTL_3;
76
77 u32 save_TV_H_LUMA[60];
78 u32 save_TV_H_CHROMA[60];
79 u32 save_TV_V_LUMA[43];
80 u32 save_TV_V_CHROMA[43];
81
82 u32 save_TV_DAC;
83 u32 save_TV_CTL;
84};
85
86struct video_levels {
87 int blank, black, burst;
88};
89
90struct color_conversion {
91 u16 ry, gy, by, ay;
92 u16 ru, gu, bu, au;
93 u16 rv, gv, bv, av;
94};
95
96static const u32 filter_table[] = {
97 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
98 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
99 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
100 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
101 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
102 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
103 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
104 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
105 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
106 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
107 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
108 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
109 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
110 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
111 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
112 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
113 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
114 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
115 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
116 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
117 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
118 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
119 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
120 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
121 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
122 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
123 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
124 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
125 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
126 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
127 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
128 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
129 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
130 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
131 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
132 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
133 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
134 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
135 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
136 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
137 0x28003100, 0x28002F00, 0x00003100, 0x36403000,
138 0x2D002CC0, 0x30003640, 0x2D0036C0,
139 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
140 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
141 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
142 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
143 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
144 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
145 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
146 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
147 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
148 0x28003100, 0x28002F00, 0x00003100,
149};
150
151/*
152 * Color conversion values have 3 separate fixed point formats:
153 *
154 * 10 bit fields (ay, au)
155 * 1.9 fixed point (b.bbbbbbbbb)
156 * 11 bit fields (ry, by, ru, gu, gv)
157 * exp.mantissa (ee.mmmmmmmmm)
158 * ee = 00 = 10^-1 (0.mmmmmmmmm)
159 * ee = 01 = 10^-2 (0.0mmmmmmmmm)
160 * ee = 10 = 10^-3 (0.00mmmmmmmmm)
161 * ee = 11 = 10^-4 (0.000mmmmmmmmm)
162 * 12 bit fields (gy, rv, bu)
163 * exp.mantissa (eee.mmmmmmmmm)
164 * eee = 000 = 10^-1 (0.mmmmmmmmm)
165 * eee = 001 = 10^-2 (0.0mmmmmmmmm)
166 * eee = 010 = 10^-3 (0.00mmmmmmmmm)
167 * eee = 011 = 10^-4 (0.000mmmmmmmmm)
168 * eee = 100 = reserved
169 * eee = 101 = reserved
170 * eee = 110 = reserved
171 * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
172 *
173 * Saturation and contrast are 8 bits, with their own representation:
174 * 8 bit field (saturation, contrast)
175 * exp.mantissa (ee.mmmmmm)
176 * ee = 00 = 10^-1 (0.mmmmmm)
177 * ee = 01 = 10^0 (m.mmmmm)
178 * ee = 10 = 10^1 (mm.mmmm)
179 * ee = 11 = 10^2 (mmm.mmm)
180 *
181 * Simple conversion function:
182 *
183 * static u32
184 * float_to_csc_11(float f)
185 * {
186 * u32 exp;
187 * u32 mant;
188 * u32 ret;
189 *
190 * if (f < 0)
191 * f = -f;
192 *
193 * if (f >= 1) {
194 * exp = 0x7;
195 * mant = 1 << 8;
196 * } else {
197 * for (exp = 0; exp < 3 && f < 0.5; exp++)
198 * f *= 2.0;
199 * mant = (f * (1 << 9) + 0.5);
200 * if (mant >= (1 << 9))
201 * mant = (1 << 9) - 1;
202 * }
203 * ret = (exp << 9) | mant;
204 * return ret;
205 * }
206 */
207
208/*
209 * Behold, magic numbers! If we plant them they might grow a big
210 * s-video cable to the sky... or something.
211 *
212 * Pre-converted to appropriate hex value.
213 */
214
215/*
216 * PAL & NTSC values for composite & s-video connections
217 */
218static const struct color_conversion ntsc_m_csc_composite = {
219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
222};
223
224static const struct video_levels ntsc_m_levels_composite = {
225 .blank = 225, .black = 267, .burst = 113,
226};
227
228static const struct color_conversion ntsc_m_csc_svideo = {
229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
232};
233
234static const struct video_levels ntsc_m_levels_svideo = {
235 .blank = 266, .black = 316, .burst = 133,
236};
237
238static const struct color_conversion ntsc_j_csc_composite = {
239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00,
241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00,
242};
243
244static const struct video_levels ntsc_j_levels_composite = {
245 .blank = 225, .black = 225, .burst = 113,
246};
247
248static const struct color_conversion ntsc_j_csc_svideo = {
249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00,
251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00,
252};
253
254static const struct video_levels ntsc_j_levels_svideo = {
255 .blank = 266, .black = 266, .burst = 133,
256};
257
258static const struct color_conversion pal_csc_composite = {
259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00,
261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00,
262};
263
264static const struct video_levels pal_levels_composite = {
265 .blank = 237, .black = 237, .burst = 118,
266};
267
268static const struct color_conversion pal_csc_svideo = {
269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00,
271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00,
272};
273
274static const struct video_levels pal_levels_svideo = {
275 .blank = 280, .black = 280, .burst = 139,
276};
277
278static const struct color_conversion pal_m_csc_composite = {
279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
282};
283
284static const struct video_levels pal_m_levels_composite = {
285 .blank = 225, .black = 267, .burst = 113,
286};
287
288static const struct color_conversion pal_m_csc_svideo = {
289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
292};
293
294static const struct video_levels pal_m_levels_svideo = {
295 .blank = 266, .black = 316, .burst = 133,
296};
297
298static const struct color_conversion pal_n_csc_composite = {
299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
302};
303
304static const struct video_levels pal_n_levels_composite = {
305 .blank = 225, .black = 267, .burst = 118,
306};
307
308static const struct color_conversion pal_n_csc_svideo = {
309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
312};
313
314static const struct video_levels pal_n_levels_svideo = {
315 .blank = 266, .black = 316, .burst = 139,
316};
317
318/*
319 * Component connections
320 */
321static const struct color_conversion sdtv_csc_yprpb = {
322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146,
323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00,
324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00,
325};
326
327static const struct color_conversion sdtv_csc_rgb = {
328 .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
329 .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
330 .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
331};
332
333static const struct color_conversion hdtv_csc_yprpb = {
334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146,
335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00,
336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00,
337};
338
339static const struct color_conversion hdtv_csc_rgb = {
340 .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
341 .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
342 .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
343};
344
345static const struct video_levels component_levels = {
346 .blank = 279, .black = 279, .burst = 0,
347};
348
349
350struct tv_mode {
351 char *name;
352 int clock;
353 int refresh; /* in millihertz (for precision) */
354 u32 oversample;
355 int hsync_end, hblank_start, hblank_end, htotal;
356 bool progressive, trilevel_sync, component_only;
357 int vsync_start_f1, vsync_start_f2, vsync_len;
358 bool veq_ena;
359 int veq_start_f1, veq_start_f2, veq_len;
360 int vi_end_f1, vi_end_f2, nbr_end;
361 bool burst_ena;
362 int hburst_start, hburst_len;
363 int vburst_start_f1, vburst_end_f1;
364 int vburst_start_f2, vburst_end_f2;
365 int vburst_start_f3, vburst_end_f3;
366 int vburst_start_f4, vburst_end_f4;
367 /*
368 * subcarrier programming
369 */
370 int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
371 u32 sc_reset;
372 bool pal_burst;
373 /*
374 * blank/black levels
375 */
376 const struct video_levels *composite_levels, *svideo_levels;
377 const struct color_conversion *composite_color, *svideo_color;
378 const u32 *filter_table;
379 int max_srcw;
380};
381
382
383/*
384 * Sub carrier DDA
385 *
386 * I think this works as follows:
387 *
388 * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
389 *
390 * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
391 *
392 * So,
393 * dda1_ideal = subcarrier/pixel * 4096
394 * dda1_inc = floor (dda1_ideal)
395 * dda2 = dda1_ideal - dda1_inc
396 *
397 * then pick a ratio for dda2 that gives the closest approximation. If
398 * you can't get close enough, you can play with dda3 as well. This
399 * seems likely to happen when dda2 is small as the jumps would be larger
400 *
401 * To invert this,
402 *
403 * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
404 *
405 * The constants below were all computed using a 107.520MHz clock
406 */
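/*
 * Worked example (editor addition), checked against the NTSC-M entry below:
 *
 *	dda1_ideal = 3.58 MHz / 107.52 MHz * 4096 = 136.380952...
 *	dda1_inc   = 136
 *	fraction   = 0.380952... = 8/21 = 7624/20013 = dda2_inc / dda2_size
 *
 * so 107.52 MHz * (136 + 7624/20013) / 4096 lands exactly on the 3.5800000
 * "actual" sub-carrier frequency quoted in the NTSC-M table entry.
 */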
407
408/**
409 * Register programming values for TV modes.
410 *
411 * These values already account for the -1 adjustments the hardware registers require.
412 */
413
414static const struct tv_mode tv_modes[] = {
415 {
416 .name = "NTSC-M",
417 .clock = 107520,
418 .refresh = 29970,
419 .oversample = TV_OVERSAMPLE_8X,
420 .component_only = 0,
421 /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
422
423 .hsync_end = 64, .hblank_end = 124,
424 .hblank_start = 836, .htotal = 857,
425
426 .progressive = false, .trilevel_sync = false,
427
428 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
429 .vsync_len = 6,
430
431 .veq_ena = true, .veq_start_f1 = 0,
432 .veq_start_f2 = 1, .veq_len = 18,
433
434 .vi_end_f1 = 20, .vi_end_f2 = 21,
435 .nbr_end = 240,
436
437 .burst_ena = true,
438 .hburst_start = 72, .hburst_len = 34,
439 .vburst_start_f1 = 9, .vburst_end_f1 = 240,
440 .vburst_start_f2 = 10, .vburst_end_f2 = 240,
441 .vburst_start_f3 = 9, .vburst_end_f3 = 240,
442 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
443
444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
445 .dda1_inc = 136,
446 .dda2_inc = 7624, .dda2_size = 20013,
447 .dda3_inc = 0, .dda3_size = 0,
448 .sc_reset = TV_SC_RESET_EVERY_4,
449 .pal_burst = false,
450
451 .composite_levels = &ntsc_m_levels_composite,
452 .composite_color = &ntsc_m_csc_composite,
453 .svideo_levels = &ntsc_m_levels_svideo,
454 .svideo_color = &ntsc_m_csc_svideo,
455
456 .filter_table = filter_table,
457 },
458 {
459 .name = "NTSC-443",
460 .clock = 107520,
461 .refresh = 29970,
462 .oversample = TV_OVERSAMPLE_8X,
463 .component_only = 0,
464 /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
465 .hsync_end = 64, .hblank_end = 124,
466 .hblank_start = 836, .htotal = 857,
467
468 .progressive = false, .trilevel_sync = false,
469
470 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
471 .vsync_len = 6,
472
473 .veq_ena = true, .veq_start_f1 = 0,
474 .veq_start_f2 = 1, .veq_len = 18,
475
476 .vi_end_f1 = 20, .vi_end_f2 = 21,
477 .nbr_end = 240,
478
479 .burst_ena = true,
480 .hburst_start = 72, .hburst_len = 34,
481 .vburst_start_f1 = 9, .vburst_end_f1 = 240,
482 .vburst_start_f2 = 10, .vburst_end_f2 = 240,
483 .vburst_start_f3 = 9, .vburst_end_f3 = 240,
484 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
485
486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
487 .dda1_inc = 168,
488 .dda2_inc = 18557, .dda2_size = 20625,
489 .dda3_inc = 0, .dda3_size = 0,
490 .sc_reset = TV_SC_RESET_EVERY_8,
491 .pal_burst = true,
492
493 .composite_levels = &ntsc_m_levels_composite,
494 .composite_color = &ntsc_m_csc_composite,
495 .svideo_levels = &ntsc_m_levels_svideo,
496 .svideo_color = &ntsc_m_csc_svideo,
497
498 .filter_table = filter_table,
499 },
500 {
501 .name = "NTSC-J",
502 .clock = 107520,
503 .refresh = 29970,
504 .oversample = TV_OVERSAMPLE_8X,
505 .component_only = 0,
506
507 /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
508 .hsync_end = 64, .hblank_end = 124,
509 .hblank_start = 836, .htotal = 857,
510
511 .progressive = false, .trilevel_sync = false,
512
513 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
514 .vsync_len = 6,
515
516 .veq_ena = true, .veq_start_f1 = 0,
517 .veq_start_f2 = 1, .veq_len = 18,
518
519 .vi_end_f1 = 20, .vi_end_f2 = 21,
520 .nbr_end = 240,
521
522 .burst_ena = true,
523 .hburst_start = 72, .hburst_len = 34,
524 .vburst_start_f1 = 9, .vburst_end_f1 = 240,
525 .vburst_start_f2 = 10, .vburst_end_f2 = 240,
526 .vburst_start_f3 = 9, .vburst_end_f3 = 240,
527 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
528
529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
530 .dda1_inc = 136,
531 .dda2_inc = 7624, .dda2_size = 20013,
532 .dda3_inc = 0, .dda3_size = 0,
533 .sc_reset = TV_SC_RESET_EVERY_4,
534 .pal_burst = false,
535
536 .composite_levels = &ntsc_j_levels_composite,
537 .composite_color = &ntsc_j_csc_composite,
538 .svideo_levels = &ntsc_j_levels_svideo,
539 .svideo_color = &ntsc_j_csc_svideo,
540
541 .filter_table = filter_table,
542 },
543 {
544 .name = "PAL-M",
545 .clock = 107520,
546 .refresh = 29970,
547 .oversample = TV_OVERSAMPLE_8X,
548 .component_only = 0,
549
550 /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
551 .hsync_end = 64, .hblank_end = 124,
552 .hblank_start = 836, .htotal = 857,
553
554 .progressive = false, .trilevel_sync = false,
555
556 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
557 .vsync_len = 6,
558
559 .veq_ena = true, .veq_start_f1 = 0,
560 .veq_start_f2 = 1, .veq_len = 18,
561
562 .vi_end_f1 = 20, .vi_end_f2 = 21,
563 .nbr_end = 240,
564
565 .burst_ena = true,
566 .hburst_start = 72, .hburst_len = 34,
567 .vburst_start_f1 = 9, .vburst_end_f1 = 240,
568 .vburst_start_f2 = 10, .vburst_end_f2 = 240,
569 .vburst_start_f3 = 9, .vburst_end_f3 = 240,
570 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
571
572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
573 .dda1_inc = 136,
574 .dda2_inc = 7624, .dda2_size = 20013,
575 .dda3_inc = 0, .dda3_size = 0,
576 .sc_reset = TV_SC_RESET_EVERY_4,
577 .pal_burst = false,
578
579 .composite_levels = &pal_m_levels_composite,
580 .composite_color = &pal_m_csc_composite,
581 .svideo_levels = &pal_m_levels_svideo,
582 .svideo_color = &pal_m_csc_svideo,
583
584 .filter_table = filter_table,
585 },
586 {
587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
588 .name = "PAL-N",
589 .clock = 107520,
590 .refresh = 25000,
591 .oversample = TV_OVERSAMPLE_8X,
592 .component_only = 0,
593
594 .hsync_end = 64, .hblank_end = 128,
595 .hblank_start = 844, .htotal = 863,
596
597 .progressive = false, .trilevel_sync = false,
598
599
600 .vsync_start_f1 = 6, .vsync_start_f2 = 7,
601 .vsync_len = 6,
602
603 .veq_ena = true, .veq_start_f1 = 0,
604 .veq_start_f2 = 1, .veq_len = 18,
605
606 .vi_end_f1 = 24, .vi_end_f2 = 25,
607 .nbr_end = 286,
608
609 .burst_ena = true,
610 .hburst_start = 73, .hburst_len = 34,
611 .vburst_start_f1 = 8, .vburst_end_f1 = 285,
612 .vburst_start_f2 = 8, .vburst_end_f2 = 286,
613 .vburst_start_f3 = 9, .vburst_end_f3 = 286,
614 .vburst_start_f4 = 9, .vburst_end_f4 = 285,
615
616
617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
618 .dda1_inc = 168,
619 .dda2_inc = 18557, .dda2_size = 20625,
620 .dda3_inc = 0, .dda3_size = 0,
621 .sc_reset = TV_SC_RESET_EVERY_8,
622 .pal_burst = true,
623
624 .composite_levels = &pal_n_levels_composite,
625 .composite_color = &pal_n_csc_composite,
626 .svideo_levels = &pal_n_levels_svideo,
627 .svideo_color = &pal_n_csc_svideo,
628
629 .filter_table = filter_table,
630 },
631 {
632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
633 .name = "PAL",
634 .clock = 107520,
635 .refresh = 25000,
636 .oversample = TV_OVERSAMPLE_8X,
637 .component_only = 0,
638
639 .hsync_end = 64, .hblank_end = 128,
640 .hblank_start = 844, .htotal = 863,
641
642 .progressive = false, .trilevel_sync = false,
643
644 .vsync_start_f1 = 5, .vsync_start_f2 = 6,
645 .vsync_len = 5,
646
647 .veq_ena = true, .veq_start_f1 = 0,
648 .veq_start_f2 = 1, .veq_len = 15,
649
650 .vi_end_f1 = 24, .vi_end_f2 = 25,
651 .nbr_end = 286,
652
653 .burst_ena = true,
654 .hburst_start = 73, .hburst_len = 32,
655 .vburst_start_f1 = 8, .vburst_end_f1 = 285,
656 .vburst_start_f2 = 8, .vburst_end_f2 = 286,
657 .vburst_start_f3 = 9, .vburst_end_f3 = 286,
658 .vburst_start_f4 = 9, .vburst_end_f4 = 285,
659
660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
661 .dda1_inc = 168,
662 .dda2_inc = 18557, .dda2_size = 20625,
663 .dda3_inc = 0, .dda3_size = 0,
664 .sc_reset = TV_SC_RESET_EVERY_8,
665 .pal_burst = true,
666
667 .composite_levels = &pal_levels_composite,
668 .composite_color = &pal_csc_composite,
669 .svideo_levels = &pal_levels_svideo,
670 .svideo_color = &pal_csc_svideo,
671
672 .filter_table = filter_table,
673 },
674 {
675 .name = "480p@59.94Hz",
676 .clock = 107520,
677 .refresh = 59940,
678 .oversample = TV_OVERSAMPLE_4X,
679 .component_only = 1,
680
681 .hsync_end = 64, .hblank_end = 122,
682 .hblank_start = 842, .htotal = 857,
683
684 .progressive = true, .trilevel_sync = false,
685
686 .vsync_start_f1 = 12, .vsync_start_f2 = 12,
687 .vsync_len = 12,
688
689 .veq_ena = false,
690
691 .vi_end_f1 = 44, .vi_end_f2 = 44,
692 .nbr_end = 496,
693
694 .burst_ena = false,
695
696 .filter_table = filter_table,
697 },
698 {
699 .name = "480p@60Hz",
700 .clock = 107520,
701 .refresh = 60000,
702 .oversample = TV_OVERSAMPLE_4X,
703 .component_only = 1,
704
705 .hsync_end = 64, .hblank_end = 122,
706 .hblank_start = 842, .htotal = 856,
707
708 .progressive = true, .trilevel_sync = false,
709
710 .vsync_start_f1 = 12, .vsync_start_f2 = 12,
711 .vsync_len = 12,
712
713 .veq_ena = false,
714
715 .vi_end_f1 = 44, .vi_end_f2 = 44,
716 .nbr_end = 496,
717
718 .burst_ena = false,
719
720 .filter_table = filter_table,
721 },
722 {
723 .name = "576p",
724 .clock = 107520,
725 .refresh = 50000,
726 .oversample = TV_OVERSAMPLE_4X,
727 .component_only = 1,
728
729 .hsync_end = 64, .hblank_end = 139,
730 .hblank_start = 859, .htotal = 863,
731
732 .progressive = true, .trilevel_sync = false,
733
734 .vsync_start_f1 = 10, .vsync_start_f2 = 10,
735 .vsync_len = 10,
736
737 .veq_ena = false,
738
739 .vi_end_f1 = 48, .vi_end_f2 = 48,
740 .nbr_end = 575,
741
742 .burst_ena = false,
743
744 .filter_table = filter_table,
745 },
746 {
747 .name = "720p@60Hz",
748 .clock = 148800,
749 .refresh = 60000,
750 .oversample = TV_OVERSAMPLE_2X,
751 .component_only = 1,
752
753 .hsync_end = 80, .hblank_end = 300,
754 .hblank_start = 1580, .htotal = 1649,
755
756 .progressive = true, .trilevel_sync = true,
757
758 .vsync_start_f1 = 10, .vsync_start_f2 = 10,
759 .vsync_len = 10,
760
761 .veq_ena = false,
762
763 .vi_end_f1 = 29, .vi_end_f2 = 29,
764 .nbr_end = 719,
765
766 .burst_ena = false,
767
768 .filter_table = filter_table,
769 },
770 {
771 .name = "720p@59.94Hz",
772 .clock = 148800,
773 .refresh = 59940,
774 .oversample = TV_OVERSAMPLE_2X,
775 .component_only = 1,
776
777 .hsync_end = 80, .hblank_end = 300,
778 .hblank_start = 1580, .htotal = 1651,
779
780 .progressive = true, .trilevel_sync = true,
781
782 .vsync_start_f1 = 10, .vsync_start_f2 = 10,
783 .vsync_len = 10,
784
785 .veq_ena = false,
786
787 .vi_end_f1 = 29, .vi_end_f2 = 29,
788 .nbr_end = 719,
789
790 .burst_ena = false,
791
792 .filter_table = filter_table,
793 },
794 {
795 .name = "720p@50Hz",
796 .clock = 148800,
797 .refresh = 50000,
798 .oversample = TV_OVERSAMPLE_2X,
799 .component_only = 1,
800
801 .hsync_end = 80, .hblank_end = 300,
802 .hblank_start = 1580, .htotal = 1979,
803
804 .progressive = true, .trilevel_sync = true,
805
806 .vsync_start_f1 = 10, .vsync_start_f2 = 10,
807 .vsync_len = 10,
808
809 .veq_ena = false,
810
811 .vi_end_f1 = 29, .vi_end_f2 = 29,
812 .nbr_end = 719,
813
814 .burst_ena = false,
815
816 .filter_table = filter_table,
817 .max_srcw = 800
818 },
819 {
820 .name = "1080i@50Hz",
821 .clock = 148800,
822 .refresh = 25000,
823 .oversample = TV_OVERSAMPLE_2X,
824 .component_only = 1,
825
826 .hsync_end = 88, .hblank_end = 235,
827 .hblank_start = 2155, .htotal = 2639,
828
829 .progressive = false, .trilevel_sync = true,
830
831 .vsync_start_f1 = 4, .vsync_start_f2 = 5,
832 .vsync_len = 10,
833
834 .veq_ena = true, .veq_start_f1 = 4,
835 .veq_start_f2 = 4, .veq_len = 10,
836
837
838 .vi_end_f1 = 21, .vi_end_f2 = 22,
839 .nbr_end = 539,
840
841 .burst_ena = false,
842
843 .filter_table = filter_table,
844 },
845 {
846 .name = "1080i@60Hz",
847 .clock = 148800,
848 .refresh = 30000,
849 .oversample = TV_OVERSAMPLE_2X,
850 .component_only = 1,
851
852 .hsync_end = 88, .hblank_end = 235,
853 .hblank_start = 2155, .htotal = 2199,
854
855 .progressive = false, .trilevel_sync = true,
856
857 .vsync_start_f1 = 4, .vsync_start_f2 = 5,
858 .vsync_len = 10,
859
860 .veq_ena = true, .veq_start_f1 = 4,
861 .veq_start_f2 = 4, .veq_len = 10,
862
863
864 .vi_end_f1 = 21, .vi_end_f2 = 22,
865 .nbr_end = 539,
866
867 .burst_ena = false,
868
869 .filter_table = filter_table,
870 },
871 {
872 .name = "1080i@59.94Hz",
873 .clock = 148800,
874 .refresh = 29970,
875 .oversample = TV_OVERSAMPLE_2X,
876 .component_only = 1,
877
878 .hsync_end = 88, .hblank_end = 235,
879 .hblank_start = 2155, .htotal = 2200,
880
881 .progressive = false, .trilevel_sync = true,
882
883 .vsync_start_f1 = 4, .vsync_start_f2 = 5,
884 .vsync_len = 10,
885
886 .veq_ena = true, .veq_start_f1 = 4,
887 .veq_start_f2 = 4, .veq_len = 10,
888
889
890 .vi_end_f1 = 21, .vi_end_f2 = 22,
891 .nbr_end = 539,
892
893 .burst_ena = false,
894
895 .filter_table = filter_table,
896 },
897};
898
899#define NUM_TV_MODES (sizeof(tv_modes) / sizeof(tv_modes[0]))
900
901static void
902intel_tv_dpms(struct drm_encoder *encoder, int mode)
903{
904 struct drm_device *dev = encoder->dev;
905 struct drm_i915_private *dev_priv = dev->dev_private;
906
907 switch(mode) {
908 case DRM_MODE_DPMS_ON:
909 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
910 break;
911 case DRM_MODE_DPMS_STANDBY:
912 case DRM_MODE_DPMS_SUSPEND:
913 case DRM_MODE_DPMS_OFF:
914 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
915 break;
916 }
917}
918
919static void
920intel_tv_save(struct drm_connector *connector)
921{
922 struct drm_device *dev = connector->dev;
923 struct drm_i915_private *dev_priv = dev->dev_private;
924 struct intel_output *intel_output = to_intel_output(connector);
925 struct intel_tv_priv *tv_priv = intel_output->dev_priv;
926 int i;
927
928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
929 tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
930 tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
931 tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
932 tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
933 tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
934 tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
935 tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
936 tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
937 tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
938 tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
939 tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
940 tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
941
942 tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
943 tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
944 tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
945 tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
946 tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
947 tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
948 tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
949 tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
950 tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
951 tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
952 tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
953 tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
954 tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
955
956 for (i = 0; i < 60; i++)
957 tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
958 for (i = 0; i < 60; i++)
959 tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
960 for (i = 0; i < 43; i++)
961 tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
962 for (i = 0; i < 43; i++)
963 tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
964
965 tv_priv->save_TV_DAC = I915_READ(TV_DAC);
966 tv_priv->save_TV_CTL = I915_READ(TV_CTL);
967}
968
969static void
970intel_tv_restore(struct drm_connector *connector)
971{
972 struct drm_device *dev = connector->dev;
973 struct drm_i915_private *dev_priv = dev->dev_private;
974 struct intel_output *intel_output = to_intel_output(connector);
975 struct intel_tv_priv *tv_priv = intel_output->dev_priv;
976 struct drm_crtc *crtc = connector->encoder->crtc;
977 struct intel_crtc *intel_crtc;
978 int i;
979
980 /* FIXME: No CRTC? */
981 if (!crtc)
982 return;
983
984 intel_crtc = to_intel_crtc(crtc);
985 I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
986 I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
987 I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
988 I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
989 I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
990 I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
991 I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
992 I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
993 I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
994 I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
995 I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
996 I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
997 I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
998
999 I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
1000 I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
1001 I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
1002 I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
1003 I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
1004 I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
1005 I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
1006 I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
1007
1008 {
1009 int pipeconf_reg = (intel_crtc->pipe == 0) ?
1010 PIPEACONF : PIPEBCONF;
1011 int dspcntr_reg = (intel_crtc->plane == 0) ?
1012 DSPACNTR : DSPBCNTR;
1013 int pipeconf = I915_READ(pipeconf_reg);
1014 int dspcntr = I915_READ(dspcntr_reg);
1015 int dspbase_reg = (intel_crtc->plane == 0) ?
1016 DSPAADDR : DSPBADDR;
1017 /* Pipe must be off here */
1018 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
1019 /* Flush the plane changes */
1020 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1021
1022 if (!IS_I9XX(dev)) {
1023 /* Wait for vblank for the disable to take effect */
1024 intel_wait_for_vblank(dev);
1025 }
1026
1027 I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
1028 /* Wait for vblank for the disable to take effect. */
1029 intel_wait_for_vblank(dev);
1030
1031 /* Filter ctl must be set before TV_WIN_SIZE */
1032 I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
1033 I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
1034 I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
1035 I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
1036 I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
1037 I915_WRITE(pipeconf_reg, pipeconf);
1038 I915_WRITE(dspcntr_reg, dspcntr);
1039 /* Flush the plane changes */
1040 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1041 }
1042
1043 for (i = 0; i < 60; i++)
1044 I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
1045 for (i = 0; i < 60; i++)
1046 I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
1047 for (i = 0; i < 43; i++)
1048 I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
1049 for (i = 0; i < 43; i++)
1050 I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
1051
1052 I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
1053 I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
1054}
1055
1056static const struct tv_mode *
1057intel_tv_mode_lookup (char *tv_format)
1058{
1059 int i;
1060
1061 for (i = 0; i < NUM_TV_MODES; i++) {
1062 const struct tv_mode *tv_mode = &tv_modes[i];
1063
1064 if (!strcmp(tv_format, tv_mode->name))
1065 return tv_mode;
1066 }
1067 return NULL;
1068}
1069
1070static const struct tv_mode *
1071intel_tv_mode_find (struct intel_output *intel_output)
1072{
1073 struct intel_tv_priv *tv_priv = intel_output->dev_priv;
1074
1075 return intel_tv_mode_lookup(tv_priv->tv_format);
1076}
1077
1078static enum drm_mode_status
1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
1080{
1081 struct intel_output *intel_output = to_intel_output(connector);
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1083
1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) < 1000)
1086 return MODE_OK;
1087 return MODE_CLOCK_RANGE;
1088}
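/*
 * Worked example (illustrative): the NTSC-M entry stores .refresh as
 * 29970 millihertz.  A candidate 30 Hz mode yields |29970 - 30 * 1000| = 30,
 * inside the 1000 mHz (1 Hz) tolerance, so it reports MODE_OK; a 50 Hz mode
 * yields 20030 and is rejected with MODE_CLOCK_RANGE.
 */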
1089
1090
1091static bool
1092intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
1093 struct drm_display_mode *adjusted_mode)
1094{
1095 struct drm_device *dev = encoder->dev;
1096 struct drm_mode_config *drm_config = &dev->mode_config;
1097 struct intel_output *intel_output = enc_to_intel_output(encoder);
1098 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output);
1099 struct drm_encoder *other_encoder;
1100
1101 if (!tv_mode)
1102 return false;
1103
1104 /* FIXME: lock encoder list */
1105 list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
1106 if (other_encoder != encoder &&
1107 other_encoder->crtc == encoder->crtc)
1108 return false;
1109 }
1110
1111 adjusted_mode->clock = tv_mode->clock;
1112 return true;
1113}
1114
1115static void
1116intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1117 struct drm_display_mode *adjusted_mode)
1118{
1119 struct drm_device *dev = encoder->dev;
1120 struct drm_i915_private *dev_priv = dev->dev_private;
1121 struct drm_crtc *crtc = encoder->crtc;
1122 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1123 struct intel_output *intel_output = enc_to_intel_output(encoder);
1124 struct intel_tv_priv *tv_priv = intel_output->dev_priv;
1125 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1126 u32 tv_ctl;
1127 u32 hctl1, hctl2, hctl3;
1128 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
1129 u32 scctl1, scctl2, scctl3;
1130 int i, j;
1131 const struct video_levels *video_levels;
1132 const struct color_conversion *color_conversion;
1133 bool burst_ena;
1134
1135 if (!tv_mode)
1136 return; /* can't happen (mode_prepare prevents this) */
1137
1138 tv_ctl = 0;
1139
1140 switch (tv_priv->type) {
1141 default:
1142 case DRM_MODE_CONNECTOR_Unknown:
1143 case DRM_MODE_CONNECTOR_Composite:
1144 tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
1145 video_levels = tv_mode->composite_levels;
1146 color_conversion = tv_mode->composite_color;
1147 burst_ena = tv_mode->burst_ena;
1148 break;
1149 case DRM_MODE_CONNECTOR_Component:
1150 tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
1151 video_levels = &component_levels;
1152 if (tv_mode->burst_ena)
1153 color_conversion = &sdtv_csc_yprpb;
1154 else
1155 color_conversion = &hdtv_csc_yprpb;
1156 burst_ena = false;
1157 break;
1158 case DRM_MODE_CONNECTOR_SVIDEO:
1159 tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
1160 video_levels = tv_mode->svideo_levels;
1161 color_conversion = tv_mode->svideo_color;
1162 burst_ena = tv_mode->burst_ena;
1163 break;
1164 }
1165 hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
1166 (tv_mode->htotal << TV_HTOTAL_SHIFT);
1167
1168 hctl2 = (tv_mode->hburst_start << 16) |
1169 (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
1170
1171 if (burst_ena)
1172 hctl2 |= TV_BURST_ENA;
1173
1174 hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
1175 (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
1176
1177 vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
1178 (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
1179 (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
1180
1181 vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
1182 (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
1183 (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
1184
1185 vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
1186 (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
1187 (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
1188
1189 if (tv_mode->veq_ena)
1190 vctl3 |= TV_EQUAL_ENA;
1191
1192 vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
1193 (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
1194
1195 vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
1196 (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
1197
1198 vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
1199 (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
1200
1201 vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
1202 (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
1203
1204 if (intel_crtc->pipe == 1)
1205 tv_ctl |= TV_ENC_PIPEB_SELECT;
1206 tv_ctl |= tv_mode->oversample;
1207
1208 if (tv_mode->progressive)
1209 tv_ctl |= TV_PROGRESSIVE;
1210 if (tv_mode->trilevel_sync)
1211 tv_ctl |= TV_TRILEVEL_SYNC;
1212 if (tv_mode->pal_burst)
1213 tv_ctl |= TV_PAL_BURST;
1214 scctl1 = 0;
1215 /* dda1 implies valid video levels */
1216 if (tv_mode->dda1_inc) {
1217 scctl1 |= TV_SC_DDA1_EN;
1218 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1219 }
1220
1221 if (tv_mode->dda2_inc)
1222 scctl1 |= TV_SC_DDA2_EN;
1223
1224 if (tv_mode->dda3_inc)
1225 scctl1 |= TV_SC_DDA3_EN;
1226
1227 scctl1 |= tv_mode->sc_reset;
1228 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
1229
1230 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
1231 tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
1232
1233 scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
1234 tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
1235
1236 /* Enable two fixes for the chips that need them. */
1237 if (dev->pci_device < 0x2772)
1238 tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
1239
1240 I915_WRITE(TV_H_CTL_1, hctl1);
1241 I915_WRITE(TV_H_CTL_2, hctl2);
1242 I915_WRITE(TV_H_CTL_3, hctl3);
1243 I915_WRITE(TV_V_CTL_1, vctl1);
1244 I915_WRITE(TV_V_CTL_2, vctl2);
1245 I915_WRITE(TV_V_CTL_3, vctl3);
1246 I915_WRITE(TV_V_CTL_4, vctl4);
1247 I915_WRITE(TV_V_CTL_5, vctl5);
1248 I915_WRITE(TV_V_CTL_6, vctl6);
1249 I915_WRITE(TV_V_CTL_7, vctl7);
1250 I915_WRITE(TV_SC_CTL_1, scctl1);
1251 I915_WRITE(TV_SC_CTL_2, scctl2);
1252 I915_WRITE(TV_SC_CTL_3, scctl3);
1253
1254 if (color_conversion) {
1255 I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
1256 color_conversion->gy);
1257 I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) |
1258 color_conversion->ay);
1259 I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
1260 color_conversion->gu);
1261 I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
1262 color_conversion->au);
1263 I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
1264 color_conversion->gv);
1265 I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
1266 color_conversion->av);
1267 }
1268
1269 I915_WRITE(TV_CLR_KNOBS, 0x00606000);
1270 if (video_levels)
1271 I915_WRITE(TV_CLR_LEVEL,
1272 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
1273 (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
1274 {
1275 int pipeconf_reg = (intel_crtc->pipe == 0) ?
1276 PIPEACONF : PIPEBCONF;
1277 int dspcntr_reg = (intel_crtc->plane == 0) ?
1278 DSPACNTR : DSPBCNTR;
1279 int pipeconf = I915_READ(pipeconf_reg);
1280 int dspcntr = I915_READ(dspcntr_reg);
1281 int dspbase_reg = (intel_crtc->plane == 0) ?
1282 DSPAADDR : DSPBADDR;
1283 int xpos = 0x0, ypos = 0x0;
1284 unsigned int xsize, ysize;
1285 /* Pipe must be off here */
1286 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
1287 /* Flush the plane changes */
1288 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1289
1290 /* Wait for vblank for the disable to take effect */
1291 if (!IS_I9XX(dev))
1292 intel_wait_for_vblank(dev);
1293
1294 I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
1295 /* Wait for vblank for the disable to take effect. */
1296 intel_wait_for_vblank(dev);
1297
1298 /* Filter ctl must be set before TV_WIN_SIZE */
1299 I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
1300 xsize = tv_mode->hblank_start - tv_mode->hblank_end;
1301 if (tv_mode->progressive)
1302 ysize = tv_mode->nbr_end + 1;
1303 else
1304 ysize = 2*tv_mode->nbr_end + 1;
1305
1306 xpos += tv_priv->margin[TV_MARGIN_LEFT];
1307 ypos += tv_priv->margin[TV_MARGIN_TOP];
1308 xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
1309 tv_priv->margin[TV_MARGIN_RIGHT]);
1310 ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
1311 tv_priv->margin[TV_MARGIN_BOTTOM]);
1312 I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
1313 I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
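		/*
		 * Worked example (illustrative): for NTSC-M with the default
		 * margins set in intel_tv_init() (left 54, top 36, right 46,
		 * bottom 37), xsize = 836 - 124 = 712 and, interlaced,
		 * ysize = 2 * 240 + 1 = 481; after the margins are taken off
		 * this writes TV_WIN_POS = (54 << 16) | 36 and
		 * TV_WIN_SIZE = (612 << 16) | 408.
		 */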
1314
1315 I915_WRITE(pipeconf_reg, pipeconf);
1316 I915_WRITE(dspcntr_reg, dspcntr);
1317 /* Flush the plane changes */
1318 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1319 }
1320
1321 j = 0;
1322 for (i = 0; i < 60; i++)
1323 I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
1324 for (i = 0; i < 60; i++)
1325 I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
1326 for (i = 0; i < 43; i++)
1327 I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
1328 for (i = 0; i < 43; i++)
1329 I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
1330 I915_WRITE(TV_DAC, 0);
1331 I915_WRITE(TV_CTL, tv_ctl);
1332}
1333
1334static const struct drm_display_mode reported_modes[] = {
1335 {
1336 .name = "NTSC 480i",
1337 .clock = 107520,
1338 .hdisplay = 1280,
1339 .hsync_start = 1368,
1340 .hsync_end = 1496,
1341 .htotal = 1712,
1342
1343 .vdisplay = 1024,
1344 .vsync_start = 1027,
1345 .vsync_end = 1034,
1346 .vtotal = 1104,
1347 .type = DRM_MODE_TYPE_DRIVER,
1348 },
1349};
1350
1351/**
1352 * Detects TV presence by checking for load.
1353 *
1354 * Requires that the current pipe's DPLL is active.
1355 *
1356 * \return the detected DRM_MODE_CONNECTOR_* type if a TV is connected.
1357 * \return -1 if no TV is detected.
1358 */
1359static int
1360intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1361{
1362 struct drm_encoder *encoder = &intel_output->enc;
1363 struct drm_device *dev = encoder->dev;
1364 struct drm_i915_private *dev_priv = dev->dev_private;
1365 unsigned long irqflags;
1366 u32 tv_ctl, save_tv_ctl;
1367 u32 tv_dac, save_tv_dac;
1368 int type = DRM_MODE_CONNECTOR_Unknown;
1369
1370 tv_dac = I915_READ(TV_DAC);
1371
1372 /* Disable TV interrupts around load detect or we'll recurse */
1373 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1374 i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
1375 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1376 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
1377
1378 /*
1379 * Detect TV by polling.
1380 */
1381 if (intel_output->load_detect_temp) {
1382 /* TV not currently running, prod it with destructive detect */
1383 save_tv_dac = tv_dac;
1384 tv_ctl = I915_READ(TV_CTL);
1385 save_tv_ctl = tv_ctl;
1386 tv_ctl &= ~TV_ENC_ENABLE;
1387 tv_ctl &= ~TV_TEST_MODE_MASK;
1388 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
1389 tv_dac &= ~TVDAC_SENSE_MASK;
1390 tv_dac |= (TVDAC_STATE_CHG_EN |
1391 TVDAC_A_SENSE_CTL |
1392 TVDAC_B_SENSE_CTL |
1393 TVDAC_C_SENSE_CTL |
1394 DAC_CTL_OVERRIDE |
1395 DAC_A_0_7_V |
1396 DAC_B_0_7_V |
1397 DAC_C_0_7_V);
1398 I915_WRITE(TV_CTL, tv_ctl);
1399 I915_WRITE(TV_DAC, tv_dac);
1400 intel_wait_for_vblank(dev);
1401 tv_dac = I915_READ(TV_DAC);
1402 I915_WRITE(TV_DAC, save_tv_dac);
1403 I915_WRITE(TV_CTL, save_tv_ctl);
1404 }
1405 /*
1406 * A B C
1407 * 0 1 1 Composite
1408 * 1 0 X svideo
1409 * 0 0 0 Component
1410 */
1411 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
1412 DRM_DEBUG("Detected Composite TV connection\n");
1413 type = DRM_MODE_CONNECTOR_Composite;
1414 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
1415 DRM_DEBUG("Detected S-Video TV connection\n");
1416 type = DRM_MODE_CONNECTOR_SVIDEO;
1417 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
1418 DRM_DEBUG("Detected Component TV connection\n");
1419 type = DRM_MODE_CONNECTOR_Component;
1420 } else {
1421 DRM_DEBUG("No TV connection detected\n");
1422 type = -1;
1423 }
1424
1425 /* Restore interrupt config */
1426 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1427 i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
1428 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1429 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
1430
1431 return type;
1432}
1433
1434/**
1435 * Detect the TV connection.
1436 *
1437 * The connector type is probed by load detection, temporarily borrowing a
1438 * pipe via intel_get_load_detect_pipe() when none is driving the encoder yet.
1439 */
1440static enum drm_connector_status
1441intel_tv_detect(struct drm_connector *connector)
1442{
1443 struct drm_crtc *crtc;
1444 struct drm_display_mode mode;
1445 struct intel_output *intel_output = to_intel_output(connector);
1446 struct intel_tv_priv *tv_priv = intel_output->dev_priv;
1447 struct drm_encoder *encoder = &intel_output->enc;
1448 int dpms_mode;
1449 int type = tv_priv->type;
1450
1451 mode = reported_modes[0];
1452 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1453
1454 if (encoder->crtc) {
1455 type = intel_tv_detect_type(encoder->crtc, intel_output);
1456 } else {
1457 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
1458 if (crtc) {
1459 type = intel_tv_detect_type(crtc, intel_output);
1460 intel_release_load_detect_pipe(intel_output, dpms_mode);
1461 } else
1462 type = -1;
1463 }
1464
1465 if (type < 0)
1466 return connector_status_disconnected;
1467
1468 return connector_status_connected;
1469}
1470
1471static struct input_res {
1472 char *name;
1473 int w, h;
1474} input_res_table[] =
1475{
1476 {"640x480", 640, 480},
1477 {"800x600", 800, 600},
1478 {"1024x768", 1024, 768},
1479 {"1280x1024", 1280, 1024},
1480 {"848x480", 848, 480},
1481 {"1280x720", 1280, 720},
1482 {"1920x1080", 1920, 1080},
1483};
1484
1485/**
1486 * Stub get_modes function.
1487 *
1488 * This should probably return a set of fixed modes, unless we can figure out
1489 * how to probe modes off of TV connections.
1490 */
1491
1492static int
1493intel_tv_get_modes(struct drm_connector *connector)
1494{
1495 struct drm_display_mode *mode_ptr;
1496 struct intel_output *intel_output = to_intel_output(connector);
1497 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1498 int j;
1499
1500 for (j = 0; j < ARRAY_SIZE(input_res_table); j++) {
1502 struct input_res *input = &input_res_table[j];
1503 unsigned int hactive_s = input->w;
1504 unsigned int vactive_s = input->h;
1505
1506 if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
1507 continue;
1508
1509 if (input->w > 1024 && (!tv_mode->progressive
1510 && !tv_mode->component_only))
1511 continue;
1512
1513 mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
1514 DRM_MEM_DRIVER);
1515 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
1516
1517 mode_ptr->hdisplay = hactive_s;
1518 mode_ptr->hsync_start = hactive_s + 1;
1519 mode_ptr->hsync_end = hactive_s + 64;
1520 if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
1521 mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
1522 mode_ptr->htotal = hactive_s + 96;
1523
1524 mode_ptr->vdisplay = vactive_s;
1525 mode_ptr->vsync_start = vactive_s + 1;
1526 mode_ptr->vsync_end = vactive_s + 32;
1527 if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
1528 mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
1529 mode_ptr->vtotal = vactive_s + 33;
1530
1531 /* tv_mode->refresh is in millihertz; divide early to avoid 32-bit overflow */
1532 mode_ptr->clock = (int) (tv_mode->refresh *
1533 mode_ptr->vtotal / 1000) * mode_ptr->htotal / 1000;
1534
1535 mode_ptr->type = DRM_MODE_TYPE_DRIVER;
1536 drm_mode_probed_add(connector, mode_ptr);
1537 }
1538
1539 return 0;
1540}
1541
1542static void
1543intel_tv_destroy (struct drm_connector *connector)
1544{
1545 struct intel_output *intel_output = to_intel_output(connector);
1546
1547 drm_sysfs_connector_remove(connector);
1548 drm_connector_cleanup(connector);
1549 drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv),
1550 DRM_MEM_DRIVER);
1551}
1552
1553
1554static int
1555intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
1556 uint64_t val)
1557{
1558 struct drm_device *dev = connector->dev;
1559 struct intel_output *intel_output = to_intel_output(connector);
1560 struct intel_tv_priv *tv_priv = intel_output->dev_priv;
1561 int ret = 0;
1562
1563 ret = drm_connector_property_set_value(connector, property, val);
1564 if (ret < 0)
1565 goto out;
1566
1567 if (property == dev->mode_config.tv_left_margin_property)
1568 tv_priv->margin[TV_MARGIN_LEFT] = val;
1569 else if (property == dev->mode_config.tv_right_margin_property)
1570 tv_priv->margin[TV_MARGIN_RIGHT] = val;
1571 else if (property == dev->mode_config.tv_top_margin_property)
1572 tv_priv->margin[TV_MARGIN_TOP] = val;
1573 else if (property == dev->mode_config.tv_bottom_margin_property)
1574 tv_priv->margin[TV_MARGIN_BOTTOM] = val;
1575 else if (property == dev->mode_config.tv_mode_property) {
1576 if (val >= NUM_TV_MODES) {
1577 ret = -EINVAL;
1578 goto out;
1579 }
1580 tv_priv->tv_format = tv_modes[val].name;
1582 } else {
1583 ret = -EINVAL;
1584 goto out;
1585 }
1586
1587 intel_tv_mode_set(&intel_output->enc, NULL, NULL);
1588out:
1589 return ret;
1590}
1591
1592static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1593 .dpms = intel_tv_dpms,
1594 .mode_fixup = intel_tv_mode_fixup,
1595 .prepare = intel_encoder_prepare,
1596 .mode_set = intel_tv_mode_set,
1597 .commit = intel_encoder_commit,
1598};
1599
1600static const struct drm_connector_funcs intel_tv_connector_funcs = {
1601 .save = intel_tv_save,
1602 .restore = intel_tv_restore,
1603 .detect = intel_tv_detect,
1604 .destroy = intel_tv_destroy,
1605 .set_property = intel_tv_set_property,
1606 .fill_modes = drm_helper_probe_single_connector_modes,
1607};
1608
1609static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
1610 .mode_valid = intel_tv_mode_valid,
1611 .get_modes = intel_tv_get_modes,
1612 .best_encoder = intel_best_encoder,
1613};
1614
1615static void intel_tv_enc_destroy(struct drm_encoder *encoder)
1616{
1617 drm_encoder_cleanup(encoder);
1618}
1619
1620static const struct drm_encoder_funcs intel_tv_enc_funcs = {
1621 .destroy = intel_tv_enc_destroy,
1622};
1623
1624
1625void
1626intel_tv_init(struct drm_device *dev)
1627{
1628 struct drm_i915_private *dev_priv = dev->dev_private;
1629 struct drm_connector *connector;
1630 struct intel_output *intel_output;
1631 struct intel_tv_priv *tv_priv;
1632 u32 tv_dac_on, tv_dac_off, save_tv_dac;
1633 char **tv_format_names;
1634 int i, initial_mode = 0;
1635
1636 if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
1637 return;
1638
1639 /* Even if we have an encoder we may not have a connector */
1640 if (!dev_priv->int_tv_support)
1641 return;
1642
1643 /*
1644 * Sanity check the TV output by checking to see if the
1645 * DAC register holds a value
1646 */
1647 save_tv_dac = I915_READ(TV_DAC);
1648
1649 I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
1650 tv_dac_on = I915_READ(TV_DAC);
1651
1652 I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
1653 tv_dac_off = I915_READ(TV_DAC);
1654
1655 I915_WRITE(TV_DAC, save_tv_dac);
1656
1657 /*
1658 * If the register does not hold the state change enable
1659 * bit, (either as a 0 or a 1), assume it doesn't really
1660 * exist
1661 */
1662 if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
1663 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1664 return;
1665
1666 intel_output = drm_calloc(1, sizeof(struct intel_output) +
1667 sizeof(struct intel_tv_priv), DRM_MEM_DRIVER);
1668 if (!intel_output) {
1669 return;
1670 }
1671 connector = &intel_output->base;
1672
1673 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1674 DRM_MODE_CONNECTOR_SVIDEO);
1675
1676 drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs,
1677 DRM_MODE_ENCODER_TVDAC);
1678
1679 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1680 tv_priv = (struct intel_tv_priv *)(intel_output + 1);
1681 intel_output->type = INTEL_OUTPUT_TVOUT;
1682 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1683 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1684 intel_output->dev_priv = tv_priv;
1685 tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
1686
1687 /* BIOS margin values */
1688 tv_priv->margin[TV_MARGIN_LEFT] = 54;
1689 tv_priv->margin[TV_MARGIN_TOP] = 36;
1690 tv_priv->margin[TV_MARGIN_RIGHT] = 46;
1691 tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
1692
1693 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
1694
1695 drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs);
1696 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
1697 connector->interlace_allowed = false;
1698 connector->doublescan_allowed = false;
1699
1700 /* Create TV properties then attach current values */
1701 tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES,
1702 DRM_MEM_DRIVER);
1703 if (!tv_format_names)
1704 goto out;
1705 for (i = 0; i < NUM_TV_MODES; i++)
1706 tv_format_names[i] = tv_modes[i].name;
1707 drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names);
1708
1709 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
1710 initial_mode);
1711 drm_connector_attach_property(connector,
1712 dev->mode_config.tv_left_margin_property,
1713 tv_priv->margin[TV_MARGIN_LEFT]);
1714 drm_connector_attach_property(connector,
1715 dev->mode_config.tv_top_margin_property,
1716 tv_priv->margin[TV_MARGIN_TOP]);
1717 drm_connector_attach_property(connector,
1718 dev->mode_config.tv_right_margin_property,
1719 tv_priv->margin[TV_MARGIN_RIGHT]);
1720 drm_connector_attach_property(connector,
1721 dev->mode_config.tv_bottom_margin_property,
1722 tv_priv->margin[TV_MARGIN_BOTTOM]);
1723out:
1724 drm_sysfs_connector_add(connector);
1725}
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 4b27d9abb7bc..cace3964feeb 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -860,12 +860,12 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
  * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
  * be careful about how this function is called.
  */
-static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
+static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
 {
-	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
 
-	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+	buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
 	buf->pending = 1;
 	buf->used = 0;
 }
@@ -1027,6 +1027,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 			 drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf *buf = NULL;
 	int emit_dispatch_age = 0;
@@ -1134,7 +1135,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 		}
 
 		emit_dispatch_age = 1;
-		r300_discard_buffer(dev, buf);
+		r300_discard_buffer(dev, file_priv->master, buf);
 		break;
 
 	case R300_CMD_WAIT:
@@ -1189,7 +1190,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 
 		/* Emit the vertex buffer age */
 		BEGIN_RING(2);
-		RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
+		RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
 		ADVANCE_RING();
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index dcebb4bee7aa..63212d7bbc28 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -31,6 +31,7 @@
 
 #include "drmP.h"
 #include "drm.h"
+#include "drm_sarea.h"
 #include "radeon_drm.h"
 #include "radeon_drv.h"
 #include "r300_reg.h"
@@ -667,15 +668,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
 		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
 	}			/* PCIE cards appears to not need this */
 
-	dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
-	RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
+	dev_priv->scratch[0] = 0;
+	RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);
 
-	dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
-	RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
-		     dev_priv->sarea_priv->last_dispatch);
+	dev_priv->scratch[1] = 0;
+	RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);
 
-	dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
-	RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
+	dev_priv->scratch[2] = 0;
+	RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
 
 	radeon_do_wait_for_idle(dev_priv);
 
@@ -871,9 +871,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
 	}
 }
 
-static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
+static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+			     struct drm_file *file_priv)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
 
 	DRM_DEBUG("\n");
 
@@ -998,8 +1000,8 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
 	dev_priv->buffers_offset = init->buffers_offset;
 	dev_priv->gart_textures_offset = init->gart_textures_offset;
 
-	dev_priv->sarea = drm_getsarea(dev);
-	if (!dev_priv->sarea) {
+	master_priv->sarea = drm_getsarea(dev);
+	if (!master_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		radeon_do_cleanup_cp(dev);
 		return -EINVAL;
@@ -1035,10 +1037,6 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
 		}
 	}
 
-	dev_priv->sarea_priv =
-	    (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
-				    init->sarea_priv_offset);
-
 #if __OS_HAS_AGP
 	if (dev_priv->flags & RADEON_IS_AGP) {
 		drm_core_ioremap(dev_priv->cp_ring, dev);
@@ -1329,7 +1327,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri
 	case RADEON_INIT_CP:
 	case RADEON_INIT_R200_CP:
 	case RADEON_INIT_R300_CP:
-		return radeon_do_init_cp(dev, init);
+		return radeon_do_init_cp(dev, init, file_priv);
 	case RADEON_CLEANUP_CP:
 		return radeon_do_cleanup_cp(dev);
 	}
@@ -1768,6 +1766,51 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
 	return ret;
 }
 
+int radeon_master_create(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_radeon_master_private *master_priv;
+	unsigned long sareapage;
+	int ret;
+
+	master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+	if (!master_priv)
+		return -ENOMEM;
+
+	/* prebuild the SAREA */
+	sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
+	ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+			 &master_priv->sarea);
+	if (ret) {
+		DRM_ERROR("SAREA setup failed\n");
+		return ret;
+	}
+	master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+	master_priv->sarea_priv->pfCurrentPage = 0;
+
+	master->driver_priv = master_priv;
+	return 0;
+}
+
+void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+	if (!master_priv)
+		return;
+
+	if (master_priv->sarea_priv &&
+	    master_priv->sarea_priv->pfCurrentPage != 0)
+		radeon_cp_dispatch_flip(dev, master);
+
+	master_priv->sarea_priv = NULL;
+	if (master_priv->sarea)
+		drm_rmmap_locked(dev, master_priv->sarea);
+
+	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+
+	master->driver_priv = NULL;
+}
+
 /* Create mappings for registers and framebuffer so userland doesn't necessarily
  * have to find them.
  */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 71af746a4e47..fef207881f45 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -96,6 +96,8 @@ static struct drm_driver driver = {
 	.enable_vblank = radeon_enable_vblank,
 	.disable_vblank = radeon_disable_vblank,
 	.dri_library_name = dri_library_name,
+	.master_create = radeon_master_create,
+	.master_destroy = radeon_master_destroy,
 	.irq_preinstall = radeon_driver_irq_preinstall,
 	.irq_postinstall = radeon_driver_irq_postinstall,
 	.irq_uninstall = radeon_driver_irq_uninstall,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 3bbb871b25d5..490bc7ceef60 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -226,9 +226,13 @@ struct radeon_virt_surface {
 #define RADEON_FLUSH_EMITED	(1 < 0)
 #define RADEON_PURGE_EMITED	(1 < 1)
 
+struct drm_radeon_master_private {
+	drm_local_map_t *sarea;
+	drm_radeon_sarea_t *sarea_priv;
+};
+
 typedef struct drm_radeon_private {
 	drm_radeon_ring_buffer_t ring;
-	drm_radeon_sarea_t *sarea_priv;
 
 	u32 fb_location;
 	u32 fb_size;
@@ -409,6 +413,9 @@ extern int radeon_driver_open(struct drm_device *dev,
 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg);
 
+extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
+extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
+extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master);
 /* r300_cmdbuf.c */
 extern void r300_init_reg_flags(struct drm_device *dev);
 
@@ -1335,8 +1342,9 @@ do { \
 } while (0)
 
 #define VB_AGE_TEST_WITH_RETURN( dev_priv )				\
 do {									\
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;		\
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;	\
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;	\
 	if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) {		\
 		int __ret = radeon_do_cp_idle( dev_priv );		\
 		if ( __ret ) return __ret;				\
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 5d7153fcc7b0..ef940a079dcb 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -742,13 +742,14 @@ static struct {
  */
 
 static void radeon_clear_box(drm_radeon_private_t * dev_priv,
+			     struct drm_radeon_master_private *master_priv,
 			     int x, int y, int w, int h, int r, int g, int b)
 {
 	u32 color;
 	RING_LOCALS;
 
-	x += dev_priv->sarea_priv->boxes[0].x1;
-	y += dev_priv->sarea_priv->boxes[0].y1;
+	x += master_priv->sarea_priv->boxes[0].x1;
+	y += master_priv->sarea_priv->boxes[0].y1;
 
 	switch (dev_priv->color_fmt) {
 	case RADEON_COLOR_FORMAT_RGB565:
@@ -776,7 +777,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv,
 		 RADEON_GMC_SRC_DATATYPE_COLOR |
 		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
 
-	if (dev_priv->sarea_priv->pfCurrentPage == 1) {
+	if (master_priv->sarea_priv->pfCurrentPage == 1) {
 		OUT_RING(dev_priv->front_pitch_offset);
 	} else {
 		OUT_RING(dev_priv->back_pitch_offset);
@@ -790,7 +791,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv,
 	ADVANCE_RING();
 }
 
-static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
+static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv)
 {
 	/* Collapse various things into a wait flag -- trying to
 	 * guess if userspase slept -- better just to have them tell us.
@@ -807,12 +808,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
 	/* Purple box for page flipping
 	 */
 	if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
-		radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);
+		radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);
 
 	/* Red box if we have to wait for idle at any point
 	 */
 	if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
-		radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);
+		radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);
 
 	/* Blue box: lost context?
 	 */
@@ -820,12 +821,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
 	/* Yellow box for texture swaps
 	 */
 	if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
-		radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);
+		radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);
 
 	/* Green box if hardware never idles (as far as we can tell)
 	 */
 	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
-		radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+		radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);
 
 	/* Draw bars indicating number of buffers allocated
 	 * (not a great measure, easily confused)
@@ -834,7 +835,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
 		if (dev_priv->stats.requested_bufs > 100)
 			dev_priv->stats.requested_bufs = 100;
 
-		radeon_clear_box(dev_priv, 4, 16,
+		radeon_clear_box(dev_priv, master_priv, 4, 16,
 				 dev_priv->stats.requested_bufs, 4,
 				 196, 128, 128);
 	}
@@ -848,11 +849,13 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
  */
 
 static void radeon_cp_dispatch_clear(struct drm_device * dev,
+				     struct drm_master *master,
 				     drm_radeon_clear_t * clear,
 				     drm_radeon_clear_rect_t * depth_boxes)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
 	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
 	int nbox = sarea_priv->nbox;
 	struct drm_clip_rect *pbox = sarea_priv->boxes;
@@ -864,7 +867,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
864 867
865 dev_priv->stats.clears++; 868 dev_priv->stats.clears++;
866 869
867 if (dev_priv->sarea_priv->pfCurrentPage == 1) { 870 if (sarea_priv->pfCurrentPage == 1) {
868 unsigned int tmp = flags; 871 unsigned int tmp = flags;
869 872
870 flags &= ~(RADEON_FRONT | RADEON_BACK); 873 flags &= ~(RADEON_FRONT | RADEON_BACK);
@@ -890,7 +893,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
890 893
891 /* Make sure we restore the 3D state next time. 894 /* Make sure we restore the 3D state next time.
892 */ 895 */
893 dev_priv->sarea_priv->ctx_owner = 0; 896 sarea_priv->ctx_owner = 0;
894 897
895 for (i = 0; i < nbox; i++) { 898 for (i = 0; i < nbox; i++) {
896 int x = pbox[i].x1; 899 int x = pbox[i].x1;
@@ -967,7 +970,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
967 /* Make sure we restore the 3D state next time. 970 /* Make sure we restore the 3D state next time.
968 * we haven't touched any "normal" state - still need this? 971 * we haven't touched any "normal" state - still need this?
969 */ 972 */
970 dev_priv->sarea_priv->ctx_owner = 0; 973 sarea_priv->ctx_owner = 0;
971 974
972 if ((dev_priv->flags & RADEON_HAS_HIERZ) 975 if ((dev_priv->flags & RADEON_HAS_HIERZ)
973 && (flags & RADEON_USE_HIERZ)) { 976 && (flags & RADEON_USE_HIERZ)) {
@@ -1214,7 +1217,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
1214 1217
1215 /* Make sure we restore the 3D state next time. 1218 /* Make sure we restore the 3D state next time.
1216 */ 1219 */
1217 dev_priv->sarea_priv->ctx_owner = 0; 1220 sarea_priv->ctx_owner = 0;
1218 1221
1219 for (i = 0; i < nbox; i++) { 1222 for (i = 0; i < nbox; i++) {
1220 1223
@@ -1285,7 +1288,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
1285 1288
1286 /* Make sure we restore the 3D state next time. 1289 /* Make sure we restore the 3D state next time.
1287 */ 1290 */
1288 dev_priv->sarea_priv->ctx_owner = 0; 1291 sarea_priv->ctx_owner = 0;
1289 1292
1290 for (i = 0; i < nbox; i++) { 1293 for (i = 0; i < nbox; i++) {
1291 1294
@@ -1328,20 +1331,21 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
1328 * wait on this value before performing the clear ioctl. We 1331 * wait on this value before performing the clear ioctl. We
1329 * need this because the card's so damned fast... 1332 * need this because the card's so damned fast...
1330 */ 1333 */
1331 dev_priv->sarea_priv->last_clear++; 1334 sarea_priv->last_clear++;
1332 1335
1333 BEGIN_RING(4); 1336 BEGIN_RING(4);
1334 1337
1335 RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear); 1338 RADEON_CLEAR_AGE(sarea_priv->last_clear);
1336 RADEON_WAIT_UNTIL_IDLE(); 1339 RADEON_WAIT_UNTIL_IDLE();
1337 1340
1338 ADVANCE_RING(); 1341 ADVANCE_RING();
1339} 1342}
1340 1343
1341static void radeon_cp_dispatch_swap(struct drm_device * dev) 1344static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)
1342{ 1345{
1343 drm_radeon_private_t *dev_priv = dev->dev_private; 1346 drm_radeon_private_t *dev_priv = dev->dev_private;
1344 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 1347 struct drm_radeon_master_private *master_priv = master->driver_priv;
1348 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
1345 int nbox = sarea_priv->nbox; 1349 int nbox = sarea_priv->nbox;
1346 struct drm_clip_rect *pbox = sarea_priv->boxes; 1350 struct drm_clip_rect *pbox = sarea_priv->boxes;
1347 int i; 1351 int i;
@@ -1351,7 +1355,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
1351 /* Do some trivial performance monitoring... 1355 /* Do some trivial performance monitoring...
1352 */ 1356 */
1353 if (dev_priv->do_boxes) 1357 if (dev_priv->do_boxes)
1354 radeon_cp_performance_boxes(dev_priv); 1358 radeon_cp_performance_boxes(dev_priv, master_priv);
1355 1359
1356 /* Wait for the 3D stream to idle before dispatching the bitblt. 1360 /* Wait for the 3D stream to idle before dispatching the bitblt.
1357 * This will prevent data corruption between the two streams. 1361 * This will prevent data corruption between the two streams.
@@ -1385,7 +1389,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
1385 /* Make this work even if front & back are flipped: 1389 /* Make this work even if front & back are flipped:
1386 */ 1390 */
1387 OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); 1391 OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
1388 if (dev_priv->sarea_priv->pfCurrentPage == 0) { 1392 if (sarea_priv->pfCurrentPage == 0) {
1389 OUT_RING(dev_priv->back_pitch_offset); 1393 OUT_RING(dev_priv->back_pitch_offset);
1390 OUT_RING(dev_priv->front_pitch_offset); 1394 OUT_RING(dev_priv->front_pitch_offset);
1391 } else { 1395 } else {
@@ -1405,31 +1409,32 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
1405 * throttle the framerate by waiting for this value before 1409 * throttle the framerate by waiting for this value before
1406 * performing the swapbuffer ioctl. 1410 * performing the swapbuffer ioctl.
1407 */ 1411 */
1408 dev_priv->sarea_priv->last_frame++; 1412 sarea_priv->last_frame++;
1409 1413
1410 BEGIN_RING(4); 1414 BEGIN_RING(4);
1411 1415
1412 RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); 1416 RADEON_FRAME_AGE(sarea_priv->last_frame);
1413 RADEON_WAIT_UNTIL_2D_IDLE(); 1417 RADEON_WAIT_UNTIL_2D_IDLE();
1414 1418
1415 ADVANCE_RING(); 1419 ADVANCE_RING();
1416} 1420}
1417 1421
1418static void radeon_cp_dispatch_flip(struct drm_device * dev) 1422void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master)
1419{ 1423{
1420 drm_radeon_private_t *dev_priv = dev->dev_private; 1424 drm_radeon_private_t *dev_priv = dev->dev_private;
1421 struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; 1425 struct drm_radeon_master_private *master_priv = master->driver_priv;
1422 int offset = (dev_priv->sarea_priv->pfCurrentPage == 1) 1426 struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle;
1427 int offset = (master_priv->sarea_priv->pfCurrentPage == 1)
1423 ? dev_priv->front_offset : dev_priv->back_offset; 1428 ? dev_priv->front_offset : dev_priv->back_offset;
1424 RING_LOCALS; 1429 RING_LOCALS;
1425 DRM_DEBUG("pfCurrentPage=%d\n", 1430 DRM_DEBUG("pfCurrentPage=%d\n",
1426 dev_priv->sarea_priv->pfCurrentPage); 1431 master_priv->sarea_priv->pfCurrentPage);
1427 1432
1428 /* Do some trivial performance monitoring... 1433 /* Do some trivial performance monitoring...
1429 */ 1434 */
1430 if (dev_priv->do_boxes) { 1435 if (dev_priv->do_boxes) {
1431 dev_priv->stats.boxes |= RADEON_BOX_FLIP; 1436 dev_priv->stats.boxes |= RADEON_BOX_FLIP;
1432 radeon_cp_performance_boxes(dev_priv); 1437 radeon_cp_performance_boxes(dev_priv, master_priv);
1433 } 1438 }
1434 1439
1435 /* Update the frame offsets for both CRTCs 1440 /* Update the frame offsets for both CRTCs
@@ -1441,7 +1446,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
1441 ((sarea->frame.y * dev_priv->front_pitch + 1446 ((sarea->frame.y * dev_priv->front_pitch +
1442 sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) 1447 sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
1443 + offset); 1448 + offset);
1444 OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base 1449 OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base
1445 + offset); 1450 + offset);
1446 1451
1447 ADVANCE_RING(); 1452 ADVANCE_RING();
@@ -1450,13 +1455,13 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
1450 * throttle the framerate by waiting for this value before 1455 * throttle the framerate by waiting for this value before
1451 * performing the swapbuffer ioctl. 1456 * performing the swapbuffer ioctl.
1452 */ 1457 */
1453 dev_priv->sarea_priv->last_frame++; 1458 master_priv->sarea_priv->last_frame++;
1454 dev_priv->sarea_priv->pfCurrentPage = 1459 master_priv->sarea_priv->pfCurrentPage =
1455 1 - dev_priv->sarea_priv->pfCurrentPage; 1460 1 - master_priv->sarea_priv->pfCurrentPage;
1456 1461
1457 BEGIN_RING(2); 1462 BEGIN_RING(2);
1458 1463
1459 RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); 1464 RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame);
1460 1465
1461 ADVANCE_RING(); 1466 ADVANCE_RING();
1462} 1467}
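
The dispatch helpers in the hunks above all switch from the device-global dev_priv->sarea_priv to a per-master lookup. A minimal sketch of that access pattern follows; the function name is invented for illustration, but the types and field accesses are the ones used in this patch.

/* Illustrative only: how a dispatch helper resolves its SAREA after this
 * change.  Per-master state (clip rects, frame/clear ages, pfCurrentPage)
 * now hangs off the drm_master, while device-wide state stays in dev_priv.
 */
static void example_dispatch(struct drm_device *dev, struct drm_master *master)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        struct drm_radeon_master_private *master_priv = master->driver_priv;
        drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;

        sarea_priv->last_frame++;       /* per-master */
        dev_priv->stats.clears++;       /* device-wide */
}
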
@@ -1494,11 +1499,13 @@ typedef struct {
1494} drm_radeon_tcl_prim_t; 1499} drm_radeon_tcl_prim_t;
1495 1500
1496static void radeon_cp_dispatch_vertex(struct drm_device * dev, 1501static void radeon_cp_dispatch_vertex(struct drm_device * dev,
1502 struct drm_file *file_priv,
1497 struct drm_buf * buf, 1503 struct drm_buf * buf,
1498 drm_radeon_tcl_prim_t * prim) 1504 drm_radeon_tcl_prim_t * prim)
1499{ 1505{
1500 drm_radeon_private_t *dev_priv = dev->dev_private; 1506 drm_radeon_private_t *dev_priv = dev->dev_private;
1501 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 1507 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
1508 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
1502 int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; 1509 int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
1503 int numverts = (int)prim->numverts; 1510 int numverts = (int)prim->numverts;
1504 int nbox = sarea_priv->nbox; 1511 int nbox = sarea_priv->nbox;
@@ -1539,13 +1546,14 @@ static void radeon_cp_dispatch_vertex(struct drm_device * dev,
1539 } while (i < nbox); 1546 } while (i < nbox);
1540} 1547}
1541 1548
1542static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf) 1549static void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
1543{ 1550{
1544 drm_radeon_private_t *dev_priv = dev->dev_private; 1551 drm_radeon_private_t *dev_priv = dev->dev_private;
1552 struct drm_radeon_master_private *master_priv = master->driver_priv;
1545 drm_radeon_buf_priv_t *buf_priv = buf->dev_private; 1553 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1546 RING_LOCALS; 1554 RING_LOCALS;
1547 1555
1548 buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; 1556 buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
1549 1557
1550 /* Emit the vertex buffer age */ 1558 /* Emit the vertex buffer age */
1551 BEGIN_RING(2); 1559 BEGIN_RING(2);
@@ -1590,12 +1598,14 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev,
1590 } 1598 }
1591} 1599}
1592 1600
1593static void radeon_cp_dispatch_indices(struct drm_device * dev, 1601static void radeon_cp_dispatch_indices(struct drm_device *dev,
1602 struct drm_master *master,
1594 struct drm_buf * elt_buf, 1603 struct drm_buf * elt_buf,
1595 drm_radeon_tcl_prim_t * prim) 1604 drm_radeon_tcl_prim_t * prim)
1596{ 1605{
1597 drm_radeon_private_t *dev_priv = dev->dev_private; 1606 drm_radeon_private_t *dev_priv = dev->dev_private;
1598 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 1607 struct drm_radeon_master_private *master_priv = master->driver_priv;
1608 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
1599 int offset = dev_priv->gart_buffers_offset + prim->offset; 1609 int offset = dev_priv->gart_buffers_offset + prim->offset;
1600 u32 *data; 1610 u32 *data;
1601 int dwords; 1611 int dwords;
@@ -1870,7 +1880,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
1870 ADVANCE_RING(); 1880 ADVANCE_RING();
1871 COMMIT_RING(); 1881 COMMIT_RING();
1872 1882
1873 radeon_cp_discard_buffer(dev, buf); 1883 radeon_cp_discard_buffer(dev, file_priv->master, buf);
1874 1884
1875 /* Update the input parameters for next time */ 1885 /* Update the input parameters for next time */
1876 image->y += height; 1886 image->y += height;
@@ -2110,7 +2120,8 @@ static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_fi
2110static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) 2120static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
2111{ 2121{
2112 drm_radeon_private_t *dev_priv = dev->dev_private; 2122 drm_radeon_private_t *dev_priv = dev->dev_private;
2113 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2123 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
2124 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
2114 drm_radeon_clear_t *clear = data; 2125 drm_radeon_clear_t *clear = data;
2115 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; 2126 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
2116 DRM_DEBUG("\n"); 2127 DRM_DEBUG("\n");
@@ -2126,7 +2137,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
2126 sarea_priv->nbox * sizeof(depth_boxes[0]))) 2137 sarea_priv->nbox * sizeof(depth_boxes[0])))
2127 return -EFAULT; 2138 return -EFAULT;
2128 2139
2129 radeon_cp_dispatch_clear(dev, clear, depth_boxes); 2140 radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);
2130 2141
2131 COMMIT_RING(); 2142 COMMIT_RING();
2132 return 0; 2143 return 0;
@@ -2134,9 +2145,10 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
2134 2145
2135/* Not sure why this isn't set all the time: 2146/* Not sure why this isn't set all the time:
2136 */ 2147 */
2137static int radeon_do_init_pageflip(struct drm_device * dev) 2148static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master)
2138{ 2149{
2139 drm_radeon_private_t *dev_priv = dev->dev_private; 2150 drm_radeon_private_t *dev_priv = dev->dev_private;
2151 struct drm_radeon_master_private *master_priv = master->driver_priv;
2140 RING_LOCALS; 2152 RING_LOCALS;
2141 2153
2142 DRM_DEBUG("\n"); 2154 DRM_DEBUG("\n");
@@ -2153,8 +2165,8 @@ static int radeon_do_init_pageflip(struct drm_device * dev)
2153 2165
2154 dev_priv->page_flipping = 1; 2166 dev_priv->page_flipping = 1;
2155 2167
2156 if (dev_priv->sarea_priv->pfCurrentPage != 1) 2168 if (master_priv->sarea_priv->pfCurrentPage != 1)
2157 dev_priv->sarea_priv->pfCurrentPage = 0; 2169 master_priv->sarea_priv->pfCurrentPage = 0;
2158 2170
2159 return 0; 2171 return 0;
2160} 2172}
@@ -2172,9 +2184,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f
2172 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2184 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2173 2185
2174 if (!dev_priv->page_flipping) 2186 if (!dev_priv->page_flipping)
2175 radeon_do_init_pageflip(dev); 2187 radeon_do_init_pageflip(dev, file_priv->master);
2176 2188
2177 radeon_cp_dispatch_flip(dev); 2189 radeon_cp_dispatch_flip(dev, file_priv->master);
2178 2190
2179 COMMIT_RING(); 2191 COMMIT_RING();
2180 return 0; 2192 return 0;
@@ -2183,7 +2195,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f
2183static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) 2195static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
2184{ 2196{
2185 drm_radeon_private_t *dev_priv = dev->dev_private; 2197 drm_radeon_private_t *dev_priv = dev->dev_private;
2186 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2198 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
2199 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
2200
2187 DRM_DEBUG("\n"); 2201 DRM_DEBUG("\n");
2188 2202
2189 LOCK_TEST_WITH_RETURN(dev, file_priv); 2203 LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -2193,8 +2207,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
2193 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) 2207 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
2194 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; 2208 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
2195 2209
2196 radeon_cp_dispatch_swap(dev); 2210 radeon_cp_dispatch_swap(dev, file_priv->master);
2197 dev_priv->sarea_priv->ctx_owner = 0; 2211 sarea_priv->ctx_owner = 0;
2198 2212
2199 COMMIT_RING(); 2213 COMMIT_RING();
2200 return 0; 2214 return 0;
@@ -2203,7 +2217,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
2203static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) 2217static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
2204{ 2218{
2205 drm_radeon_private_t *dev_priv = dev->dev_private; 2219 drm_radeon_private_t *dev_priv = dev->dev_private;
2206 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2220 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
2221 drm_radeon_sarea_t *sarea_priv;
2207 struct drm_device_dma *dma = dev->dma; 2222 struct drm_device_dma *dma = dev->dma;
2208 struct drm_buf *buf; 2223 struct drm_buf *buf;
2209 drm_radeon_vertex_t *vertex = data; 2224 drm_radeon_vertex_t *vertex = data;
@@ -2211,6 +2226,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
2211 2226
2212 LOCK_TEST_WITH_RETURN(dev, file_priv); 2227 LOCK_TEST_WITH_RETURN(dev, file_priv);
2213 2228
2229 sarea_priv = master_priv->sarea_priv;
2230
2214 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", 2231 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
2215 DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); 2232 DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
2216 2233
@@ -2263,13 +2280,13 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
2263 prim.finish = vertex->count; /* unused */ 2280 prim.finish = vertex->count; /* unused */
2264 prim.prim = vertex->prim; 2281 prim.prim = vertex->prim;
2265 prim.numverts = vertex->count; 2282 prim.numverts = vertex->count;
2266 prim.vc_format = dev_priv->sarea_priv->vc_format; 2283 prim.vc_format = sarea_priv->vc_format;
2267 2284
2268 radeon_cp_dispatch_vertex(dev, buf, &prim); 2285 radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
2269 } 2286 }
2270 2287
2271 if (vertex->discard) { 2288 if (vertex->discard) {
2272 radeon_cp_discard_buffer(dev, buf); 2289 radeon_cp_discard_buffer(dev, file_priv->master, buf);
2273 } 2290 }
2274 2291
2275 COMMIT_RING(); 2292 COMMIT_RING();
@@ -2279,7 +2296,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
2279static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) 2296static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
2280{ 2297{
2281 drm_radeon_private_t *dev_priv = dev->dev_private; 2298 drm_radeon_private_t *dev_priv = dev->dev_private;
2282 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2299 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
2300 drm_radeon_sarea_t *sarea_priv;
2283 struct drm_device_dma *dma = dev->dma; 2301 struct drm_device_dma *dma = dev->dma;
2284 struct drm_buf *buf; 2302 struct drm_buf *buf;
2285 drm_radeon_indices_t *elts = data; 2303 drm_radeon_indices_t *elts = data;
@@ -2288,6 +2306,8 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file
2288 2306
2289 LOCK_TEST_WITH_RETURN(dev, file_priv); 2307 LOCK_TEST_WITH_RETURN(dev, file_priv);
2290 2308
2309 sarea_priv = master_priv->sarea_priv;
2310
2291 DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", 2311 DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
2292 DRM_CURRENTPID, elts->idx, elts->start, elts->end, 2312 DRM_CURRENTPID, elts->idx, elts->start, elts->end,
2293 elts->discard); 2313 elts->discard);
@@ -2353,11 +2373,11 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file
2353 prim.prim = elts->prim; 2373 prim.prim = elts->prim;
2354 prim.offset = 0; /* offset from start of dma buffers */ 2374 prim.offset = 0; /* offset from start of dma buffers */
2355 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ 2375 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2356 prim.vc_format = dev_priv->sarea_priv->vc_format; 2376 prim.vc_format = sarea_priv->vc_format;
2357 2377
2358 radeon_cp_dispatch_indices(dev, buf, &prim); 2378 radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
2359 if (elts->discard) { 2379 if (elts->discard) {
2360 radeon_cp_discard_buffer(dev, buf); 2380 radeon_cp_discard_buffer(dev, file_priv->master, buf);
2361 } 2381 }
2362 2382
2363 COMMIT_RING(); 2383 COMMIT_RING();
@@ -2468,7 +2488,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
2468 */ 2488 */
2469 radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); 2489 radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
2470 if (indirect->discard) { 2490 if (indirect->discard) {
2471 radeon_cp_discard_buffer(dev, buf); 2491 radeon_cp_discard_buffer(dev, file_priv->master, buf);
2472 } 2492 }
2473 2493
2474 COMMIT_RING(); 2494 COMMIT_RING();
@@ -2478,7 +2498,8 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
2478static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) 2498static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
2479{ 2499{
2480 drm_radeon_private_t *dev_priv = dev->dev_private; 2500 drm_radeon_private_t *dev_priv = dev->dev_private;
2481 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2501 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
2502 drm_radeon_sarea_t *sarea_priv;
2482 struct drm_device_dma *dma = dev->dma; 2503 struct drm_device_dma *dma = dev->dma;
2483 struct drm_buf *buf; 2504 struct drm_buf *buf;
2484 drm_radeon_vertex2_t *vertex = data; 2505 drm_radeon_vertex2_t *vertex = data;
@@ -2487,6 +2508,8 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
2487 2508
2488 LOCK_TEST_WITH_RETURN(dev, file_priv); 2509 LOCK_TEST_WITH_RETURN(dev, file_priv);
2489 2510
2511 sarea_priv = master_priv->sarea_priv;
2512
2490 DRM_DEBUG("pid=%d index=%d discard=%d\n", 2513 DRM_DEBUG("pid=%d index=%d discard=%d\n",
2491 DRM_CURRENTPID, vertex->idx, vertex->discard); 2514 DRM_CURRENTPID, vertex->idx, vertex->discard);
2492 2515
@@ -2547,12 +2570,12 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
2547 tclprim.offset = prim.numverts * 64; 2570 tclprim.offset = prim.numverts * 64;
2548 tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ 2571 tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2549 2572
2550 radeon_cp_dispatch_indices(dev, buf, &tclprim); 2573 radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim);
2551 } else { 2574 } else {
2552 tclprim.numverts = prim.numverts; 2575 tclprim.numverts = prim.numverts;
2553 tclprim.offset = 0; /* not used */ 2576 tclprim.offset = 0; /* not used */
2554 2577
2555 radeon_cp_dispatch_vertex(dev, buf, &tclprim); 2578 radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim);
2556 } 2579 }
2557 2580
2558 if (sarea_priv->nbox == 1) 2581 if (sarea_priv->nbox == 1)
@@ -2560,7 +2583,7 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
2560 } 2583 }
2561 2584
2562 if (vertex->discard) { 2585 if (vertex->discard) {
2563 radeon_cp_discard_buffer(dev, buf); 2586 radeon_cp_discard_buffer(dev, file_priv->master, buf);
2564 } 2587 }
2565 2588
2566 COMMIT_RING(); 2589 COMMIT_RING();
@@ -2909,7 +2932,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2909 goto err; 2932 goto err;
2910 } 2933 }
2911 2934
2912 radeon_cp_discard_buffer(dev, buf); 2935 radeon_cp_discard_buffer(dev, file_priv->master, buf);
2913 break; 2936 break;
2914 2937
2915 case RADEON_CMD_PACKET3: 2938 case RADEON_CMD_PACKET3:
@@ -3020,7 +3043,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
3020 */ 3043 */
3021 case RADEON_PARAM_SAREA_HANDLE: 3044 case RADEON_PARAM_SAREA_HANDLE:
3022 /* The lock is the first dword in the sarea. */ 3045 /* The lock is the first dword in the sarea. */
3023 value = (long)dev->lock.hw_lock; 3046 /* no users of this parameter */
3024 break; 3047 break;
3025#endif 3048#endif
3026 case RADEON_PARAM_GART_TEX_HANDLE: 3049 case RADEON_PARAM_GART_TEX_HANDLE:
@@ -3064,6 +3087,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
3064static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) 3087static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
3065{ 3088{
3066 drm_radeon_private_t *dev_priv = dev->dev_private; 3089 drm_radeon_private_t *dev_priv = dev->dev_private;
3090 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
3067 drm_radeon_setparam_t *sp = data; 3091 drm_radeon_setparam_t *sp = data;
3068 struct drm_radeon_driver_file_fields *radeon_priv; 3092 struct drm_radeon_driver_file_fields *radeon_priv;
3069 3093
@@ -3078,12 +3102,14 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil
3078 DRM_DEBUG("color tiling disabled\n"); 3102 DRM_DEBUG("color tiling disabled\n");
3079 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; 3103 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3080 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; 3104 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3081 dev_priv->sarea_priv->tiling_enabled = 0; 3105 if (master_priv->sarea_priv)
3106 master_priv->sarea_priv->tiling_enabled = 0;
3082 } else if (sp->value == 1) { 3107 } else if (sp->value == 1) {
3083 DRM_DEBUG("color tiling enabled\n"); 3108 DRM_DEBUG("color tiling enabled\n");
3084 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; 3109 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
3085 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; 3110 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
3086 dev_priv->sarea_priv->tiling_enabled = 1; 3111 if (master_priv->sarea_priv)
3112 master_priv->sarea_priv->tiling_enabled = 1;
3087 } 3113 }
3088 break; 3114 break;
3089 case RADEON_SETPARAM_PCIGART_LOCATION: 3115 case RADEON_SETPARAM_PCIGART_LOCATION:
@@ -3129,14 +3155,6 @@ void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
3129 3155
3130void radeon_driver_lastclose(struct drm_device *dev) 3156void radeon_driver_lastclose(struct drm_device *dev)
3131{ 3157{
3132 if (dev->dev_private) {
3133 drm_radeon_private_t *dev_priv = dev->dev_private;
3134
3135 if (dev_priv->sarea_priv &&
3136 dev_priv->sarea_priv->pfCurrentPage != 0)
3137 radeon_cp_dispatch_flip(dev);
3138 }
3139
3140 radeon_do_release(dev); 3158 radeon_do_release(dev);
3141} 3159}
3142 3160
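
At the ioctl level the patch threads file_priv->master into these helpers, and because the SAREA now belongs to a master rather than to the device, code that can run before a master has set one up has to tolerate a NULL sarea_priv, as the setparam hunk does. A small sketch of that guard, using only fields shown above; the function name is made up.

static void example_set_tiling(struct drm_radeon_master_private *master_priv,
                               int enabled)
{
        /* a master may not have mapped its SAREA yet */
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->tiling_enabled = enabled ? 1 : 0;
}
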
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 906f9b9d715d..587f5b2380d4 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1016,7 +1016,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
1016 snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u", 1016 snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
1017 i2c->adap.nr); 1017 i2c->adap.nr);
1018 1018
1019 i2c->clk = clk_get(&dev->dev, "I2CCLK"); 1019 i2c->clk = clk_get(&dev->dev, NULL);
1020 if (IS_ERR(i2c->clk)) { 1020 if (IS_ERR(i2c->clk)) {
1021 ret = PTR_ERR(i2c->clk); 1021 ret = PTR_ERR(i2c->clk);
1022 goto eclk; 1022 goto eclk;
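
The probe now asks the clk framework for the device's default clock instead of one registered under the ad-hoc name "I2CCLK". A rough consumer sketch follows, not taken from the driver: example_probe_clk and the "program the controller" step are placeholders, while clk_get/clk_enable/clk_disable/clk_put and the IS_ERR/PTR_ERR handling are the standard clk API used above.

static int example_probe_clk(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        clk = clk_get(&pdev->dev, NULL);        /* NULL id = the device's default clock */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_enable(clk);
        if (ret) {
                clk_put(clk);
                return ret;
        }

        /* ... program the controller ... */

        clk_disable(clk);
        clk_put(clk);
        return 0;
}
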
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index b7434d24904e..c39079f9c73f 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -40,8 +40,8 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#include <mach/regs-gpio.h> 42#include <mach/regs-gpio.h>
43#include <asm/plat-s3c/regs-iic.h> 43#include <plat/regs-iic.h>
44#include <asm/plat-s3c/iic.h> 44#include <plat/iic.h>
45 45
46/* i2c controller state */ 46/* i2c controller state */
47 47
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index e6857e01d1ba..c9f21e3d4ead 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -62,6 +62,9 @@ config IDE_TIMINGS
62config IDE_ATAPI 62config IDE_ATAPI
63 bool 63 bool
64 64
65config IDE_LEGACY
66 bool
67
65config BLK_DEV_IDE_SATA 68config BLK_DEV_IDE_SATA
66 bool "Support for SATA (deprecated; conflicts with libata SATA driver)" 69 bool "Support for SATA (deprecated; conflicts with libata SATA driver)"
67 default n 70 default n
@@ -724,7 +727,7 @@ config BLK_DEV_IDE_TX4939
724 727
725config IDE_ARM 728config IDE_ARM
726 tristate "ARM IDE support" 729 tristate "ARM IDE support"
727 depends on ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK) 730 depends on ARM && (ARCH_RPC || ARCH_SHARK)
728 default y 731 default y
729 732
730config BLK_DEV_IDE_ICSIDE 733config BLK_DEV_IDE_ICSIDE
@@ -856,6 +859,7 @@ config BLK_DEV_4DRIVES
856config BLK_DEV_ALI14XX 859config BLK_DEV_ALI14XX
857 tristate "ALI M14xx support" 860 tristate "ALI M14xx support"
858 select IDE_TIMINGS 861 select IDE_TIMINGS
862 select IDE_LEGACY
859 help 863 help
860 This driver is enabled at runtime using the "ali14xx.probe" kernel 864 This driver is enabled at runtime using the "ali14xx.probe" kernel
861 boot parameter. It enables support for the secondary IDE interface 865 boot parameter. It enables support for the secondary IDE interface
@@ -866,6 +870,7 @@ config BLK_DEV_ALI14XX
866 870
867config BLK_DEV_DTC2278 871config BLK_DEV_DTC2278
868 tristate "DTC-2278 support" 872 tristate "DTC-2278 support"
873 select IDE_LEGACY
869 help 874 help
870 This driver is enabled at runtime using the "dtc2278.probe" kernel 875 This driver is enabled at runtime using the "dtc2278.probe" kernel
871 boot parameter. It enables support for the secondary IDE interface 876 boot parameter. It enables support for the secondary IDE interface
@@ -876,6 +881,7 @@ config BLK_DEV_DTC2278
876config BLK_DEV_HT6560B 881config BLK_DEV_HT6560B
877 tristate "Holtek HT6560B support" 882 tristate "Holtek HT6560B support"
878 select IDE_TIMINGS 883 select IDE_TIMINGS
884 select IDE_LEGACY
879 help 885 help
880 This driver is enabled at runtime using the "ht6560b.probe" kernel 886 This driver is enabled at runtime using the "ht6560b.probe" kernel
881 boot parameter. It enables support for the secondary IDE interface 887 boot parameter. It enables support for the secondary IDE interface
@@ -886,6 +892,7 @@ config BLK_DEV_HT6560B
886config BLK_DEV_QD65XX 892config BLK_DEV_QD65XX
887 tristate "QDI QD65xx support" 893 tristate "QDI QD65xx support"
888 select IDE_TIMINGS 894 select IDE_TIMINGS
895 select IDE_LEGACY
889 help 896 help
890 This driver is enabled at runtime using the "qd65xx.probe" kernel 897 This driver is enabled at runtime using the "qd65xx.probe" kernel
891 boot parameter. It permits faster I/O speeds to be set. See the 898 boot parameter. It permits faster I/O speeds to be set. See the
@@ -894,6 +901,7 @@ config BLK_DEV_QD65XX
894 901
895config BLK_DEV_UMC8672 902config BLK_DEV_UMC8672
896 tristate "UMC-8672 support" 903 tristate "UMC-8672 support"
904 select IDE_LEGACY
897 help 905 help
898 This driver is enabled at runtime using the "umc8672.probe" kernel 906 This driver is enabled at runtime using the "umc8672.probe" kernel
899 boot parameter. It enables support for the secondary IDE interface 907 boot parameter. It enables support for the secondary IDE interface
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 7818d402b188..177e3f8523ed 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -5,7 +5,7 @@
5EXTRA_CFLAGS += -Idrivers/ide 5EXTRA_CFLAGS += -Idrivers/ide
6 6
7ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \ 7ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
8 ide-taskfile.o ide-park.o ide-pio-blacklist.o 8 ide-taskfile.o ide-pm.o ide-park.o ide-pio-blacklist.o
9 9
10# core IDE code 10# core IDE code
11ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o 11ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o
@@ -15,6 +15,7 @@ ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
15ide-core-$(CONFIG_BLK_DEV_IDEDMA_SFF) += ide-dma-sff.o 15ide-core-$(CONFIG_BLK_DEV_IDEDMA_SFF) += ide-dma-sff.o
16ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o 16ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o
17ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o 17ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
18ide-core-$(CONFIG_IDE_LEGACY) += ide-legacy.o
18 19
19obj-$(CONFIG_IDE) += ide-core.o 20obj-$(CONFIG_IDE) += ide-core.o
20 21
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index 935385c77e06..3623bf013bcf 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -424,10 +424,10 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
424 .name = DRV_NAME, 424 .name = DRV_NAME,
425 .init_chipset = init_chipset_cmd64x, 425 .init_chipset = init_chipset_cmd64x,
426 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 426 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
427 .chipset = ide_cmd646,
428 .port_ops = &cmd64x_port_ops, 427 .port_ops = &cmd64x_port_ops,
429 .dma_ops = &cmd648_dma_ops, 428 .dma_ops = &cmd648_dma_ops,
430 .host_flags = IDE_HFLAG_ABUSE_PREFETCH, 429 .host_flags = IDE_HFLAG_SERIALIZE |
430 IDE_HFLAG_ABUSE_PREFETCH,
431 .pio_mask = ATA_PIO5, 431 .pio_mask = ATA_PIO5,
432 .mwdma_mask = ATA_MWDMA2, 432 .mwdma_mask = ATA_MWDMA2,
433 .udma_mask = ATA_UDMA2, 433 .udma_mask = ATA_UDMA2,
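
The cmd64x hunk drops the .chipset id and requests channel serialization explicitly through IDE_HFLAG_SERIALIZE in host_flags. A sketch of that static form, limited to fields that appear in the hunk; the table name is invented.

static const struct ide_port_info example_chipset __devinitdata = {
        .name           = DRV_NAME,
        .host_flags     = IDE_HFLAG_SERIALIZE | IDE_HFLAG_ABUSE_PREFETCH,
        .pio_mask       = ATA_PIO5,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA2,
};
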
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 5297f07d2933..d37baf8ecc5f 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -292,7 +292,6 @@ static const struct ide_port_info cy82c693_chipset __devinitdata = {
292 .name = DRV_NAME, 292 .name = DRV_NAME,
293 .init_iops = init_iops_cy82c693, 293 .init_iops = init_iops_cy82c693,
294 .port_ops = &cy82c693_port_ops, 294 .port_ops = &cy82c693_port_ops,
295 .chipset = ide_cy82c693,
296 .host_flags = IDE_HFLAG_SINGLE, 295 .host_flags = IDE_HFLAG_SINGLE,
297 .pio_mask = ATA_PIO4, 296 .pio_mask = ATA_PIO4,
298 .swdma_mask = ATA_SWDMA2, 297 .swdma_mask = ATA_SWDMA2,
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index 691506886561..59bd0be9dcb3 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -117,6 +117,10 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
117 hw->chipset = ide_generic; 117 hw->chipset = ide_generic;
118} 118}
119 119
120static const struct ide_port_info gayle_port_info = {
121 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA,
122};
123
120 /* 124 /*
121 * Probe for a Gayle IDE interface (and optionally for an IDE doubler) 125 * Probe for a Gayle IDE interface (and optionally for an IDE doubler)
122 */ 126 */
@@ -178,7 +182,7 @@ found:
178 hws[i] = &hw[i]; 182 hws[i] = &hw[i];
179 } 183 }
180 184
181 rc = ide_host_add(NULL, hws, NULL); 185 rc = ide_host_add(&gayle_port_info, hws, NULL);
182 if (rc) 186 if (rc)
183 release_mem_region(res_start, res_n); 187 release_mem_region(res_start, res_n);
184 188
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index f5afd46ed51c..b18e10d99d2e 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -135,7 +135,6 @@
135/* various tuning parameters */ 135/* various tuning parameters */
136#define HPT_RESET_STATE_ENGINE 136#define HPT_RESET_STATE_ENGINE
137#undef HPT_DELAY_INTERRUPT 137#undef HPT_DELAY_INTERRUPT
138#define HPT_SERIALIZE_IO 0
139 138
140static const char *quirk_drives[] = { 139static const char *quirk_drives[] = {
141 "QUANTUM FIREBALLlct08 08", 140 "QUANTUM FIREBALLlct08 08",
@@ -1288,7 +1287,6 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
1288static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) 1287static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1289{ 1288{
1290 struct hpt_info *info = hpt3xx_get_info(hwif->dev); 1289 struct hpt_info *info = hpt3xx_get_info(hwif->dev);
1291 int serialize = HPT_SERIALIZE_IO;
1292 u8 chip_type = info->chip_type; 1290 u8 chip_type = info->chip_type;
1293 1291
1294 /* Cache the channel's MISC. control registers' offset */ 1292 /* Cache the channel's MISC. control registers' offset */
@@ -1305,13 +1303,9 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1305 * Clock is shared between the channels, 1303 * Clock is shared between the channels,
1306 * so we'll have to serialize them... :-( 1304 * so we'll have to serialize them... :-(
1307 */ 1305 */
1308 serialize = 1; 1306 hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
1309 hwif->rw_disk = &hpt3xxn_rw_disk; 1307 hwif->rw_disk = &hpt3xxn_rw_disk;
1310 } 1308 }
1311
1312 /* Serialize access to this device if needed */
1313 if (serialize && hwif->mate)
1314 hwif->serialized = hwif->mate->serialized = 1;
1315} 1309}
1316 1310
1317static int __devinit init_dma_hpt366(ide_hwif_t *hwif, 1311static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
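
In hpt366 the same request is made at runtime: instead of setting hwif->serialized on both mates, the init hook sets IDE_HFLAG_SERIALIZE on the shared host when the chip needs it. A sketch of that form; the hook name and the condition parameter are placeholders for the HPT3xxN shared-clock check above.

static void example_init_hwif(ide_hwif_t *hwif, int chip_needs_serialize)
{
        if (chip_needs_serialize)
                hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
}
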
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 244a8a052ce8..fd4a36433050 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -615,10 +615,10 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
615 in_params[0].buffer.length = sizeof(struct GTM_buffer); 615 in_params[0].buffer.length = sizeof(struct GTM_buffer);
616 in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm; 616 in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm;
617 in_params[1].type = ACPI_TYPE_BUFFER; 617 in_params[1].type = ACPI_TYPE_BUFFER;
618 in_params[1].buffer.length = sizeof(ATA_ID_WORDS * 2); 618 in_params[1].buffer.length = ATA_ID_WORDS * 2;
619 in_params[1].buffer.pointer = (u8 *)&master->idbuff; 619 in_params[1].buffer.pointer = (u8 *)&master->idbuff;
620 in_params[2].type = ACPI_TYPE_BUFFER; 620 in_params[2].type = ACPI_TYPE_BUFFER;
621 in_params[2].buffer.length = sizeof(ATA_ID_WORDS * 2); 621 in_params[2].buffer.length = ATA_ID_WORDS * 2;
622 in_params[2].buffer.pointer = (u8 *)&slave->idbuff; 622 in_params[2].buffer.pointer = (u8 *)&slave->idbuff;
623 /* Output buffer: _STM has no output */ 623 /* Output buffer: _STM has no output */
624 624
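
The length fix above is a classic sizeof slip: sizeof applied to the expression ATA_ID_WORDS * 2 yields the size of the expression's type (an int), not the 512-byte identify buffer the _STM call needs. A stand-alone demonstration, assuming ATA_ID_WORDS is 256 as in <linux/ata.h> and a 32-bit int:

#include <stdio.h>

#define ATA_ID_WORDS    256     /* redefined locally for the demo */

int main(void)
{
        /* typically prints "sizeof: 4, intended: 512" */
        printf("sizeof: %zu, intended: %d\n",
               sizeof(ATA_ID_WORDS * 2), ATA_ID_WORDS * 2);
        return 0;
}
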
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 42ab6d8715f2..5daa4dd1b018 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -262,7 +262,6 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
262 struct request *failed = (struct request *) rq->buffer; 262 struct request *failed = (struct request *) rq->buffer;
263 struct cdrom_info *info = drive->driver_data; 263 struct cdrom_info *info = drive->driver_data;
264 void *sense = &info->sense_data; 264 void *sense = &info->sense_data;
265 unsigned long flags;
266 265
267 if (failed) { 266 if (failed) {
268 if (failed->sense) { 267 if (failed->sense) {
@@ -278,11 +277,9 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
278 failed->hard_nr_sectors)) 277 failed->hard_nr_sectors))
279 BUG(); 278 BUG();
280 } else { 279 } else {
281 spin_lock_irqsave(&ide_lock, flags); 280 if (blk_end_request(failed, -EIO,
282 if (__blk_end_request(failed, -EIO, 281 failed->data_len))
283 failed->data_len))
284 BUG(); 282 BUG();
285 spin_unlock_irqrestore(&ide_lock, flags);
286 } 283 }
287 } else 284 } else
288 cdrom_analyze_sense_data(drive, NULL, sense); 285 cdrom_analyze_sense_data(drive, NULL, sense);
@@ -317,7 +314,8 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
317static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 314static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
318{ 315{
319 ide_hwif_t *hwif = drive->hwif; 316 ide_hwif_t *hwif = drive->hwif;
320 struct request *rq = hwif->hwgroup->rq; 317 ide_hwgroup_t *hwgroup = hwif->hwgroup;
318 struct request *rq = hwgroup->rq;
321 int stat, err, sense_key; 319 int stat, err, sense_key;
322 320
323 /* check for errors */ 321 /* check for errors */
@@ -426,16 +424,17 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
426 if (time_after(jiffies, info->write_timeout)) 424 if (time_after(jiffies, info->write_timeout))
427 do_end_request = 1; 425 do_end_request = 1;
428 else { 426 else {
427 struct request_queue *q = drive->queue;
429 unsigned long flags; 428 unsigned long flags;
430 429
431 /* 430 /*
432 * take a breather relying on the unplug 431 * take a breather relying on the unplug
433 * timer to kick us again 432 * timer to kick us again
434 */ 433 */
435 spin_lock_irqsave(&ide_lock, flags); 434 spin_lock_irqsave(q->queue_lock, flags);
436 blk_plug_device(drive->queue); 435 blk_plug_device(q);
437 spin_unlock_irqrestore(&ide_lock, 436 spin_unlock_irqrestore(q->queue_lock, flags);
438 flags); 437
439 return 1; 438 return 1;
440 } 439 }
441 } 440 }
@@ -504,12 +503,14 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
504 503
505end_request: 504end_request:
506 if (stat & ATA_ERR) { 505 if (stat & ATA_ERR) {
506 struct request_queue *q = drive->queue;
507 unsigned long flags; 507 unsigned long flags;
508 508
509 spin_lock_irqsave(&ide_lock, flags); 509 spin_lock_irqsave(q->queue_lock, flags);
510 blkdev_dequeue_request(rq); 510 blkdev_dequeue_request(rq);
511 HWGROUP(drive)->rq = NULL; 511 spin_unlock_irqrestore(q->queue_lock, flags);
512 spin_unlock_irqrestore(&ide_lock, flags); 512
513 hwgroup->rq = NULL;
513 514
514 cdrom_queue_request_sense(drive, rq->sense, rq); 515 cdrom_queue_request_sense(drive, rq->sense, rq);
515 } else 516 } else
@@ -773,52 +774,6 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
773 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); 774 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
774} 775}
775 776
776#define IDECD_SEEK_THRESHOLD (1000) /* 1000 blocks */
777#define IDECD_SEEK_TIMER (5 * WAIT_MIN_SLEEP) /* 100 ms */
778#define IDECD_SEEK_TIMEOUT (2 * WAIT_CMD) /* 20 sec */
779
780static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
781{
782 struct cdrom_info *info = drive->driver_data;
783 int stat;
784 static int retry = 10;
785
786 ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
787
788 if (cdrom_decode_status(drive, 0, &stat))
789 return ide_stopped;
790
791 drive->atapi_flags |= IDE_AFLAG_SEEKING;
792
793 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
794 if (--retry == 0)
795 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
796 }
797 return ide_stopped;
798}
799
800static void ide_cd_prepare_seek_request(ide_drive_t *drive, struct request *rq)
801{
802 sector_t frame = rq->sector;
803
804 ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
805
806 sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
807
808 memset(rq->cmd, 0, BLK_MAX_CDB);
809 rq->cmd[0] = GPCMD_SEEK;
810 put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
811
812 rq->timeout = ATAPI_WAIT_PC;
813}
814
815static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
816{
817 struct request *rq = drive->hwif->hwgroup->rq;
818
819 return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
820}
821
822/* 777/*
823 * Fix up a possibly partially-processed request so that we can start it over 778 * Fix up a possibly partially-processed request so that we can start it over
824 * entirely, or even put it back on the request queue. 779 * entirely, or even put it back on the request queue.
@@ -950,7 +905,8 @@ static int cdrom_newpc_intr_dummy_cb(struct request *rq)
950static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) 905static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
951{ 906{
952 ide_hwif_t *hwif = drive->hwif; 907 ide_hwif_t *hwif = drive->hwif;
953 struct request *rq = HWGROUP(drive)->rq; 908 ide_hwgroup_t *hwgroup = hwif->hwgroup;
909 struct request *rq = hwgroup->rq;
954 xfer_func_t *xferfunc; 910 xfer_func_t *xferfunc;
955 ide_expiry_t *expiry = NULL; 911 ide_expiry_t *expiry = NULL;
956 int dma_error = 0, dma, stat, thislen, uptodate = 0; 912 int dma_error = 0, dma, stat, thislen, uptodate = 0;
@@ -1148,17 +1104,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1148 1104
1149end_request: 1105end_request:
1150 if (blk_pc_request(rq)) { 1106 if (blk_pc_request(rq)) {
1151 unsigned long flags;
1152 unsigned int dlen = rq->data_len; 1107 unsigned int dlen = rq->data_len;
1153 1108
1154 if (dma) 1109 if (dma)
1155 rq->data_len = 0; 1110 rq->data_len = 0;
1156 1111
1157 spin_lock_irqsave(&ide_lock, flags); 1112 if (blk_end_request(rq, 0, dlen))
1158 if (__blk_end_request(rq, 0, dlen))
1159 BUG(); 1113 BUG();
1160 HWGROUP(drive)->rq = NULL; 1114
1161 spin_unlock_irqrestore(&ide_lock, flags); 1115 hwgroup->rq = NULL;
1162 } else { 1116 } else {
1163 if (!uptodate) 1117 if (!uptodate)
1164 rq->cmd_flags |= REQ_FAILED; 1118 rq->cmd_flags |= REQ_FAILED;
@@ -1260,7 +1214,6 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1260static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, 1214static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1261 sector_t block) 1215 sector_t block)
1262{ 1216{
1263 struct cdrom_info *info = drive->driver_data;
1264 ide_handler_t *fn; 1217 ide_handler_t *fn;
1265 int xferlen; 1218 int xferlen;
1266 1219
@@ -1270,44 +1223,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1270 (unsigned long long)block); 1223 (unsigned long long)block);
1271 1224
1272 if (blk_fs_request(rq)) { 1225 if (blk_fs_request(rq)) {
1273 if (drive->atapi_flags & IDE_AFLAG_SEEKING) { 1226 xferlen = 32768;
1274 ide_hwif_t *hwif = drive->hwif; 1227 fn = cdrom_start_rw_cont;
1275 unsigned long elapsed = jiffies - info->start_seek;
1276 int stat = hwif->tp_ops->read_status(hwif);
1277
1278 if ((stat & ATA_DSC) != ATA_DSC) {
1279 if (elapsed < IDECD_SEEK_TIMEOUT) {
1280 ide_stall_queue(drive,
1281 IDECD_SEEK_TIMER);
1282 return ide_stopped;
1283 }
1284 printk(KERN_ERR PFX "%s: DSC timeout\n",
1285 drive->name);
1286 }
1287 drive->atapi_flags &= ~IDE_AFLAG_SEEKING;
1288 }
1289 if (rq_data_dir(rq) == READ &&
1290 IDE_LARGE_SEEK(info->last_block, block,
1291 IDECD_SEEK_THRESHOLD) &&
1292 (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)) {
1293 xferlen = 0;
1294 fn = cdrom_start_seek_continuation;
1295 1228
1296 drive->dma = 0; 1229 if (cdrom_start_rw(drive, rq) == ide_stopped)
1297 info->start_seek = jiffies; 1230 return ide_stopped;
1298
1299 ide_cd_prepare_seek_request(drive, rq);
1300 } else {
1301 xferlen = 32768;
1302 fn = cdrom_start_rw_cont;
1303
1304 if (cdrom_start_rw(drive, rq) == ide_stopped)
1305 return ide_stopped;
1306 1231
1307 if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped) 1232 if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped)
1308 return ide_stopped; 1233 return ide_stopped;
1309 }
1310 info->last_block = block;
1311 } else if (blk_sense_request(rq) || blk_pc_request(rq) || 1234 } else if (blk_sense_request(rq) || blk_pc_request(rq) ||
1312 rq->cmd_type == REQ_TYPE_ATA_PC) { 1235 rq->cmd_type == REQ_TYPE_ATA_PC) {
1313 xferlen = rq->data_len; 1236 xferlen = rq->data_len;
@@ -1908,13 +1831,6 @@ static ide_proc_entry_t idecd_proc[] = {
1908 { NULL, 0, NULL, NULL } 1831 { NULL, 0, NULL, NULL }
1909}; 1832};
1910 1833
1911ide_devset_rw_flag(dsc_overlap, IDE_DFLAG_DSC_OVERLAP);
1912
1913static const struct ide_proc_devset idecd_settings[] = {
1914 IDE_PROC_DEVSET(dsc_overlap, 0, 1),
1915 { 0 },
1916};
1917
1918static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive) 1834static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
1919{ 1835{
1920 return idecd_proc; 1836 return idecd_proc;
@@ -1922,7 +1838,7 @@ static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
1922 1838
1923static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive) 1839static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive)
1924{ 1840{
1925 return idecd_settings; 1841 return NULL;
1926} 1842}
1927#endif 1843#endif
1928 1844
@@ -2022,11 +1938,6 @@ static int ide_cdrom_setup(ide_drive_t *drive)
2022 /* set correct block size */ 1938 /* set correct block size */
2023 blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE); 1939 blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
2024 1940
2025 if (drive->next != drive)
2026 drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
2027 else
2028 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
2029
2030 if (ide_cdrom_register(drive, nslots)) { 1941 if (ide_cdrom_register(drive, nslots)) {
2031 printk(KERN_ERR PFX "%s: %s failed to register device with the" 1942 printk(KERN_ERR PFX "%s: %s failed to register device with the"
2032 " cdrom driver.\n", drive->name, __func__); 1943 " cdrom driver.\n", drive->name, __func__);
@@ -2063,7 +1974,6 @@ static void ide_cd_release(struct kref *kref)
2063 kfree(info->toc); 1974 kfree(info->toc);
2064 if (devinfo->handle == drive) 1975 if (devinfo->handle == drive)
2065 unregister_cdrom(devinfo); 1976 unregister_cdrom(devinfo);
2066 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
2067 drive->driver_data = NULL; 1977 drive->driver_data = NULL;
2068 blk_queue_prep_rq(drive->queue, NULL); 1978 blk_queue_prep_rq(drive->queue, NULL);
2069 g->private_data = NULL; 1979 g->private_data = NULL;
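
Throughout ide-cd.c the patch swaps __blk_end_request, which must be called with the queue lock held (previously taken via the global ide_lock), for blk_end_request, which acquires the request queue's own lock internally; the remaining locked sections (blk_plug_device, blkdev_dequeue_request) switch to q->queue_lock. A sketch of the resulting completion path, with rq, error and nr_bytes as placeholders:

static void example_finish_rq(ide_hwgroup_t *hwgroup, struct request *rq,
                              int error, unsigned int nr_bytes)
{
        /* blk_end_request() takes q->queue_lock itself, so no ide_lock here */
        if (blk_end_request(rq, error, nr_bytes))
                BUG();

        hwgroup->rq = NULL;
}
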
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 5882b9a9ea8b..d5ce3362dbd1 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -88,8 +88,6 @@ struct cdrom_info {
88 struct request_sense sense_data; 88 struct request_sense sense_data;
89 89
90 struct request request_sense_request; 90 struct request request_sense_request;
91 unsigned long last_block;
92 unsigned long start_seek;
93 91
94 u8 max_speed; /* Max speed of the drive. */ 92 u8 max_speed; /* Max speed of the drive. */
95 u8 current_speed; /* Current speed of the drive. */ 93 u8 current_speed; /* Current speed of the drive. */
diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c
index cac431f0df17..f6d2d44d8a9a 100644
--- a/drivers/ide/ide-dma-sff.c
+++ b/drivers/ide/ide-dma-sff.c
@@ -98,10 +98,10 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
98{ 98{
99 ide_hwif_t *hwif = drive->hwif; 99 ide_hwif_t *hwif = drive->hwif;
100 __le32 *table = (__le32 *)hwif->dmatable_cpu; 100 __le32 *table = (__le32 *)hwif->dmatable_cpu;
101 unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
102 unsigned int count = 0; 101 unsigned int count = 0;
103 int i; 102 int i;
104 struct scatterlist *sg; 103 struct scatterlist *sg;
104 u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
105 105
106 hwif->sg_nents = ide_build_sglist(drive, rq); 106 hwif->sg_nents = ide_build_sglist(drive, rq);
107 if (hwif->sg_nents == 0) 107 if (hwif->sg_nents == 0)
@@ -176,15 +176,10 @@ int ide_dma_setup(ide_drive_t *drive)
176{ 176{
177 ide_hwif_t *hwif = drive->hwif; 177 ide_hwif_t *hwif = drive->hwif;
178 struct request *rq = hwif->hwgroup->rq; 178 struct request *rq = hwif->hwgroup->rq;
179 unsigned int reading; 179 unsigned int reading = rq_data_dir(rq) ? 0 : ATA_DMA_WR;
180 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; 180 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
181 u8 dma_stat; 181 u8 dma_stat;
182 182
183 if (rq_data_dir(rq))
184 reading = 0;
185 else
186 reading = 1 << 3;
187
188 /* fall back to pio! */ 183 /* fall back to pio! */
189 if (!ide_build_dmatable(drive, rq)) { 184 if (!ide_build_dmatable(drive, rq)) {
190 ide_map_sg(drive, rq); 185 ide_map_sg(drive, rq);
@@ -209,10 +204,11 @@ int ide_dma_setup(ide_drive_t *drive)
209 204
210 /* clear INTR & ERROR flags */ 205 /* clear INTR & ERROR flags */
211 if (mmio) 206 if (mmio)
212 writeb(dma_stat | 6, 207 writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
213 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)); 208 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
214 else 209 else
215 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS); 210 outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
211 hwif->dma_base + ATA_DMA_STATUS);
216 212
217 drive->waiting_for_dma = 1; 213 drive->waiting_for_dma = 1;
218 return 0; 214 return 0;
@@ -246,14 +242,13 @@ static int dma_timer_expiry(ide_drive_t *drive)
246 242
247 hwif->hwgroup->expiry = NULL; /* one free ride for now */ 243 hwif->hwgroup->expiry = NULL; /* one free ride for now */
248 244
249 /* 1 dmaing, 2 error, 4 intr */ 245 if (dma_stat & ATA_DMA_ERR) /* ERROR */
250 if (dma_stat & 2) /* ERROR */
251 return -1; 246 return -1;
252 247
253 if (dma_stat & 1) /* DMAing */ 248 if (dma_stat & ATA_DMA_ACTIVE) /* DMAing */
254 return WAIT_CMD; 249 return WAIT_CMD;
255 250
256 if (dma_stat & 4) /* Got an Interrupt */ 251 if (dma_stat & ATA_DMA_INTR) /* Got an Interrupt */
257 return WAIT_CMD; 252 return WAIT_CMD;
258 253
259 return 0; /* Status is unknown -- reset the bus */ 254 return 0; /* Status is unknown -- reset the bus */
@@ -279,12 +274,11 @@ void ide_dma_start(ide_drive_t *drive)
279 */ 274 */
280 if (hwif->host_flags & IDE_HFLAG_MMIO) { 275 if (hwif->host_flags & IDE_HFLAG_MMIO) {
281 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); 276 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
282 /* start DMA */ 277 writeb(dma_cmd | ATA_DMA_START,
283 writeb(dma_cmd | 1,
284 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); 278 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
285 } else { 279 } else {
286 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); 280 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
287 outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD); 281 outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
288 } 282 }
289 283
290 wmb(); 284 wmb();
@@ -296,19 +290,18 @@ int ide_dma_end(ide_drive_t *drive)
296{ 290{
297 ide_hwif_t *hwif = drive->hwif; 291 ide_hwif_t *hwif = drive->hwif;
298 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; 292 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
299 u8 dma_stat = 0, dma_cmd = 0; 293 u8 dma_stat = 0, dma_cmd = 0, mask;
300 294
301 drive->waiting_for_dma = 0; 295 drive->waiting_for_dma = 0;
302 296
297 /* stop DMA */
303 if (mmio) { 298 if (mmio) {
304 /* get DMA command mode */
305 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); 299 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
306 /* stop DMA */ 300 writeb(dma_cmd & ~ATA_DMA_START,
307 writeb(dma_cmd & ~1,
308 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); 301 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
309 } else { 302 } else {
310 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); 303 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
311 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); 304 outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
312 } 305 }
313 306
314 /* get DMA status */ 307 /* get DMA status */
@@ -316,16 +309,21 @@ int ide_dma_end(ide_drive_t *drive)
316 309
317 if (mmio) 310 if (mmio)
318 /* clear the INTR & ERROR bits */ 311 /* clear the INTR & ERROR bits */
319 writeb(dma_stat | 6, 312 writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
320 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)); 313 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
321 else 314 else
322 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS); 315 outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
316 hwif->dma_base + ATA_DMA_STATUS);
323 317
324 /* purge DMA mappings */ 318 /* purge DMA mappings */
325 ide_destroy_dmatable(drive); 319 ide_destroy_dmatable(drive);
326 /* verify good DMA status */
327 wmb(); 320 wmb();
328 return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0; 321
322 /* verify good DMA status */
323 mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR;
324 if ((dma_stat & mask) != ATA_DMA_INTR)
325 return 0x10 | dma_stat;
326 return 0;
329} 327}
330EXPORT_SYMBOL_GPL(ide_dma_end); 328EXPORT_SYMBOL_GPL(ide_dma_end);
331 329
@@ -335,11 +333,7 @@ int ide_dma_test_irq(ide_drive_t *drive)
335 ide_hwif_t *hwif = drive->hwif; 333 ide_hwif_t *hwif = drive->hwif;
336 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif); 334 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
337 335
338 /* return 1 if INTR asserted */ 336 return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
339 if ((dma_stat & 4) == 4)
340 return 1;
341
342 return 0;
343} 337}
344EXPORT_SYMBOL_GPL(ide_dma_test_irq); 338EXPORT_SYMBOL_GPL(ide_dma_test_irq);
345 339
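
The DMA status handling above replaces the raw 1/2/4/6 magic numbers with the ATA_DMA_ACTIVE, ATA_DMA_ERR, ATA_DMA_INTR and ATA_DMA_START bit names. A sketch of the completion test in isolation; dma_stat stands for the value read back from the bus-master status register, and the function name is invented.

static int example_check_dma_stat(u8 dma_stat)
{
        u8 mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR;

        /* a clean completion has only the interrupt bit set */
        if ((dma_stat & mask) != ATA_DMA_INTR)
                return 0x10 | dma_stat;         /* still busy, or an error latched */

        return 0;
}
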
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index cc35d6dbd410..ecacc008fdaf 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -84,11 +84,11 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
84 ide_dma_on(drive); 84 ide_dma_on(drive);
85 } 85 }
86 86
87 if (!__blk_end_request(rq, error, nr_bytes)) { 87 if (!blk_end_request(rq, error, nr_bytes))
88 if (dequeue)
89 HWGROUP(drive)->rq = NULL;
90 ret = 0; 88 ret = 0;
91 } 89
90 if (ret == 0 && dequeue)
91 drive->hwif->hwgroup->rq = NULL;
92 92
93 return ret; 93 return ret;
94} 94}
@@ -107,16 +107,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
107int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) 107int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
108{ 108{
109 unsigned int nr_bytes = nr_sectors << 9; 109 unsigned int nr_bytes = nr_sectors << 9;
110 struct request *rq; 110 struct request *rq = drive->hwif->hwgroup->rq;
111 unsigned long flags;
112 int ret = 1;
113
114 /*
115 * room for locking improvements here, the calls below don't
116 * need the queue lock held at all
117 */
118 spin_lock_irqsave(&ide_lock, flags);
119 rq = HWGROUP(drive)->rq;
120 111
121 if (!nr_bytes) { 112 if (!nr_bytes) {
122 if (blk_pc_request(rq)) 113 if (blk_pc_request(rq))
@@ -125,105 +116,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
125 nr_bytes = rq->hard_cur_sectors << 9; 116 nr_bytes = rq->hard_cur_sectors << 9;
126 } 117 }
127 118
128 ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1); 119 return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
129
130 spin_unlock_irqrestore(&ide_lock, flags);
131 return ret;
132} 120}
133EXPORT_SYMBOL(ide_end_request); 121EXPORT_SYMBOL(ide_end_request);
134 122
135static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
136{
137 struct request_pm_state *pm = rq->data;
138
139#ifdef DEBUG_PM
140 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
141 drive->name, pm->pm_step);
142#endif
143 if (drive->media != ide_disk)
144 return;
145
146 switch (pm->pm_step) {
147 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
148 if (pm->pm_state == PM_EVENT_FREEZE)
149 pm->pm_step = IDE_PM_COMPLETED;
150 else
151 pm->pm_step = IDE_PM_STANDBY;
152 break;
153 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
154 pm->pm_step = IDE_PM_COMPLETED;
155 break;
156 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
157 pm->pm_step = IDE_PM_IDLE;
158 break;
159 case IDE_PM_IDLE: /* Resume step 2 (idle)*/
160 pm->pm_step = IDE_PM_RESTORE_DMA;
161 break;
162 }
163}
164
165static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
166{
167 struct request_pm_state *pm = rq->data;
168 ide_task_t *args = rq->special;
169
170 memset(args, 0, sizeof(*args));
171
172 switch (pm->pm_step) {
173 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
174 if (drive->media != ide_disk)
175 break;
176 /* Not supported? Switch to next step now. */
177 if (ata_id_flush_enabled(drive->id) == 0 ||
178 (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
179 ide_complete_power_step(drive, rq);
180 return ide_stopped;
181 }
182 if (ata_id_flush_ext_enabled(drive->id))
183 args->tf.command = ATA_CMD_FLUSH_EXT;
184 else
185 args->tf.command = ATA_CMD_FLUSH;
186 goto out_do_tf;
187 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
188 args->tf.command = ATA_CMD_STANDBYNOW1;
189 goto out_do_tf;
190 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
191 ide_set_max_pio(drive);
192 /*
193 * skip IDE_PM_IDLE for ATAPI devices
194 */
195 if (drive->media != ide_disk)
196 pm->pm_step = IDE_PM_RESTORE_DMA;
197 else
198 ide_complete_power_step(drive, rq);
199 return ide_stopped;
200 case IDE_PM_IDLE: /* Resume step 2 (idle) */
201 args->tf.command = ATA_CMD_IDLEIMMEDIATE;
202 goto out_do_tf;
203 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
204 /*
205 * Right now, all we do is call ide_set_dma(drive),
206 * we could be smarter and check for current xfer_speed
207 * in struct drive etc...
208 */
209 if (drive->hwif->dma_ops == NULL)
210 break;
211 /*
212 * TODO: respect IDE_DFLAG_USING_DMA
213 */
214 ide_set_dma(drive);
215 break;
216 }
217
218 pm->pm_step = IDE_PM_COMPLETED;
219 return ide_stopped;
220
221out_do_tf:
222 args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
223 args->data_phase = TASKFILE_NO_DATA;
224 return do_rw_taskfile(drive, args);
225}
226
227/** 123/**
228 * ide_end_dequeued_request - complete an IDE I/O 124 * ide_end_dequeued_request - complete an IDE I/O
229 * @drive: IDE device for the I/O 125 * @drive: IDE device for the I/O
@@ -242,48 +138,12 @@ out_do_tf:
242int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, 138int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
243 int uptodate, int nr_sectors) 139 int uptodate, int nr_sectors)
244{ 140{
245 unsigned long flags;
246 int ret;
247
248 spin_lock_irqsave(&ide_lock, flags);
249 BUG_ON(!blk_rq_started(rq)); 141 BUG_ON(!blk_rq_started(rq));
250 ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
251 spin_unlock_irqrestore(&ide_lock, flags);
252 142
253 return ret; 143 return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
254} 144}
255EXPORT_SYMBOL_GPL(ide_end_dequeued_request); 145EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
256 146
257
258/**
259 * ide_complete_pm_request - end the current Power Management request
260 * @drive: target drive
261 * @rq: request
262 *
263 * This function cleans up the current PM request and stops the queue
264 * if necessary.
265 */
266static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
267{
268 unsigned long flags;
269
270#ifdef DEBUG_PM
271 printk("%s: completing PM request, %s\n", drive->name,
272 blk_pm_suspend_request(rq) ? "suspend" : "resume");
273#endif
274 spin_lock_irqsave(&ide_lock, flags);
275 if (blk_pm_suspend_request(rq)) {
276 blk_stop_queue(drive->queue);
277 } else {
278 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
279 blk_start_queue(drive->queue);
280 }
281 HWGROUP(drive)->rq = NULL;
282 if (__blk_end_request(rq, 0, 0))
283 BUG();
284 spin_unlock_irqrestore(&ide_lock, flags);
285}
286
287/** 147/**
288 * ide_end_drive_cmd - end an explicit drive command 148 * ide_end_drive_cmd - end an explicit drive command
289 * @drive: command 149 * @drive: command
@@ -300,19 +160,12 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
300 160
301void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) 161void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
302{ 162{
303 unsigned long flags; 163 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
304 struct request *rq; 164 struct request *rq = hwgroup->rq;
305
306 spin_lock_irqsave(&ide_lock, flags);
307 rq = HWGROUP(drive)->rq;
308 spin_unlock_irqrestore(&ide_lock, flags);
309 165
310 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 166 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
311 ide_task_t *task = (ide_task_t *)rq->special; 167 ide_task_t *task = (ide_task_t *)rq->special;
312 168
313 if (rq->errors == 0)
314 rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);
315
316 if (task) { 169 if (task) {
317 struct ide_taskfile *tf = &task->tf; 170 struct ide_taskfile *tf = &task->tf;
318 171
@@ -333,15 +186,14 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
333 return; 186 return;
334 } 187 }
335 188
336 spin_lock_irqsave(&ide_lock, flags); 189 hwgroup->rq = NULL;
337 HWGROUP(drive)->rq = NULL; 190
338 rq->errors = err; 191 rq->errors = err;
339 if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0), 192
340 blk_rq_bytes(rq)))) 193 if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
194 blk_rq_bytes(rq))))
341 BUG(); 195 BUG();
342 spin_unlock_irqrestore(&ide_lock, flags);
343} 196}
344
345EXPORT_SYMBOL(ide_end_drive_cmd); 197EXPORT_SYMBOL(ide_end_drive_cmd);
346 198
347static void ide_kill_rq(ide_drive_t *drive, struct request *rq) 199static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
@@ -720,40 +572,6 @@ static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
720 } 572 }
721} 573}
722 574
723static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
724{
725 struct request_pm_state *pm = rq->data;
726
727 if (blk_pm_suspend_request(rq) &&
728 pm->pm_step == IDE_PM_START_SUSPEND)
729 /* Mark drive blocked when starting the suspend sequence. */
730 drive->dev_flags |= IDE_DFLAG_BLOCKED;
731 else if (blk_pm_resume_request(rq) &&
732 pm->pm_step == IDE_PM_START_RESUME) {
733 /*
734 * The first thing we do on wakeup is to wait for BSY bit to
735 * go away (with a looong timeout) as a drive on this hwif may
736 * just be POSTing itself.
737 * We do that before even selecting as the "other" device on
738 * the bus may be broken enough to walk on our toes at this
739 * point.
740 */
741 ide_hwif_t *hwif = drive->hwif;
742 int rc;
743#ifdef DEBUG_PM
744 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
745#endif
746 rc = ide_wait_not_busy(hwif, 35000);
747 if (rc)
748 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
749 SELECT_DRIVE(drive);
750 hwif->tp_ops->set_irq(hwif, 1);
751 rc = ide_wait_not_busy(hwif, 100000);
752 if (rc)
753 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
754 }
755}
756
757/** 575/**
758 * start_request - start of I/O and command issuing for IDE 576 * start_request - start of I/O and command issuing for IDE
759 * 577 *
@@ -927,7 +745,7 @@ repeat:
927 745
928/* 746/*
929 * Issue a new request to a drive from hwgroup 747 * Issue a new request to a drive from hwgroup
930 * Caller must have already done spin_lock_irqsave(&ide_lock, ..); 748 * Caller must have already done spin_lock_irqsave(&hwgroup->lock, ..);
931 * 749 *
932 * A hwgroup is a serialized group of IDE interfaces. Usually there is 750 * A hwgroup is a serialized group of IDE interfaces. Usually there is
933 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640) 751 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
@@ -939,7 +757,7 @@ repeat:
939 * possibly along with many other devices. This is especially common in 757 * possibly along with many other devices. This is especially common in
940 * PCI-based systems with off-board IDE controller cards. 758 * PCI-based systems with off-board IDE controller cards.
941 * 759 *
942 * The IDE driver uses the single global ide_lock spinlock to protect 760 * The IDE driver uses a per-hwgroup spinlock to protect
943 * access to the request queues, and to protect the hwgroup->busy flag. 761 * access to the request queues, and to protect the hwgroup->busy flag.
944 * 762 *
945 * The first thread into the driver for a particular hwgroup sets the 763 * The first thread into the driver for a particular hwgroup sets the
@@ -955,7 +773,7 @@ repeat:
955 * will start the next request from the queue. If no more work remains, 773 * will start the next request from the queue. If no more work remains,
956 * the driver will clear the hwgroup->busy flag and exit. 774 * the driver will clear the hwgroup->busy flag and exit.
957 * 775 *
958 * The ide_lock (spinlock) is used to protect all access to the 776 * The per-hwgroup spinlock is used to protect all access to the
959 * hwgroup->busy flag, but is otherwise not needed for most processing in 777 * hwgroup->busy flag, but is otherwise not needed for most processing in
960 * the driver. This makes the driver much more friendlier to shared IRQs 778 * the driver. This makes the driver much more friendlier to shared IRQs
961 * than previous designs, while remaining 100% (?) SMP safe and capable. 779 * than previous designs, while remaining 100% (?) SMP safe and capable.
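The comment rewrite above documents the core of this series: the single global ide_lock is replaced by a spinlock embedded in each hwgroup, so the request queues and the busy flag are serialized per hwgroup rather than across every IDE port in the system. A standalone sketch of the pattern (nothing here reflects the real ide_hwgroup_t layout; a pthread mutex stands in for the spinlock):

    #include <pthread.h>
    #include <stdio.h>

    /* Minimal stand-in for ide_hwgroup_t: one lock per group, not one global. */
    struct hwgroup {
            pthread_mutex_t lock;
            int busy;
            const char *name;
    };

    static struct hwgroup groups[2] = {
            { PTHREAD_MUTEX_INITIALIZER, 0, "hwgroup0" },
            { PTHREAD_MUTEX_INITIALIZER, 0, "hwgroup1" },
    };

    static void do_request(struct hwgroup *hwgroup)
    {
            pthread_mutex_lock(&hwgroup->lock);
            if (!hwgroup->busy) {
                    hwgroup->busy = 1;
                    printf("%s: starting request\n", hwgroup->name);
                    /* the real driver drops the lock here while the command runs */
                    hwgroup->busy = 0;
            }
            pthread_mutex_unlock(&hwgroup->lock);
    }

    int main(void)
    {
            /* Two groups no longer contend on a single global lock. */
            do_request(&groups[0]);
            do_request(&groups[1]);
            return 0;
    }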
@@ -968,7 +786,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
968 ide_startstop_t startstop; 786 ide_startstop_t startstop;
969 int loops = 0; 787 int loops = 0;
970 788
971 /* caller must own ide_lock */ 789 /* caller must own hwgroup->lock */
972 BUG_ON(!irqs_disabled()); 790 BUG_ON(!irqs_disabled());
973 791
974 while (!hwgroup->busy) { 792 while (!hwgroup->busy) {
@@ -1023,12 +841,12 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1023 } 841 }
1024 again: 842 again:
1025 hwif = HWIF(drive); 843 hwif = HWIF(drive);
1026 if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) { 844 if (hwif != hwgroup->hwif) {
1027 /* 845 /*
1028 * set nIEN for previous hwif, drives in the 846 * set nIEN for previous hwif, drives in the
1029 * quirk_list may not like intr setups/cleanups 847 * quirk_list may not like intr setups/cleanups
1030 */ 848 */
1031 if (drive->quirk_list != 1) 849 if (drive->quirk_list == 0)
1032 hwif->tp_ops->set_irq(hwif, 0); 850 hwif->tp_ops->set_irq(hwif, 0);
1033 } 851 }
1034 hwgroup->hwif = hwif; 852 hwgroup->hwif = hwif;
@@ -1036,11 +854,6 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1036 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); 854 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
1037 drive->service_start = jiffies; 855 drive->service_start = jiffies;
1038 856
1039 if (blk_queue_plugged(drive->queue)) {
1040 printk(KERN_ERR "ide: huh? queue was plugged!\n");
1041 break;
1042 }
1043
1044 /* 857 /*
1045 * we know that the queue isn't empty, but this can happen 858 * we know that the queue isn't empty, but this can happen
1046 * if the q->prep_rq_fn() decides to kill a request 859 * if the q->prep_rq_fn() decides to kill a request
@@ -1090,11 +903,11 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1090 */ 903 */
1091 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 904 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
1092 disable_irq_nosync(hwif->irq); 905 disable_irq_nosync(hwif->irq);
1093 spin_unlock(&ide_lock); 906 spin_unlock(&hwgroup->lock);
1094 local_irq_enable_in_hardirq(); 907 local_irq_enable_in_hardirq();
1095 /* allow other IRQs while we start this request */ 908 /* allow other IRQs while we start this request */
1096 startstop = start_request(drive, rq); 909 startstop = start_request(drive, rq);
1097 spin_lock_irq(&ide_lock); 910 spin_lock_irq(&hwgroup->lock);
1098 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 911 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
1099 enable_irq(hwif->irq); 912 enable_irq(hwif->irq);
1100 if (startstop == ide_stopped) 913 if (startstop == ide_stopped)
@@ -1192,7 +1005,7 @@ void ide_timer_expiry (unsigned long data)
1192 unsigned long flags; 1005 unsigned long flags;
1193 unsigned long wait = -1; 1006 unsigned long wait = -1;
1194 1007
1195 spin_lock_irqsave(&ide_lock, flags); 1008 spin_lock_irqsave(&hwgroup->lock, flags);
1196 1009
1197 if (((handler = hwgroup->handler) == NULL) || 1010 if (((handler = hwgroup->handler) == NULL) ||
1198 (hwgroup->req_gen != hwgroup->req_gen_timer)) { 1011 (hwgroup->req_gen != hwgroup->req_gen_timer)) {
@@ -1225,7 +1038,7 @@ void ide_timer_expiry (unsigned long data)
1225 hwgroup->timer.expires = jiffies + wait; 1038 hwgroup->timer.expires = jiffies + wait;
1226 hwgroup->req_gen_timer = hwgroup->req_gen; 1039 hwgroup->req_gen_timer = hwgroup->req_gen;
1227 add_timer(&hwgroup->timer); 1040 add_timer(&hwgroup->timer);
1228 spin_unlock_irqrestore(&ide_lock, flags); 1041 spin_unlock_irqrestore(&hwgroup->lock, flags);
1229 return; 1042 return;
1230 } 1043 }
1231 } 1044 }
@@ -1235,7 +1048,7 @@ void ide_timer_expiry (unsigned long data)
1235 * the handler() function, which means we need to 1048 * the handler() function, which means we need to
1236 * globally mask the specific IRQ: 1049 * globally mask the specific IRQ:
1237 */ 1050 */
1238 spin_unlock(&ide_lock); 1051 spin_unlock(&hwgroup->lock);
1239 hwif = HWIF(drive); 1052 hwif = HWIF(drive);
1240 /* disable_irq_nosync ?? */ 1053 /* disable_irq_nosync ?? */
1241 disable_irq(hwif->irq); 1054 disable_irq(hwif->irq);
@@ -1259,14 +1072,14 @@ void ide_timer_expiry (unsigned long data)
1259 hwif->tp_ops->read_status(hwif)); 1072 hwif->tp_ops->read_status(hwif));
1260 } 1073 }
1261 drive->service_time = jiffies - drive->service_start; 1074 drive->service_time = jiffies - drive->service_start;
1262 spin_lock_irq(&ide_lock); 1075 spin_lock_irq(&hwgroup->lock);
1263 enable_irq(hwif->irq); 1076 enable_irq(hwif->irq);
1264 if (startstop == ide_stopped) 1077 if (startstop == ide_stopped)
1265 hwgroup->busy = 0; 1078 hwgroup->busy = 0;
1266 } 1079 }
1267 } 1080 }
1268 ide_do_request(hwgroup, IDE_NO_IRQ); 1081 ide_do_request(hwgroup, IDE_NO_IRQ);
1269 spin_unlock_irqrestore(&ide_lock, flags); 1082 spin_unlock_irqrestore(&hwgroup->lock, flags);
1270} 1083}
1271 1084
1272/** 1085/**
@@ -1359,18 +1172,16 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1359{ 1172{
1360 unsigned long flags; 1173 unsigned long flags;
1361 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id; 1174 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
1362 ide_hwif_t *hwif; 1175 ide_hwif_t *hwif = hwgroup->hwif;
1363 ide_drive_t *drive; 1176 ide_drive_t *drive;
1364 ide_handler_t *handler; 1177 ide_handler_t *handler;
1365 ide_startstop_t startstop; 1178 ide_startstop_t startstop;
1179 irqreturn_t irq_ret = IRQ_NONE;
1366 1180
1367 spin_lock_irqsave(&ide_lock, flags); 1181 spin_lock_irqsave(&hwgroup->lock, flags);
1368 hwif = hwgroup->hwif;
1369 1182
1370 if (!ide_ack_intr(hwif)) { 1183 if (!ide_ack_intr(hwif))
1371 spin_unlock_irqrestore(&ide_lock, flags); 1184 goto out;
1372 return IRQ_NONE;
1373 }
1374 1185
1375 if ((handler = hwgroup->handler) == NULL || hwgroup->polling) { 1186 if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
1376 /* 1187 /*
@@ -1406,9 +1217,9 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1406 (void)hwif->tp_ops->read_status(hwif); 1217 (void)hwif->tp_ops->read_status(hwif);
1407#endif /* CONFIG_BLK_DEV_IDEPCI */ 1218#endif /* CONFIG_BLK_DEV_IDEPCI */
1408 } 1219 }
1409 spin_unlock_irqrestore(&ide_lock, flags); 1220 goto out;
1410 return IRQ_NONE;
1411 } 1221 }
1222
1412 drive = hwgroup->drive; 1223 drive = hwgroup->drive;
1413 if (!drive) { 1224 if (!drive) {
1414 /* 1225 /*
@@ -1417,10 +1228,10 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1417 * 1228 *
1418 * [Note - this can occur if the drive is hot unplugged] 1229 * [Note - this can occur if the drive is hot unplugged]
1419 */ 1230 */
1420 spin_unlock_irqrestore(&ide_lock, flags); 1231 goto out_handled;
1421 return IRQ_HANDLED;
1422 } 1232 }
1423 if (!drive_is_ready(drive)) { 1233
1234 if (!drive_is_ready(drive))
1424 /* 1235 /*
1425 * This happens regularly when we share a PCI IRQ with 1236 * This happens regularly when we share a PCI IRQ with
1426 * another device. Unfortunately, it can also happen 1237 * another device. Unfortunately, it can also happen
@@ -1428,9 +1239,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1428 * their status register is up to date. Hopefully we have 1239 * their status register is up to date. Hopefully we have
1429 * enough advance overhead that the latter isn't a problem. 1240 * enough advance overhead that the latter isn't a problem.
1430 */ 1241 */
1431 spin_unlock_irqrestore(&ide_lock, flags); 1242 goto out;
1432 return IRQ_NONE; 1243
1433 }
1434 if (!hwgroup->busy) { 1244 if (!hwgroup->busy) {
1435 hwgroup->busy = 1; /* paranoia */ 1245 hwgroup->busy = 1; /* paranoia */
1436 printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name); 1246 printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
@@ -1438,7 +1248,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1438 hwgroup->handler = NULL; 1248 hwgroup->handler = NULL;
1439 hwgroup->req_gen++; 1249 hwgroup->req_gen++;
1440 del_timer(&hwgroup->timer); 1250 del_timer(&hwgroup->timer);
1441 spin_unlock(&ide_lock); 1251 spin_unlock(&hwgroup->lock);
1442 1252
1443 if (hwif->port_ops && hwif->port_ops->clear_irq) 1253 if (hwif->port_ops && hwif->port_ops->clear_irq)
1444 hwif->port_ops->clear_irq(drive); 1254 hwif->port_ops->clear_irq(drive);
@@ -1449,7 +1259,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1449 /* service this interrupt, may set handler for next interrupt */ 1259 /* service this interrupt, may set handler for next interrupt */
1450 startstop = handler(drive); 1260 startstop = handler(drive);
1451 1261
1452 spin_lock_irq(&ide_lock); 1262 spin_lock_irq(&hwgroup->lock);
1453 /* 1263 /*
1454 * Note that handler() may have set things up for another 1264 * Note that handler() may have set things up for another
1455 * interrupt to occur soon, but it cannot happen until 1265 * interrupt to occur soon, but it cannot happen until
@@ -1467,8 +1277,11 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1467 "on exit\n", drive->name); 1277 "on exit\n", drive->name);
1468 } 1278 }
1469 } 1279 }
1470 spin_unlock_irqrestore(&ide_lock, flags); 1280out_handled:
1471 return IRQ_HANDLED; 1281 irq_ret = IRQ_HANDLED;
1282out:
1283 spin_unlock_irqrestore(&hwgroup->lock, flags);
1284 return irq_ret;
1472} 1285}
1473 1286
1474/** 1287/**
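The rewritten ide_intr() above also moves from one unlock-and-return per early exit to a single exit path: irq_ret starts as IRQ_NONE, the handled paths jump to out_handled to set IRQ_HANDLED, and everything funnels through out where the hwgroup lock is dropped exactly once. A compilable sketch of that shape (function and parameter names are invented; a pthread mutex stands in for hwgroup->lock):

    #include <pthread.h>
    #include <stdio.h>

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static enum irqreturn fake_intr(int irq_acked, int drive_present, int drive_ready)
    {
            enum irqreturn irq_ret = IRQ_NONE;

            pthread_mutex_lock(&lock);

            if (!irq_acked)
                    goto out;            /* not our interrupt */
            if (!drive_present)
                    goto out_handled;    /* ours, but nothing left to service */
            if (!drive_ready)
                    goto out;            /* probably a shared-IRQ neighbour */

            /* ... the interrupt would be serviced here ... */

    out_handled:
            irq_ret = IRQ_HANDLED;
    out:
            pthread_mutex_unlock(&lock);  /* the one and only unlock */
            return irq_ret;
    }

    int main(void)
    {
            printf("handled=%d\n", fake_intr(1, 1, 1) == IRQ_HANDLED);
            printf("handled=%d\n", fake_intr(0, 1, 1) == IRQ_HANDLED);
            return 0;
    }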
@@ -1488,16 +1301,17 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1488 1301
1489void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq) 1302void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
1490{ 1303{
1304 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
1305 struct request_queue *q = drive->queue;
1491 unsigned long flags; 1306 unsigned long flags;
1492 ide_hwgroup_t *hwgroup = HWGROUP(drive);
1493 1307
1494 spin_lock_irqsave(&ide_lock, flags);
1495 hwgroup->rq = NULL; 1308 hwgroup->rq = NULL;
1496 __elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
1497 blk_start_queueing(drive->queue);
1498 spin_unlock_irqrestore(&ide_lock, flags);
1499}
1500 1309
1310 spin_lock_irqsave(q->queue_lock, flags);
1311 __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
1312 blk_start_queueing(q);
1313 spin_unlock_irqrestore(q->queue_lock, flags);
1314}
1501EXPORT_SYMBOL(ide_do_drive_cmd); 1315EXPORT_SYMBOL(ide_do_drive_cmd);
1502 1316
1503void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) 1317void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index fcde16bb53a7..28232c64c346 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -19,7 +19,6 @@ int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
19 const struct ide_ioctl_devset *s) 19 const struct ide_ioctl_devset *s)
20{ 20{
21 const struct ide_devset *ds; 21 const struct ide_devset *ds;
22 unsigned long flags;
23 int err = -EOPNOTSUPP; 22 int err = -EOPNOTSUPP;
24 23
25 for (; (ds = s->setting); s++) { 24 for (; (ds = s->setting); s++) {
@@ -33,9 +32,7 @@ int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
33 32
34read_val: 33read_val:
35 mutex_lock(&ide_setting_mtx); 34 mutex_lock(&ide_setting_mtx);
36 spin_lock_irqsave(&ide_lock, flags);
37 err = ds->get(drive); 35 err = ds->get(drive);
38 spin_unlock_irqrestore(&ide_lock, flags);
39 mutex_unlock(&ide_setting_mtx); 36 mutex_unlock(&ide_setting_mtx);
40 return err >= 0 ? put_user(err, (long __user *)arg) : err; 37 return err >= 0 ? put_user(err, (long __user *)arg) : err;
41 38
@@ -98,7 +95,7 @@ static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
98 return -EPERM; 95 return -EPERM;
99 96
100 if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) && 97 if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) &&
101 (drive->media == ide_disk || drive->media == ide_floppy || 98 (drive->media != ide_tape ||
102 (drive->dev_flags & IDE_DFLAG_SCSI))) 99 (drive->dev_flags & IDE_DFLAG_SCSI)))
103 return -EPERM; 100 return -EPERM;
104 101
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c41c3b9b6f02..ad8bd6539283 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -835,10 +835,12 @@ static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
835void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, 835void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
836 unsigned int timeout, ide_expiry_t *expiry) 836 unsigned int timeout, ide_expiry_t *expiry)
837{ 837{
838 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
838 unsigned long flags; 839 unsigned long flags;
839 spin_lock_irqsave(&ide_lock, flags); 840
841 spin_lock_irqsave(&hwgroup->lock, flags);
840 __ide_set_handler(drive, handler, timeout, expiry); 842 __ide_set_handler(drive, handler, timeout, expiry);
841 spin_unlock_irqrestore(&ide_lock, flags); 843 spin_unlock_irqrestore(&hwgroup->lock, flags);
842} 844}
843 845
844EXPORT_SYMBOL(ide_set_handler); 846EXPORT_SYMBOL(ide_set_handler);
@@ -860,10 +862,11 @@ EXPORT_SYMBOL(ide_set_handler);
860void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler, 862void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
861 unsigned timeout, ide_expiry_t *expiry) 863 unsigned timeout, ide_expiry_t *expiry)
862{ 864{
865 ide_hwif_t *hwif = drive->hwif;
866 ide_hwgroup_t *hwgroup = hwif->hwgroup;
863 unsigned long flags; 867 unsigned long flags;
864 ide_hwif_t *hwif = HWIF(drive);
865 868
866 spin_lock_irqsave(&ide_lock, flags); 869 spin_lock_irqsave(&hwgroup->lock, flags);
867 __ide_set_handler(drive, handler, timeout, expiry); 870 __ide_set_handler(drive, handler, timeout, expiry);
868 hwif->tp_ops->exec_command(hwif, cmd); 871 hwif->tp_ops->exec_command(hwif, cmd);
869 /* 872 /*
@@ -873,19 +876,20 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
873 * FIXME: we could skip this delay with care on non shared devices 876 * FIXME: we could skip this delay with care on non shared devices
874 */ 877 */
875 ndelay(400); 878 ndelay(400);
876 spin_unlock_irqrestore(&ide_lock, flags); 879 spin_unlock_irqrestore(&hwgroup->lock, flags);
877} 880}
878EXPORT_SYMBOL(ide_execute_command); 881EXPORT_SYMBOL(ide_execute_command);
879 882
880void ide_execute_pkt_cmd(ide_drive_t *drive) 883void ide_execute_pkt_cmd(ide_drive_t *drive)
881{ 884{
882 ide_hwif_t *hwif = drive->hwif; 885 ide_hwif_t *hwif = drive->hwif;
886 ide_hwgroup_t *hwgroup = hwif->hwgroup;
883 unsigned long flags; 887 unsigned long flags;
884 888
885 spin_lock_irqsave(&ide_lock, flags); 889 spin_lock_irqsave(&hwgroup->lock, flags);
886 hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET); 890 hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
887 ndelay(400); 891 ndelay(400);
888 spin_unlock_irqrestore(&ide_lock, flags); 892 spin_unlock_irqrestore(&hwgroup->lock, flags);
889} 893}
890EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd); 894EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
891 895
@@ -1076,22 +1080,16 @@ static void pre_reset(ide_drive_t *drive)
1076 */ 1080 */
1077static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) 1081static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1078{ 1082{
1079 unsigned int unit; 1083 ide_hwif_t *hwif = drive->hwif;
1080 unsigned long flags, timeout; 1084 ide_hwgroup_t *hwgroup = hwif->hwgroup;
1081 ide_hwif_t *hwif; 1085 struct ide_io_ports *io_ports = &hwif->io_ports;
1082 ide_hwgroup_t *hwgroup; 1086 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
1083 struct ide_io_ports *io_ports;
1084 const struct ide_tp_ops *tp_ops;
1085 const struct ide_port_ops *port_ops; 1087 const struct ide_port_ops *port_ops;
1088 unsigned long flags, timeout;
1089 unsigned int unit;
1086 DEFINE_WAIT(wait); 1090 DEFINE_WAIT(wait);
1087 1091
1088 spin_lock_irqsave(&ide_lock, flags); 1092 spin_lock_irqsave(&hwgroup->lock, flags);
1089 hwif = HWIF(drive);
1090 hwgroup = HWGROUP(drive);
1091
1092 io_ports = &hwif->io_ports;
1093
1094 tp_ops = hwif->tp_ops;
1095 1093
1096 /* We must not reset with running handlers */ 1094 /* We must not reset with running handlers */
1097 BUG_ON(hwgroup->handler != NULL); 1095 BUG_ON(hwgroup->handler != NULL);
@@ -1106,7 +1104,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1106 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1104 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1107 hwgroup->polling = 1; 1105 hwgroup->polling = 1;
1108 __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL); 1106 __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
1109 spin_unlock_irqrestore(&ide_lock, flags); 1107 spin_unlock_irqrestore(&hwgroup->lock, flags);
1110 return ide_started; 1108 return ide_started;
1111 } 1109 }
1112 1110
@@ -1129,9 +1127,9 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1129 if (time_before_eq(timeout, now)) 1127 if (time_before_eq(timeout, now))
1130 break; 1128 break;
1131 1129
1132 spin_unlock_irqrestore(&ide_lock, flags); 1130 spin_unlock_irqrestore(&hwgroup->lock, flags);
1133 timeout = schedule_timeout_uninterruptible(timeout - now); 1131 timeout = schedule_timeout_uninterruptible(timeout - now);
1134 spin_lock_irqsave(&ide_lock, flags); 1132 spin_lock_irqsave(&hwgroup->lock, flags);
1135 } while (timeout); 1133 } while (timeout);
1136 finish_wait(&ide_park_wq, &wait); 1134 finish_wait(&ide_park_wq, &wait);
1137 1135
@@ -1143,7 +1141,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1143 pre_reset(&hwif->drives[unit]); 1141 pre_reset(&hwif->drives[unit]);
1144 1142
1145 if (io_ports->ctl_addr == 0) { 1143 if (io_ports->ctl_addr == 0) {
1146 spin_unlock_irqrestore(&ide_lock, flags); 1144 spin_unlock_irqrestore(&hwgroup->lock, flags);
1147 ide_complete_drive_reset(drive, -ENXIO); 1145 ide_complete_drive_reset(drive, -ENXIO);
1148 return ide_stopped; 1146 return ide_stopped;
1149 } 1147 }
@@ -1179,7 +1177,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1179 if (port_ops && port_ops->resetproc) 1177 if (port_ops && port_ops->resetproc)
1180 port_ops->resetproc(drive); 1178 port_ops->resetproc(drive);
1181 1179
1182 spin_unlock_irqrestore(&ide_lock, flags); 1180 spin_unlock_irqrestore(&hwgroup->lock, flags);
1183 return ide_started; 1181 return ide_started;
1184} 1182}
1185 1183
diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c
new file mode 100644
index 000000000000..8c5dcbf22547
--- /dev/null
+++ b/drivers/ide/ide-legacy.c
@@ -0,0 +1,58 @@
1#include <linux/kernel.h>
2#include <linux/ide.h>
3
4static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
5 u8 port_no, const struct ide_port_info *d,
6 unsigned long config)
7{
8 unsigned long base, ctl;
9 int irq;
10
11 if (port_no == 0) {
12 base = 0x1f0;
13 ctl = 0x3f6;
14 irq = 14;
15 } else {
16 base = 0x170;
17 ctl = 0x376;
18 irq = 15;
19 }
20
21 if (!request_region(base, 8, d->name)) {
22 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
23 d->name, base, base + 7);
24 return;
25 }
26
27 if (!request_region(ctl, 1, d->name)) {
28 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
29 d->name, ctl);
30 release_region(base, 8);
31 return;
32 }
33
34 ide_std_init_ports(hw, base, ctl);
35 hw->irq = irq;
36 hw->chipset = d->chipset;
37 hw->config = config;
38
39 hws[port_no] = hw;
40}
41
42int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
43{
44 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
45
46 memset(&hw, 0, sizeof(hw));
47
48 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
49 ide_legacy_init_one(hws, &hw[0], 0, d, config);
50 ide_legacy_init_one(hws, &hw[1], 1, d, config);
51
52 if (hws[0] == NULL && hws[1] == NULL &&
53 (d->host_flags & IDE_HFLAG_SINGLE))
54 return -ENOENT;
55
56 return ide_host_add(d, hws, NULL);
57}
58EXPORT_SYMBOL_GPL(ide_legacy_device_add);
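The new ide-legacy.c above factors out boilerplate that pre-PCI host drivers used to duplicate: reserve the classic ISA resources for each port, fill in a hw_regs_t, and hand the result to ide_host_add(). A standalone sketch of just the port-to-resource mapping it hard-codes (struct and function names here are made up):

    #include <stdio.h>

    struct legacy_port {
            unsigned long base, ctl;
            int irq;
    };

    /* Classic primary/secondary ISA IDE resources, as in ide_legacy_init_one(). */
    static struct legacy_port legacy_port_resources(int port_no)
    {
            struct legacy_port p;

            if (port_no == 0) {
                    p.base = 0x1f0; p.ctl = 0x3f6; p.irq = 14;
            } else {
                    p.base = 0x170; p.ctl = 0x376; p.irq = 15;
            }
            return p;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 2; i++) {
                    struct legacy_port p = legacy_port_resources(i);

                    printf("port %d: base=0x%lx ctl=0x%lx irq=%d\n",
                           i, p.base, p.ctl, p.irq);
            }
            return 0;
    }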
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 9fc4cfb2a272..9f6e33d8a8b2 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -43,7 +43,6 @@ const char *ide_xfer_verbose(u8 mode)
43 43
44 return s; 44 return s;
45} 45}
46
47EXPORT_SYMBOL(ide_xfer_verbose); 46EXPORT_SYMBOL(ide_xfer_verbose);
48 47
49/** 48/**
@@ -87,7 +86,7 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
87 * This is used by most chipset support modules when "auto-tuning". 86 * This is used by most chipset support modules when "auto-tuning".
88 */ 87 */
89 88
90u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode) 89u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
91{ 90{
92 u16 *id = drive->id; 91 u16 *id = drive->id;
93 int pio_mode = -1, overridden = 0; 92 int pio_mode = -1, overridden = 0;
@@ -131,7 +130,6 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
131 130
132 return pio_mode; 131 return pio_mode;
133} 132}
134
135EXPORT_SYMBOL_GPL(ide_get_best_pio_mode); 133EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
136 134
137/* req_pio == "255" for auto-tune */ 135/* req_pio == "255" for auto-tune */
@@ -162,7 +160,6 @@ void ide_set_pio(ide_drive_t *drive, u8 req_pio)
162 160
163 (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio); 161 (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
164} 162}
165
166EXPORT_SYMBOL_GPL(ide_set_pio); 163EXPORT_SYMBOL_GPL(ide_set_pio);
167 164
168/** 165/**
@@ -173,7 +170,7 @@ EXPORT_SYMBOL_GPL(ide_set_pio);
173 * Enable or disable bounce buffering for the device. Drives move 170 * Enable or disable bounce buffering for the device. Drives move
174 * between PIO and DMA and that changes the rules we need. 171 * between PIO and DMA and that changes the rules we need.
175 */ 172 */
176 173
177void ide_toggle_bounce(ide_drive_t *drive, int on) 174void ide_toggle_bounce(ide_drive_t *drive, int on)
178{ 175{
179 u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */ 176 u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
@@ -243,14 +240,13 @@ int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
243 return ide_config_drive_speed(drive, mode); 240 return ide_config_drive_speed(drive, mode);
244 } 241 }
245} 242}
246
247EXPORT_SYMBOL_GPL(ide_set_dma_mode); 243EXPORT_SYMBOL_GPL(ide_set_dma_mode);
248 244
249/** 245/**
250 * ide_set_xfer_rate - set transfer rate 246 * ide_set_xfer_rate - set transfer rate
251 * @drive: drive to set 247 * @drive: drive to set
252 * @rate: speed to attempt to set 248 * @rate: speed to attempt to set
253 * 249 *
254 * General helper for setting the speed of an IDE device. This 250 * General helper for setting the speed of an IDE device. This
255 * function knows about user enforced limits from the configuration 251 * function knows about user enforced limits from the configuration
256 * which ->set_pio_mode/->set_dma_mode does not. 252 * which ->set_pio_mode/->set_dma_mode does not.
@@ -277,21 +273,16 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
277 273
278static void ide_dump_opcode(ide_drive_t *drive) 274static void ide_dump_opcode(ide_drive_t *drive)
279{ 275{
280 struct request *rq; 276 struct request *rq = drive->hwif->hwgroup->rq;
281 ide_task_t *task = NULL; 277 ide_task_t *task = NULL;
282 278
283 spin_lock(&ide_lock);
284 rq = NULL;
285 if (HWGROUP(drive))
286 rq = HWGROUP(drive)->rq;
287 spin_unlock(&ide_lock);
288 if (!rq) 279 if (!rq)
289 return; 280 return;
290 281
291 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 282 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
292 task = rq->special; 283 task = rq->special;
293 284
294 printk("ide: failed opcode was: "); 285 printk(KERN_ERR "ide: failed opcode was: ");
295 if (task == NULL) 286 if (task == NULL)
296 printk(KERN_CONT "unknown\n"); 287 printk(KERN_CONT "unknown\n");
297 else 288 else
@@ -329,44 +320,55 @@ static void ide_dump_sector(ide_drive_t *drive)
329 drive->hwif->tp_ops->tf_read(drive, &task); 320 drive->hwif->tp_ops->tf_read(drive, &task);
330 321
331 if (lba48 || (tf->device & ATA_LBA)) 322 if (lba48 || (tf->device & ATA_LBA))
332 printk(", LBAsect=%llu", 323 printk(KERN_CONT ", LBAsect=%llu",
333 (unsigned long long)ide_get_lba_addr(tf, lba48)); 324 (unsigned long long)ide_get_lba_addr(tf, lba48));
334 else 325 else
335 printk(", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, 326 printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
336 tf->device & 0xf, tf->lbal); 327 tf->device & 0xf, tf->lbal);
337} 328}
338 329
339static void ide_dump_ata_error(ide_drive_t *drive, u8 err) 330static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
340{ 331{
341 printk("{ "); 332 printk(KERN_ERR "{ ");
342 if (err & ATA_ABORTED) printk("DriveStatusError "); 333 if (err & ATA_ABORTED)
334 printk(KERN_CONT "DriveStatusError ");
343 if (err & ATA_ICRC) 335 if (err & ATA_ICRC)
344 printk((err & ATA_ABORTED) ? "BadCRC " : "BadSector "); 336 printk(KERN_CONT "%s",
345 if (err & ATA_UNC) printk("UncorrectableError "); 337 (err & ATA_ABORTED) ? "BadCRC " : "BadSector ");
346 if (err & ATA_IDNF) printk("SectorIdNotFound "); 338 if (err & ATA_UNC)
347 if (err & ATA_TRK0NF) printk("TrackZeroNotFound "); 339 printk(KERN_CONT "UncorrectableError ");
348 if (err & ATA_AMNF) printk("AddrMarkNotFound "); 340 if (err & ATA_IDNF)
349 printk("}"); 341 printk(KERN_CONT "SectorIdNotFound ");
342 if (err & ATA_TRK0NF)
343 printk(KERN_CONT "TrackZeroNotFound ");
344 if (err & ATA_AMNF)
345 printk(KERN_CONT "AddrMarkNotFound ");
346 printk(KERN_CONT "}");
350 if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK || 347 if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK ||
351 (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) { 348 (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) {
352 ide_dump_sector(drive); 349 ide_dump_sector(drive);
353 if (HWGROUP(drive) && HWGROUP(drive)->rq) 350 if (HWGROUP(drive) && HWGROUP(drive)->rq)
354 printk(", sector=%llu", 351 printk(KERN_CONT ", sector=%llu",
355 (unsigned long long)HWGROUP(drive)->rq->sector); 352 (unsigned long long)HWGROUP(drive)->rq->sector);
356 } 353 }
357 printk("\n"); 354 printk(KERN_CONT "\n");
358} 355}
359 356
360static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) 357static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
361{ 358{
362 printk("{ "); 359 printk(KERN_ERR "{ ");
363 if (err & ATAPI_ILI) printk("IllegalLengthIndication "); 360 if (err & ATAPI_ILI)
364 if (err & ATAPI_EOM) printk("EndOfMedia "); 361 printk(KERN_CONT "IllegalLengthIndication ");
365 if (err & ATA_ABORTED) printk("AbortedCommand "); 362 if (err & ATAPI_EOM)
366 if (err & ATA_MCR) printk("MediaChangeRequested "); 363 printk(KERN_CONT "EndOfMedia ");
367 if (err & ATAPI_LFS) printk("LastFailedSense=0x%02x ", 364 if (err & ATA_ABORTED)
368 (err & ATAPI_LFS) >> 4); 365 printk(KERN_CONT "AbortedCommand ");
369 printk("}\n"); 366 if (err & ATA_MCR)
367 printk(KERN_CONT "MediaChangeRequested ");
368 if (err & ATAPI_LFS)
369 printk(KERN_CONT "LastFailedSense=0x%02x ",
370 (err & ATAPI_LFS) >> 4);
371 printk(KERN_CONT "}\n");
370} 372}
371 373
372/** 374/**
@@ -382,34 +384,37 @@ static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
382 384
383u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat) 385u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
384{ 386{
385 unsigned long flags;
386 u8 err = 0; 387 u8 err = 0;
387 388
388 local_irq_save(flags); 389 printk(KERN_ERR "%s: %s: status=0x%02x { ", drive->name, msg, stat);
389 printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
390 if (stat & ATA_BUSY) 390 if (stat & ATA_BUSY)
391 printk("Busy "); 391 printk(KERN_CONT "Busy ");
392 else { 392 else {
393 if (stat & ATA_DRDY) printk("DriveReady "); 393 if (stat & ATA_DRDY)
394 if (stat & ATA_DF) printk("DeviceFault "); 394 printk(KERN_CONT "DriveReady ");
395 if (stat & ATA_DSC) printk("SeekComplete "); 395 if (stat & ATA_DF)
396 if (stat & ATA_DRQ) printk("DataRequest "); 396 printk(KERN_CONT "DeviceFault ");
397 if (stat & ATA_CORR) printk("CorrectedError "); 397 if (stat & ATA_DSC)
398 if (stat & ATA_IDX) printk("Index "); 398 printk(KERN_CONT "SeekComplete ");
399 if (stat & ATA_ERR) printk("Error "); 399 if (stat & ATA_DRQ)
400 printk(KERN_CONT "DataRequest ");
401 if (stat & ATA_CORR)
402 printk(KERN_CONT "CorrectedError ");
403 if (stat & ATA_IDX)
404 printk(KERN_CONT "Index ");
405 if (stat & ATA_ERR)
406 printk(KERN_CONT "Error ");
400 } 407 }
401 printk("}\n"); 408 printk(KERN_CONT "}\n");
402 if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) { 409 if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) {
403 err = ide_read_error(drive); 410 err = ide_read_error(drive);
404 printk("%s: %s: error=0x%02x ", drive->name, msg, err); 411 printk(KERN_ERR "%s: %s: error=0x%02x ", drive->name, msg, err);
405 if (drive->media == ide_disk) 412 if (drive->media == ide_disk)
406 ide_dump_ata_error(drive, err); 413 ide_dump_ata_error(drive, err);
407 else 414 else
408 ide_dump_atapi_error(drive, err); 415 ide_dump_atapi_error(drive, err);
409 } 416 }
410 ide_dump_opcode(drive); 417 ide_dump_opcode(drive);
411 local_irq_restore(flags);
412 return err; 418 return err;
413} 419}
414
415EXPORT_SYMBOL(ide_dump_status); 420EXPORT_SYMBOL(ide_dump_status);
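The ide-lib.c hunks above tag every continued printk() fragment with KERN_CONT, since an unmarked printk() may start a new line and the status/error decode is meant to come out as a single line. A user-space sketch of the same ATA error decode that sidesteps the issue by assembling the fragments in a buffer before printing; the bit values are the conventional ATA error-register ones and should be treated as illustrative here:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative error-register bits, matching the flags tested above. */
    #define ATA_AMNF    0x01
    #define ATA_TRK0NF  0x02
    #define ATA_ABORTED 0x04
    #define ATA_IDNF    0x10
    #define ATA_UNC     0x40
    #define ATA_ICRC    0x80

    static void dump_ata_error(unsigned char err, char *buf, size_t len)
    {
            snprintf(buf, len, "{ ");
            if (err & ATA_ABORTED)
                    strncat(buf, "DriveStatusError ", len - strlen(buf) - 1);
            if (err & ATA_ICRC)
                    strncat(buf, (err & ATA_ABORTED) ? "BadCRC " : "BadSector ",
                            len - strlen(buf) - 1);
            if (err & ATA_UNC)
                    strncat(buf, "UncorrectableError ", len - strlen(buf) - 1);
            if (err & ATA_IDNF)
                    strncat(buf, "SectorIdNotFound ", len - strlen(buf) - 1);
            if (err & ATA_TRK0NF)
                    strncat(buf, "TrackZeroNotFound ", len - strlen(buf) - 1);
            if (err & ATA_AMNF)
                    strncat(buf, "AddrMarkNotFound ", len - strlen(buf) - 1);
            strncat(buf, "}", len - strlen(buf) - 1);
    }

    int main(void)
    {
            char line[128];

            dump_ata_error(ATA_ABORTED | ATA_ICRC, line, sizeof(line));
            printf("error=0x%02x %s\n", ATA_ABORTED | ATA_ICRC, line);
            return 0;
    }

The kernel patch keeps the incremental printk() calls instead and simply marks each one as a continuation.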
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 03b00e57e93f..63d01c55f865 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -7,17 +7,16 @@ DECLARE_WAIT_QUEUE_HEAD(ide_park_wq);
7 7
8static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) 8static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
9{ 9{
10 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
10 struct request_queue *q = drive->queue; 11 struct request_queue *q = drive->queue;
11 struct request *rq; 12 struct request *rq;
12 int rc; 13 int rc;
13 14
14 timeout += jiffies; 15 timeout += jiffies;
15 spin_lock_irq(&ide_lock); 16 spin_lock_irq(&hwgroup->lock);
16 if (drive->dev_flags & IDE_DFLAG_PARKED) { 17 if (drive->dev_flags & IDE_DFLAG_PARKED) {
17 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup; 18 int reset_timer = time_before(timeout, drive->sleep);
18 int reset_timer;
19 19
20 reset_timer = time_before(timeout, drive->sleep);
21 drive->sleep = timeout; 20 drive->sleep = timeout;
22 wake_up_all(&ide_park_wq); 21 wake_up_all(&ide_park_wq);
23 if (reset_timer && hwgroup->sleeping && 22 if (reset_timer && hwgroup->sleeping &&
@@ -26,10 +25,10 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
26 hwgroup->busy = 0; 25 hwgroup->busy = 0;
27 blk_start_queueing(q); 26 blk_start_queueing(q);
28 } 27 }
29 spin_unlock_irq(&ide_lock); 28 spin_unlock_irq(&hwgroup->lock);
30 return; 29 return;
31 } 30 }
32 spin_unlock_irq(&ide_lock); 31 spin_unlock_irq(&hwgroup->lock);
33 32
34 rq = blk_get_request(q, READ, __GFP_WAIT); 33 rq = blk_get_request(q, READ, __GFP_WAIT);
35 rq->cmd[0] = REQ_PARK_HEADS; 34 rq->cmd[0] = REQ_PARK_HEADS;
@@ -62,20 +61,21 @@ ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
62 char *buf) 61 char *buf)
63{ 62{
64 ide_drive_t *drive = to_ide_device(dev); 63 ide_drive_t *drive = to_ide_device(dev);
64 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
65 unsigned long now; 65 unsigned long now;
66 unsigned int msecs; 66 unsigned int msecs;
67 67
68 if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD) 68 if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
69 return -EOPNOTSUPP; 69 return -EOPNOTSUPP;
70 70
71 spin_lock_irq(&ide_lock); 71 spin_lock_irq(&hwgroup->lock);
72 now = jiffies; 72 now = jiffies;
73 if (drive->dev_flags & IDE_DFLAG_PARKED && 73 if (drive->dev_flags & IDE_DFLAG_PARKED &&
74 time_after(drive->sleep, now)) 74 time_after(drive->sleep, now))
75 msecs = jiffies_to_msecs(drive->sleep - now); 75 msecs = jiffies_to_msecs(drive->sleep - now);
76 else 76 else
77 msecs = 0; 77 msecs = 0;
78 spin_unlock_irq(&ide_lock); 78 spin_unlock_irq(&hwgroup->lock);
79 79
80 return snprintf(buf, 20, "%u\n", msecs); 80 return snprintf(buf, 20, "%u\n", msecs);
81} 81}
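ide_park_show() above now reads the park state under the per-hwgroup lock and reports the milliseconds left until drive->sleep, or zero once that deadline has passed. A standalone sketch of the remaining-time computation (plain integers stand in for jiffies; the kernel uses time_after() and jiffies_to_msecs() instead):

    #include <stdio.h>

    /* Milliseconds left until sleep_until, or 0 if it has already passed. */
    static unsigned int park_msecs_left(unsigned long now, unsigned long sleep_until,
                                        unsigned int ms_per_tick)
    {
            if (sleep_until > now)   /* kernel uses time_after() for wrap safety */
                    return (sleep_until - now) * ms_per_tick;
            return 0;
    }

    int main(void)
    {
            printf("%u ms left\n", park_msecs_left(1000, 1250, 4)); /* HZ=250 -> 4 ms/tick */
            printf("%u ms left\n", park_msecs_left(1300, 1250, 4));
            return 0;
    }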
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
new file mode 100644
index 000000000000..8282c6086e6a
--- /dev/null
+++ b/drivers/ide/ide-pm.c
@@ -0,0 +1,235 @@
1#include <linux/kernel.h>
2#include <linux/ide.h>
3#include <linux/hdreg.h>
4
5int generic_ide_suspend(struct device *dev, pm_message_t mesg)
6{
7 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
8 ide_hwif_t *hwif = HWIF(drive);
9 struct request *rq;
10 struct request_pm_state rqpm;
11 ide_task_t args;
12 int ret;
13
14 /* call ACPI _GTM only once */
15 if ((drive->dn & 1) == 0 || pair == NULL)
16 ide_acpi_get_timing(hwif);
17
18 memset(&rqpm, 0, sizeof(rqpm));
19 memset(&args, 0, sizeof(args));
20 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
21 rq->cmd_type = REQ_TYPE_PM_SUSPEND;
22 rq->special = &args;
23 rq->data = &rqpm;
24 rqpm.pm_step = IDE_PM_START_SUSPEND;
25 if (mesg.event == PM_EVENT_PRETHAW)
26 mesg.event = PM_EVENT_FREEZE;
27 rqpm.pm_state = mesg.event;
28
29 ret = blk_execute_rq(drive->queue, NULL, rq, 0);
30 blk_put_request(rq);
31
32 /* call ACPI _PS3 only after both devices are suspended */
33 if (ret == 0 && ((drive->dn & 1) || pair == NULL))
34 ide_acpi_set_state(hwif, 0);
35
36 return ret;
37}
38
39int generic_ide_resume(struct device *dev)
40{
41 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
42 ide_hwif_t *hwif = HWIF(drive);
43 struct request *rq;
44 struct request_pm_state rqpm;
45 ide_task_t args;
46 int err;
47
48 /* call ACPI _PS0 / _STM only once */
49 if ((drive->dn & 1) == 0 || pair == NULL) {
50 ide_acpi_set_state(hwif, 1);
51 ide_acpi_push_timing(hwif);
52 }
53
54 ide_acpi_exec_tfs(drive);
55
56 memset(&rqpm, 0, sizeof(rqpm));
57 memset(&args, 0, sizeof(args));
58 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
59 rq->cmd_type = REQ_TYPE_PM_RESUME;
60 rq->cmd_flags |= REQ_PREEMPT;
61 rq->special = &args;
62 rq->data = &rqpm;
63 rqpm.pm_step = IDE_PM_START_RESUME;
64 rqpm.pm_state = PM_EVENT_ON;
65
66 err = blk_execute_rq(drive->queue, NULL, rq, 1);
67 blk_put_request(rq);
68
69 if (err == 0 && dev->driver) {
70 ide_driver_t *drv = to_ide_driver(dev->driver);
71
72 if (drv->resume)
73 drv->resume(drive);
74 }
75
76 return err;
77}
78
79void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
80{
81 struct request_pm_state *pm = rq->data;
82
83#ifdef DEBUG_PM
84 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
85 drive->name, pm->pm_step);
86#endif
87 if (drive->media != ide_disk)
88 return;
89
90 switch (pm->pm_step) {
91 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
92 if (pm->pm_state == PM_EVENT_FREEZE)
93 pm->pm_step = IDE_PM_COMPLETED;
94 else
95 pm->pm_step = IDE_PM_STANDBY;
96 break;
97 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
98 pm->pm_step = IDE_PM_COMPLETED;
99 break;
100 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
101 pm->pm_step = IDE_PM_IDLE;
102 break;
103 case IDE_PM_IDLE: /* Resume step 2 (idle)*/
104 pm->pm_step = IDE_PM_RESTORE_DMA;
105 break;
106 }
107}
108
109ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
110{
111 struct request_pm_state *pm = rq->data;
112 ide_task_t *args = rq->special;
113
114 memset(args, 0, sizeof(*args));
115
116 switch (pm->pm_step) {
117 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
118 if (drive->media != ide_disk)
119 break;
120 /* Not supported? Switch to next step now. */
121 if (ata_id_flush_enabled(drive->id) == 0 ||
122 (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
123 ide_complete_power_step(drive, rq);
124 return ide_stopped;
125 }
126 if (ata_id_flush_ext_enabled(drive->id))
127 args->tf.command = ATA_CMD_FLUSH_EXT;
128 else
129 args->tf.command = ATA_CMD_FLUSH;
130 goto out_do_tf;
131 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
132 args->tf.command = ATA_CMD_STANDBYNOW1;
133 goto out_do_tf;
134 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
135 ide_set_max_pio(drive);
136 /*
137 * skip IDE_PM_IDLE for ATAPI devices
138 */
139 if (drive->media != ide_disk)
140 pm->pm_step = IDE_PM_RESTORE_DMA;
141 else
142 ide_complete_power_step(drive, rq);
143 return ide_stopped;
144 case IDE_PM_IDLE: /* Resume step 2 (idle) */
145 args->tf.command = ATA_CMD_IDLEIMMEDIATE;
146 goto out_do_tf;
147 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
148 /*
149 * Right now, all we do is call ide_set_dma(drive),
150 * we could be smarter and check for current xfer_speed
151 * in struct drive etc...
152 */
153 if (drive->hwif->dma_ops == NULL)
154 break;
155 /*
156 * TODO: respect IDE_DFLAG_USING_DMA
157 */
158 ide_set_dma(drive);
159 break;
160 }
161
162 pm->pm_step = IDE_PM_COMPLETED;
163 return ide_stopped;
164
165out_do_tf:
166 args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
167 args->data_phase = TASKFILE_NO_DATA;
168 return do_rw_taskfile(drive, args);
169}
170
171/**
172 * ide_complete_pm_request - end the current Power Management request
173 * @drive: target drive
174 * @rq: request
175 *
176 * This function cleans up the current PM request and stops the queue
177 * if necessary.
178 */
179void ide_complete_pm_request(ide_drive_t *drive, struct request *rq)
180{
181 struct request_queue *q = drive->queue;
182 unsigned long flags;
183
184#ifdef DEBUG_PM
185 printk("%s: completing PM request, %s\n", drive->name,
186 blk_pm_suspend_request(rq) ? "suspend" : "resume");
187#endif
188 spin_lock_irqsave(q->queue_lock, flags);
189 if (blk_pm_suspend_request(rq)) {
190 blk_stop_queue(q);
191 } else {
192 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
193 blk_start_queue(q);
194 }
195 spin_unlock_irqrestore(q->queue_lock, flags);
196
197 drive->hwif->hwgroup->rq = NULL;
198
199 if (blk_end_request(rq, 0, 0))
200 BUG();
201}
202
203void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
204{
205 struct request_pm_state *pm = rq->data;
206
207 if (blk_pm_suspend_request(rq) &&
208 pm->pm_step == IDE_PM_START_SUSPEND)
209 /* Mark drive blocked when starting the suspend sequence. */
210 drive->dev_flags |= IDE_DFLAG_BLOCKED;
211 else if (blk_pm_resume_request(rq) &&
212 pm->pm_step == IDE_PM_START_RESUME) {
213 /*
214 * The first thing we do on wakeup is to wait for BSY bit to
215 * go away (with a looong timeout) as a drive on this hwif may
216 * just be POSTing itself.
217 * We do that before even selecting as the "other" device on
218 * the bus may be broken enough to walk on our toes at this
219 * point.
220 */
221 ide_hwif_t *hwif = drive->hwif;
222 int rc;
223#ifdef DEBUG_PM
224 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
225#endif
226 rc = ide_wait_not_busy(hwif, 35000);
227 if (rc)
228 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
229 SELECT_DRIVE(drive);
230 hwif->tp_ops->set_irq(hwif, 1);
231 rc = ide_wait_not_busy(hwif, 100000);
232 if (rc)
233 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
234 }
235}
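The new ide-pm.c above carries the suspend/resume state machine that was lifted out of ide-io.c: a suspend walks IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED (skipping standby for a freeze event), and a resume walks IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA before completing. A standalone sketch of that step progression; the enum ordering below is invented for the example and only the transitions mirror ide_complete_power_step():

    #include <stdio.h>

    enum pm_step {
            IDE_PM_FLUSH_CACHE,                    /* suspend step 1 */
            IDE_PM_STANDBY,                        /* suspend step 2 */
            IDE_PM_RESTORE_PIO,                    /* resume step 1  */
            IDE_PM_IDLE,                           /* resume step 2  */
            IDE_PM_RESTORE_DMA,                    /* resume step 3  */
            IDE_PM_COMPLETED,
    };

    /* Next step once the current one has finished. */
    static enum pm_step next_step(enum pm_step step, int freeze)
    {
            switch (step) {
            case IDE_PM_FLUSH_CACHE:
                    return freeze ? IDE_PM_COMPLETED : IDE_PM_STANDBY;
            case IDE_PM_STANDBY:
                    return IDE_PM_COMPLETED;
            case IDE_PM_RESTORE_PIO:
                    return IDE_PM_IDLE;
            case IDE_PM_IDLE:
                    return IDE_PM_RESTORE_DMA;
            default:
                    return IDE_PM_COMPLETED;
            }
    }

    int main(void)
    {
            enum pm_step s = IDE_PM_RESTORE_PIO;

            while (s != IDE_PM_COMPLETED) {
                    printf("resume step %d\n", s);
                    s = next_step(s, 0);
            }
            printf("resume completed\n");
            return 0;
    }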
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index c55bdbd22314..a64ec259f3d1 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -110,20 +110,22 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
110 * read and parse the results. This function is run with 110 * read and parse the results. This function is run with
111 * interrupts disabled. 111 * interrupts disabled.
112 */ 112 */
113 113
114static inline void do_identify (ide_drive_t *drive, u8 cmd) 114static void do_identify(ide_drive_t *drive, u8 cmd)
115{ 115{
116 ide_hwif_t *hwif = HWIF(drive); 116 ide_hwif_t *hwif = HWIF(drive);
117 u16 *id = drive->id; 117 u16 *id = drive->id;
118 char *m = (char *)&id[ATA_ID_PROD]; 118 char *m = (char *)&id[ATA_ID_PROD];
119 unsigned long flags;
119 int bswap = 1, is_cfa; 120 int bswap = 1, is_cfa;
120 121
122 /* local CPU only; some systems need this */
123 local_irq_save(flags);
121 /* read 512 bytes of id info */ 124 /* read 512 bytes of id info */
122 hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); 125 hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
126 local_irq_restore(flags);
123 127
124 drive->dev_flags |= IDE_DFLAG_ID_READ; 128 drive->dev_flags |= IDE_DFLAG_ID_READ;
125
126 local_irq_enable();
127#ifdef DEBUG 129#ifdef DEBUG
128 printk(KERN_INFO "%s: dumping identify data\n", drive->name); 130 printk(KERN_INFO "%s: dumping identify data\n", drive->name);
129 ide_dump_identify((u8 *)id); 131 ide_dump_identify((u8 *)id);
@@ -306,17 +308,12 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
306 s = tp_ops->read_status(hwif); 308 s = tp_ops->read_status(hwif);
307 309
308 if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) { 310 if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
309 unsigned long flags;
310
311 /* local CPU only; some systems need this */
312 local_irq_save(flags);
313 /* drive returned ID */ 311 /* drive returned ID */
314 do_identify(drive, cmd); 312 do_identify(drive, cmd);
315 /* drive responded with ID */ 313 /* drive responded with ID */
316 rc = 0; 314 rc = 0;
317 /* clear drive IRQ */ 315 /* clear drive IRQ */
318 (void)tp_ops->read_status(hwif); 316 (void)tp_ops->read_status(hwif);
319 local_irq_restore(flags);
320 } else { 317 } else {
321 /* drive refused ID */ 318 /* drive refused ID */
322 rc = 2; 319 rc = 2;
@@ -554,8 +551,8 @@ static void enable_nest (ide_drive_t *drive)
554 * 1 device was found 551 * 1 device was found
555 * (note: IDE_DFLAG_PRESENT might still be not set) 552 * (note: IDE_DFLAG_PRESENT might still be not set)
556 */ 553 */
557 554
558static inline u8 probe_for_drive (ide_drive_t *drive) 555static u8 probe_for_drive(ide_drive_t *drive)
559{ 556{
560 char *m; 557 char *m;
561 558
@@ -642,7 +639,7 @@ static int ide_register_port(ide_hwif_t *hwif)
642 int ret; 639 int ret;
643 640
644 /* register with global device tree */ 641 /* register with global device tree */
645 strlcpy(hwif->gendev.bus_id,hwif->name,BUS_ID_SIZE); 642 dev_set_name(&hwif->gendev, hwif->name);
646 hwif->gendev.driver_data = hwif; 643 hwif->gendev.driver_data = hwif;
647 if (hwif->gendev.parent == NULL) { 644 if (hwif->gendev.parent == NULL) {
648 if (hwif->dev) 645 if (hwif->dev)
@@ -864,31 +861,6 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
864} 861}
865 862
866/* 863/*
867 * save_match() is used to simplify logic in init_irq() below.
868 *
869 * A loophole here is that we may not know about a particular
870 * hwif's irq until after that hwif is actually probed/initialized..
871 * This could be a problem for the case where an hwif is on a
872 * dual interface that requires serialization (eg. cmd640) and another
873 * hwif using one of the same irqs is initialized beforehand.
874 *
875 * This routine detects and reports such situations, but does not fix them.
876 */
877static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
878{
879 ide_hwif_t *m = *match;
880
881 if (m && m->hwgroup && m->hwgroup != new->hwgroup) {
882 if (!new->hwgroup)
883 return;
884 printk(KERN_WARNING "%s: potential IRQ problem with %s and %s\n",
885 hwif->name, new->name, m->name);
886 }
887 if (!m || m->irq != hwif->irq) /* don't undo a prior perfect match */
888 *match = new;
889}
890
891/*
892 * init request queue 864 * init request queue
893 */ 865 */
894static int ide_init_queue(ide_drive_t *drive) 866static int ide_init_queue(ide_drive_t *drive)
@@ -906,7 +878,8 @@ static int ide_init_queue(ide_drive_t *drive)
906 * do not. 878 * do not.
907 */ 879 */
908 880
909 q = blk_init_queue_node(do_ide_request, &ide_lock, hwif_to_node(hwif)); 881 q = blk_init_queue_node(do_ide_request, &hwif->hwgroup->lock,
882 hwif_to_node(hwif));
910 if (!q) 883 if (!q)
911 return 1; 884 return 1;
912 885
@@ -947,7 +920,7 @@ static void ide_add_drive_to_hwgroup(ide_drive_t *drive)
947{ 920{
948 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup; 921 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
949 922
950 spin_lock_irq(&ide_lock); 923 spin_lock_irq(&hwgroup->lock);
951 if (!hwgroup->drive) { 924 if (!hwgroup->drive) {
952 /* first drive for hwgroup. */ 925 /* first drive for hwgroup. */
953 drive->next = drive; 926 drive->next = drive;
@@ -957,7 +930,7 @@ static void ide_add_drive_to_hwgroup(ide_drive_t *drive)
957 drive->next = hwgroup->drive->next; 930 drive->next = hwgroup->drive->next;
958 hwgroup->drive->next = drive; 931 hwgroup->drive->next = drive;
959 } 932 }
960 spin_unlock_irq(&ide_lock); 933 spin_unlock_irq(&hwgroup->lock);
961} 934}
962 935
963/* 936/*
@@ -1002,7 +975,7 @@ void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
1002 975
1003 ide_ports[hwif->index] = NULL; 976 ide_ports[hwif->index] = NULL;
1004 977
1005 spin_lock_irq(&ide_lock); 978 spin_lock_irq(&hwgroup->lock);
1006 /* 979 /*
1007 * Remove us from the hwgroup, and free 980 * Remove us from the hwgroup, and free
1008 * the hwgroup if we were the only member 981 * the hwgroup if we were the only member
@@ -1030,7 +1003,7 @@ void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
1030 } 1003 }
1031 BUG_ON(hwgroup->hwif == hwif); 1004 BUG_ON(hwgroup->hwif == hwif);
1032 } 1005 }
1033 spin_unlock_irq(&ide_lock); 1006 spin_unlock_irq(&hwgroup->lock);
1034} 1007}
1035 1008
1036/* 1009/*
@@ -1051,27 +1024,13 @@ static int init_irq (ide_hwif_t *hwif)
1051 mutex_lock(&ide_cfg_mtx); 1024 mutex_lock(&ide_cfg_mtx);
1052 hwif->hwgroup = NULL; 1025 hwif->hwgroup = NULL;
1053 1026
1054 /*
1055 * Group up with any other hwifs that share our irq(s).
1056 */
1057 for (index = 0; index < MAX_HWIFS; index++) { 1027 for (index = 0; index < MAX_HWIFS; index++) {
1058 ide_hwif_t *h = ide_ports[index]; 1028 ide_hwif_t *h = ide_ports[index];
1059 1029
1060 if (h && h->hwgroup) { /* scan only initialized ports */ 1030 if (h && h->hwgroup) { /* scan only initialized ports */
1061 if (hwif->irq == h->irq) { 1031 if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
1062 hwif->sharing_irq = h->sharing_irq = 1; 1032 if (hwif->host == h->host)
1063 if (hwif->chipset != ide_pci || 1033 match = h;
1064 h->chipset != ide_pci) {
1065 save_match(hwif, h, &match);
1066 }
1067 }
1068 if (hwif->serialized) {
1069 if (hwif->mate && hwif->mate->irq == h->irq)
1070 save_match(hwif, h, &match);
1071 }
1072 if (h->serialized) {
1073 if (h->mate && hwif->irq == h->mate->irq)
1074 save_match(hwif, h, &match);
1075 } 1034 }
1076 } 1035 }
1077 } 1036 }
@@ -1092,17 +1051,19 @@ static int init_irq (ide_hwif_t *hwif)
1092 * linked list, the first entry is the hwif that owns 1051 * linked list, the first entry is the hwif that owns
1093 * hwgroup->handler - do not change that. 1052 * hwgroup->handler - do not change that.
1094 */ 1053 */
1095 spin_lock_irq(&ide_lock); 1054 spin_lock_irq(&hwgroup->lock);
1096 hwif->next = hwgroup->hwif->next; 1055 hwif->next = hwgroup->hwif->next;
1097 hwgroup->hwif->next = hwif; 1056 hwgroup->hwif->next = hwif;
1098 BUG_ON(hwif->next == hwif); 1057 BUG_ON(hwif->next == hwif);
1099 spin_unlock_irq(&ide_lock); 1058 spin_unlock_irq(&hwgroup->lock);
1100 } else { 1059 } else {
1101 hwgroup = kmalloc_node(sizeof(*hwgroup), GFP_KERNEL|__GFP_ZERO, 1060 hwgroup = kmalloc_node(sizeof(*hwgroup), GFP_KERNEL|__GFP_ZERO,
1102 hwif_to_node(hwif)); 1061 hwif_to_node(hwif));
1103 if (hwgroup == NULL) 1062 if (hwgroup == NULL)
1104 goto out_up; 1063 goto out_up;
1105 1064
1065 spin_lock_init(&hwgroup->lock);
1066
1106 hwif->hwgroup = hwgroup; 1067 hwif->hwgroup = hwgroup;
1107 hwgroup->hwif = hwif->next = hwif; 1068 hwgroup->hwif = hwif->next = hwif;
1108 1069
@@ -1122,8 +1083,7 @@ static int init_irq (ide_hwif_t *hwif)
1122 sa = IRQF_SHARED; 1083 sa = IRQF_SHARED;
1123#endif /* __mc68000__ */ 1084#endif /* __mc68000__ */
1124 1085
1125 if (hwif->chipset == ide_pci || hwif->chipset == ide_cmd646 || 1086 if (hwif->chipset == ide_pci)
1126 hwif->chipset == ide_ali14xx)
1127 sa = IRQF_SHARED; 1087 sa = IRQF_SHARED;
1128 1088
1129 if (io_ports->ctl_addr) 1089 if (io_ports->ctl_addr)
@@ -1150,8 +1110,7 @@ static int init_irq (ide_hwif_t *hwif)
1150 io_ports->data_addr, hwif->irq); 1110 io_ports->data_addr, hwif->irq);
1151#endif /* __mc68000__ */ 1111#endif /* __mc68000__ */
1152 if (match) 1112 if (match)
1153 printk(KERN_CONT " (%sed with %s)", 1113 printk(KERN_CONT " (serialized with %s)", match->name);
1154 hwif->sharing_irq ? "shar" : "serializ", match->name);
1155 printk(KERN_CONT "\n"); 1114 printk(KERN_CONT "\n");
1156 1115
1157 mutex_unlock(&ide_cfg_mtx); 1116 mutex_unlock(&ide_cfg_mtx);
@@ -1263,20 +1222,21 @@ static void ide_remove_drive_from_hwgroup(ide_drive_t *drive)
1263static void drive_release_dev (struct device *dev) 1222static void drive_release_dev (struct device *dev)
1264{ 1223{
1265 ide_drive_t *drive = container_of(dev, ide_drive_t, gendev); 1224 ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
1225 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
1266 1226
1267 ide_proc_unregister_device(drive); 1227 ide_proc_unregister_device(drive);
1268 1228
1269 spin_lock_irq(&ide_lock); 1229 spin_lock_irq(&hwgroup->lock);
1270 ide_remove_drive_from_hwgroup(drive); 1230 ide_remove_drive_from_hwgroup(drive);
1271 kfree(drive->id); 1231 kfree(drive->id);
1272 drive->id = NULL; 1232 drive->id = NULL;
1273 drive->dev_flags &= ~IDE_DFLAG_PRESENT; 1233 drive->dev_flags &= ~IDE_DFLAG_PRESENT;
1274 /* Messed up locking ... */ 1234 /* Messed up locking ... */
1275 spin_unlock_irq(&ide_lock); 1235 spin_unlock_irq(&hwgroup->lock);
1276 blk_cleanup_queue(drive->queue); 1236 blk_cleanup_queue(drive->queue);
1277 spin_lock_irq(&ide_lock); 1237 spin_lock_irq(&hwgroup->lock);
1278 drive->queue = NULL; 1238 drive->queue = NULL;
1279 spin_unlock_irq(&ide_lock); 1239 spin_unlock_irq(&hwgroup->lock);
1280 1240
1281 complete(&drive->gendev_rel_comp); 1241 complete(&drive->gendev_rel_comp);
1282} 1242}
@@ -1352,7 +1312,7 @@ static void hwif_register_devices(ide_hwif_t *hwif)
1352 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 1312 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
1353 continue; 1313 continue;
1354 1314
1355 snprintf(dev->bus_id, BUS_ID_SIZE, "%u.%u", hwif->index, i); 1315 dev_set_name(dev, "%u.%u", hwif->index, i);
1356 dev->parent = &hwif->gendev; 1316 dev->parent = &hwif->gendev;
1357 dev->bus = &ide_bus_type; 1317 dev->bus = &ide_bus_type;
1358 dev->driver_data = drive; 1318 dev->driver_data = drive;
@@ -1436,13 +1396,11 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1436 } 1396 }
1437 1397
1438 if ((d->host_flags & IDE_HFLAG_SERIALIZE) || 1398 if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
1439 ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) { 1399 ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
1440 if (hwif->mate) 1400 hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
1441 hwif->mate->serialized = hwif->serialized = 1;
1442 }
1443 1401
1444 if (d->host_flags & IDE_HFLAG_RQSIZE_256) 1402 if (d->max_sectors)
1445 hwif->rqsize = 256; 1403 hwif->rqsize = d->max_sectors;
1446 1404
1447 /* call chipset specific routine for each enabled port */ 1405 /* call chipset specific routine for each enabled port */
1448 if (d->init_hwif) 1406 if (d->init_hwif)
@@ -1794,59 +1752,3 @@ void ide_port_scan(ide_hwif_t *hwif)
1794 ide_proc_port_register_devices(hwif); 1752 ide_proc_port_register_devices(hwif);
1795} 1753}
1796EXPORT_SYMBOL_GPL(ide_port_scan); 1754EXPORT_SYMBOL_GPL(ide_port_scan);
1797
1798static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
1799 u8 port_no, const struct ide_port_info *d,
1800 unsigned long config)
1801{
1802 unsigned long base, ctl;
1803 int irq;
1804
1805 if (port_no == 0) {
1806 base = 0x1f0;
1807 ctl = 0x3f6;
1808 irq = 14;
1809 } else {
1810 base = 0x170;
1811 ctl = 0x376;
1812 irq = 15;
1813 }
1814
1815 if (!request_region(base, 8, d->name)) {
1816 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
1817 d->name, base, base + 7);
1818 return;
1819 }
1820
1821 if (!request_region(ctl, 1, d->name)) {
1822 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
1823 d->name, ctl);
1824 release_region(base, 8);
1825 return;
1826 }
1827
1828 ide_std_init_ports(hw, base, ctl);
1829 hw->irq = irq;
1830 hw->chipset = d->chipset;
1831 hw->config = config;
1832
1833 hws[port_no] = hw;
1834}
1835
1836int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
1837{
1838 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
1839
1840 memset(&hw, 0, sizeof(hw));
1841
1842 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
1843 ide_legacy_init_one(hws, &hw[0], 0, d, config);
1844 ide_legacy_init_one(hws, &hw[1], 1, d, config);
1845
1846 if (hws[0] == NULL && hws[1] == NULL &&
1847 (d->host_flags & IDE_HFLAG_SINGLE))
1848 return -ENOENT;
1849
1850 return ide_host_add(d, hws, NULL);
1851}
1852EXPORT_SYMBOL_GPL(ide_legacy_device_add);
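The thread running through the ide-probe.c hunks above is the replacement of the global ide_lock with a spinlock embedded in each hwgroup, so that unrelated ports no longer serialize on one lock. A minimal sketch of that pattern, with hypothetical demo_* types standing in for ide_hwgroup_t and ide_drive_t, might look like this:

#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_drive;

struct demo_hwgroup {
	spinlock_t		lock;	/* replaces the global ide_lock */
	struct demo_drive	*drive;	/* head of the circular drive list */
};

struct demo_drive {
	struct demo_hwgroup	*hwgroup;
	struct demo_drive	*next;
};

static struct demo_hwgroup *demo_hwgroup_alloc(int node)
{
	struct demo_hwgroup *hwgroup;

	hwgroup = kmalloc_node(sizeof(*hwgroup), GFP_KERNEL | __GFP_ZERO, node);
	if (!hwgroup)
		return NULL;

	/* Each group initializes its own lock, as in the new init_irq(). */
	spin_lock_init(&hwgroup->lock);
	return hwgroup;
}

static void demo_add_drive(struct demo_drive *drive)
{
	struct demo_hwgroup *hwgroup = drive->hwgroup;

	/* Serialize list manipulation against the group's own IRQ handler. */
	spin_lock_irq(&hwgroup->lock);
	if (!hwgroup->drive) {
		drive->next = drive;
		hwgroup->drive = drive;
	} else {
		drive->next = hwgroup->drive->next;
		hwgroup->drive->next = drive;
	}
	spin_unlock_irq(&hwgroup->lock);
}

The allocation and spin_lock_init() mirror the new else-branch of init_irq(); the list insertion mirrors ide_add_drive_to_hwgroup() after the conversion.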
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index f3cddd1b2f8f..a14e2938e4f3 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -46,10 +46,6 @@ static int proc_ide_read_imodel
46 case ide_qd65xx: name = "qd65xx"; break; 46 case ide_qd65xx: name = "qd65xx"; break;
47 case ide_umc8672: name = "umc8672"; break; 47 case ide_umc8672: name = "umc8672"; break;
48 case ide_ht6560b: name = "ht6560b"; break; 48 case ide_ht6560b: name = "ht6560b"; break;
49 case ide_rz1000: name = "rz1000"; break;
50 case ide_trm290: name = "trm290"; break;
51 case ide_cmd646: name = "cmd646"; break;
52 case ide_cy82c693: name = "cy82c693"; break;
53 case ide_4drives: name = "4drives"; break; 49 case ide_4drives: name = "4drives"; break;
54 case ide_pmac: name = "mac-io"; break; 50 case ide_pmac: name = "mac-io"; break;
55 case ide_au1xxx: name = "au1xxx"; break; 51 case ide_au1xxx: name = "au1xxx"; break;
@@ -155,13 +151,8 @@ static int ide_read_setting(ide_drive_t *drive,
155 const struct ide_devset *ds = setting->setting; 151 const struct ide_devset *ds = setting->setting;
156 int val = -EINVAL; 152 int val = -EINVAL;
157 153
158 if (ds->get) { 154 if (ds->get)
159 unsigned long flags;
160
161 spin_lock_irqsave(&ide_lock, flags);
162 val = ds->get(drive); 155 val = ds->get(drive);
163 spin_unlock_irqrestore(&ide_lock, flags);
164 }
165 156
166 return val; 157 return val;
167} 158}
@@ -583,31 +574,19 @@ EXPORT_SYMBOL(ide_proc_register_driver);
583 * Clean up the driver specific /proc files and IDE settings 574 * Clean up the driver specific /proc files and IDE settings
584 * for a given drive. 575 * for a given drive.
585 * 576 *
586 * Takes ide_setting_mtx and ide_lock. 577 * Takes ide_setting_mtx.
587 * Caller must hold none of the locks.
588 */ 578 */
589 579
590void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) 580void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
591{ 581{
592 unsigned long flags;
593
594 ide_remove_proc_entries(drive->proc, driver->proc_entries(drive)); 582 ide_remove_proc_entries(drive->proc, driver->proc_entries(drive));
595 583
596 mutex_lock(&ide_setting_mtx); 584 mutex_lock(&ide_setting_mtx);
597 spin_lock_irqsave(&ide_lock, flags);
598 /* 585 /*
599 * ide_setting_mtx protects the settings list 586 * ide_setting_mtx protects both the settings list and the use
600 * ide_lock protects the use of settings 587 * of settings (we cannot take a setting out that is being used).
601 *
602 * so we need to hold both, ide_settings_sem because we want to
603 * modify the settings list, and ide_lock because we cannot take
604 * a setting out that is being used.
605 *
606 * OTOH both ide_{read,write}_setting are only ever used under
607 * ide_setting_mtx.
608 */ 588 */
609 drive->settings = NULL; 589 drive->settings = NULL;
610 spin_unlock_irqrestore(&ide_lock, flags);
611 mutex_unlock(&ide_setting_mtx); 590 mutex_unlock(&ide_setting_mtx);
612} 591}
613EXPORT_SYMBOL(ide_proc_unregister_driver); 592EXPORT_SYMBOL(ide_proc_unregister_driver);
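The ide-proc.c hunks drop the nested ide_lock around ds->get() and around clearing drive->settings because, as the rewritten comment says, ide_setting_mtx alone now covers both the settings list and its use. A rough sketch of that simplified rule, using placeholder demo_* names rather than the real devset machinery:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(demo_setting_mtx);

struct demo_devset {
	int (*get)(void *drive);
};

static struct demo_devset *demo_settings;	/* stands in for drive->settings */

/* Read a setting: the mutex alone is enough, no spinlock nesting. */
static int demo_read_setting(void *drive, const struct demo_devset *ds)
{
	int val = -EINVAL;

	mutex_lock(&demo_setting_mtx);
	if (ds->get)
		val = ds->get(drive);
	mutex_unlock(&demo_setting_mtx);

	return val;
}

/* Tear down: taking the same mutex excludes any concurrent reader. */
static void demo_unregister_settings(void)
{
	mutex_lock(&demo_setting_mtx);
	demo_settings = NULL;
	mutex_unlock(&demo_setting_mtx);
}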
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 04f8f13cb9d7..f0f09f702e9c 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -74,9 +74,6 @@ static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR,
74 74
75DEFINE_MUTEX(ide_cfg_mtx); 75DEFINE_MUTEX(ide_cfg_mtx);
76 76
77__cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
78EXPORT_SYMBOL(ide_lock);
79
80static void ide_port_init_devices_data(ide_hwif_t *); 77static void ide_port_init_devices_data(ide_hwif_t *);
81 78
82/* 79/*
@@ -130,7 +127,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
130 } 127 }
131} 128}
132 129
133/* Called with ide_lock held. */
134static void __ide_port_unregister_devices(ide_hwif_t *hwif) 130static void __ide_port_unregister_devices(ide_hwif_t *hwif)
135{ 131{
136 int i; 132 int i;
@@ -139,10 +135,8 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif)
139 ide_drive_t *drive = &hwif->drives[i]; 135 ide_drive_t *drive = &hwif->drives[i];
140 136
141 if (drive->dev_flags & IDE_DFLAG_PRESENT) { 137 if (drive->dev_flags & IDE_DFLAG_PRESENT) {
142 spin_unlock_irq(&ide_lock);
143 device_unregister(&drive->gendev); 138 device_unregister(&drive->gendev);
144 wait_for_completion(&drive->gendev_rel_comp); 139 wait_for_completion(&drive->gendev_rel_comp);
145 spin_lock_irq(&ide_lock);
146 } 140 }
147 } 141 }
148} 142}
@@ -150,11 +144,9 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif)
150void ide_port_unregister_devices(ide_hwif_t *hwif) 144void ide_port_unregister_devices(ide_hwif_t *hwif)
151{ 145{
152 mutex_lock(&ide_cfg_mtx); 146 mutex_lock(&ide_cfg_mtx);
153 spin_lock_irq(&ide_lock);
154 __ide_port_unregister_devices(hwif); 147 __ide_port_unregister_devices(hwif);
155 hwif->present = 0; 148 hwif->present = 0;
156 ide_port_init_devices_data(hwif); 149 ide_port_init_devices_data(hwif);
157 spin_unlock_irq(&ide_lock);
158 mutex_unlock(&ide_cfg_mtx); 150 mutex_unlock(&ide_cfg_mtx);
159} 151}
160EXPORT_SYMBOL_GPL(ide_port_unregister_devices); 152EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
@@ -192,12 +184,10 @@ void ide_unregister(ide_hwif_t *hwif)
192 184
193 mutex_lock(&ide_cfg_mtx); 185 mutex_lock(&ide_cfg_mtx);
194 186
195 spin_lock_irq(&ide_lock);
196 if (hwif->present) { 187 if (hwif->present) {
197 __ide_port_unregister_devices(hwif); 188 __ide_port_unregister_devices(hwif);
198 hwif->present = 0; 189 hwif->present = 0;
199 } 190 }
200 spin_unlock_irq(&ide_lock);
201 191
202 ide_proc_unregister_port(hwif); 192 ide_proc_unregister_port(hwif);
203 193
@@ -340,6 +330,7 @@ static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
340static int set_pio_mode(ide_drive_t *drive, int arg) 330static int set_pio_mode(ide_drive_t *drive, int arg)
341{ 331{
342 ide_hwif_t *hwif = drive->hwif; 332 ide_hwif_t *hwif = drive->hwif;
333 ide_hwgroup_t *hwgroup = hwif->hwgroup;
343 const struct ide_port_ops *port_ops = hwif->port_ops; 334 const struct ide_port_ops *port_ops = hwif->port_ops;
344 335
345 if (arg < 0 || arg > 255) 336 if (arg < 0 || arg > 255)
@@ -354,9 +345,9 @@ static int set_pio_mode(ide_drive_t *drive, int arg)
354 unsigned long flags; 345 unsigned long flags;
355 346
356 /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */ 347 /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
357 spin_lock_irqsave(&ide_lock, flags); 348 spin_lock_irqsave(&hwgroup->lock, flags);
358 port_ops->set_pio_mode(drive, arg); 349 port_ops->set_pio_mode(drive, arg);
359 spin_unlock_irqrestore(&ide_lock, flags); 350 spin_unlock_irqrestore(&hwgroup->lock, flags);
360 } else 351 } else
361 port_ops->set_pio_mode(drive, arg); 352 port_ops->set_pio_mode(drive, arg);
362 } else { 353 } else {
@@ -397,80 +388,6 @@ ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
397ide_ext_devset_rw_sync(using_dma, using_dma); 388ide_ext_devset_rw_sync(using_dma, using_dma);
398__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode); 389__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
399 390
400static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
401{
402 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
403 ide_hwif_t *hwif = HWIF(drive);
404 struct request *rq;
405 struct request_pm_state rqpm;
406 ide_task_t args;
407 int ret;
408
409 /* call ACPI _GTM only once */
410 if ((drive->dn & 1) == 0 || pair == NULL)
411 ide_acpi_get_timing(hwif);
412
413 memset(&rqpm, 0, sizeof(rqpm));
414 memset(&args, 0, sizeof(args));
415 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
416 rq->cmd_type = REQ_TYPE_PM_SUSPEND;
417 rq->special = &args;
418 rq->data = &rqpm;
419 rqpm.pm_step = IDE_PM_START_SUSPEND;
420 if (mesg.event == PM_EVENT_PRETHAW)
421 mesg.event = PM_EVENT_FREEZE;
422 rqpm.pm_state = mesg.event;
423
424 ret = blk_execute_rq(drive->queue, NULL, rq, 0);
425 blk_put_request(rq);
426
427 /* call ACPI _PS3 only after both devices are suspended */
428 if (ret == 0 && ((drive->dn & 1) || pair == NULL))
429 ide_acpi_set_state(hwif, 0);
430
431 return ret;
432}
433
434static int generic_ide_resume(struct device *dev)
435{
436 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
437 ide_hwif_t *hwif = HWIF(drive);
438 struct request *rq;
439 struct request_pm_state rqpm;
440 ide_task_t args;
441 int err;
442
443 /* call ACPI _PS0 / _STM only once */
444 if ((drive->dn & 1) == 0 || pair == NULL) {
445 ide_acpi_set_state(hwif, 1);
446 ide_acpi_push_timing(hwif);
447 }
448
449 ide_acpi_exec_tfs(drive);
450
451 memset(&rqpm, 0, sizeof(rqpm));
452 memset(&args, 0, sizeof(args));
453 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
454 rq->cmd_type = REQ_TYPE_PM_RESUME;
455 rq->cmd_flags |= REQ_PREEMPT;
456 rq->special = &args;
457 rq->data = &rqpm;
458 rqpm.pm_step = IDE_PM_START_RESUME;
459 rqpm.pm_state = PM_EVENT_ON;
460
461 err = blk_execute_rq(drive->queue, NULL, rq, 1);
462 blk_put_request(rq);
463
464 if (err == 0 && dev->driver) {
465 ide_driver_t *drv = to_ide_driver(dev->driver);
466
467 if (drv->resume)
468 drv->resume(drive);
469 }
470
471 return err;
472}
473
474/** 391/**
475 * ide_device_get - get an additional reference to a ide_drive_t 392 * ide_device_get - get an additional reference to a ide_drive_t
476 * @drive: device to get a reference to 393 * @drive: device to get a reference to
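The large deletion above removes generic_ide_suspend()/generic_ide_resume() from ide.c; the same logic presumably lives on elsewhere after this series rather than disappearing outright. The shape of what they did is still worth recording: power management is driven by a special request queued to the drive, not by touching the hardware from the PM callback. The following is a condensed copy of the deleted suspend body (ACPI calls dropped), shown only as a sketch of that request:

#include <linux/blkdev.h>
#include <linux/ide.h>
#include <linux/pm.h>
#include <linux/string.h>

static int demo_queue_pm_suspend(ide_drive_t *drive, pm_message_t mesg)
{
	struct request *rq;
	struct request_pm_state rqpm;
	ide_task_t args;
	int ret;

	memset(&rqpm, 0, sizeof(rqpm));
	memset(&args, 0, sizeof(args));

	/* Allocate a request on the drive's own queue and mark it as PM. */
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_SUSPEND;
	rq->special = &args;
	rq->data = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	rqpm.pm_state = mesg.event;

	/* Execute synchronously; the IDE state machine walks the PM steps. */
	ret = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	return ret;
}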
diff --git a/drivers/ide/ide_arm.c b/drivers/ide/ide_arm.c
index f728f2927b5a..bdcac94d7c1f 100644
--- a/drivers/ide/ide_arm.c
+++ b/drivers/ide/ide_arm.c
@@ -15,15 +15,8 @@
15 15
16#define DRV_NAME "ide_arm" 16#define DRV_NAME "ide_arm"
17 17
18#ifdef CONFIG_ARCH_CLPS7500 18#define IDE_ARM_IO 0x1f0
19# include <mach/hardware.h> 19#define IDE_ARM_IRQ IRQ_HARDDISK
20#
21# define IDE_ARM_IO (ISASLOT_IO + 0x1f0)
22# define IDE_ARM_IRQ IRQ_ISA_14
23#else
24# define IDE_ARM_IO 0x1f0
25# define IDE_ARM_IRQ IRQ_HARDDISK
26#endif
27 20
28static int __init ide_arm_init(void) 21static int __init ide_arm_init(void)
29{ 22{
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 799557c25eef..624e62e5cc9a 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -350,16 +350,17 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
350 .dma_timeout = pdc202xx_dma_timeout, 350 .dma_timeout = pdc202xx_dma_timeout,
351}; 351};
352 352
353#define DECLARE_PDC2026X_DEV(udma, extra_flags) \ 353#define DECLARE_PDC2026X_DEV(udma, sectors) \
354 { \ 354 { \
355 .name = DRV_NAME, \ 355 .name = DRV_NAME, \
356 .init_chipset = init_chipset_pdc202xx, \ 356 .init_chipset = init_chipset_pdc202xx, \
357 .port_ops = &pdc2026x_port_ops, \ 357 .port_ops = &pdc2026x_port_ops, \
358 .dma_ops = &pdc2026x_dma_ops, \ 358 .dma_ops = &pdc2026x_dma_ops, \
359 .host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \ 359 .host_flags = IDE_HFLAGS_PDC202XX, \
360 .pio_mask = ATA_PIO4, \ 360 .pio_mask = ATA_PIO4, \
361 .mwdma_mask = ATA_MWDMA2, \ 361 .mwdma_mask = ATA_MWDMA2, \
362 .udma_mask = udma, \ 362 .udma_mask = udma, \
363 .max_sectors = sectors, \
363 } 364 }
364 365
365static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { 366static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
@@ -376,8 +377,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
376 377
377 /* 1: PDC2026{2,3} */ 378 /* 1: PDC2026{2,3} */
378 DECLARE_PDC2026X_DEV(ATA_UDMA4, 0), 379 DECLARE_PDC2026X_DEV(ATA_UDMA4, 0),
379 /* 2: PDC2026{5,7} */ 380 /* 2: PDC2026{5,7}: UDMA5, limit LBA48 requests to 256 sectors */
380 DECLARE_PDC2026X_DEV(ATA_UDMA5, IDE_HFLAG_RQSIZE_256), 381 DECLARE_PDC2026X_DEV(ATA_UDMA5, 256),
381}; 382};
382 383
383/** 384/**
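The pdc202xx_old change pairs with the earlier ide-probe.c hunk: instead of a dedicated IDE_HFLAG_RQSIZE_256 flag, a driver now states its request-size limit directly in .max_sectors and the core copies it into hwif->rqsize. A small sketch of the two halves, with demo_* placeholders standing in for the real ide_port_info handling:

/* Driver side: declare the limit in the port template. */
struct demo_port_info {
	unsigned int	max_sectors;	/* 0 = no special limit */
};

static const struct demo_port_info demo_pdc2026x = {
	.max_sectors	= 256,	/* limit LBA48 requests, as for PDC2026{5,7} */
};

/* Core side: apply it while initializing the port (cf. ide_init_port()). */
static void demo_init_port(unsigned int *rqsize, const struct demo_port_info *d)
{
	if (d->max_sectors)
		*rqsize = d->max_sectors;
}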
diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
index 7daf0135cbac..a6414a884eb1 100644
--- a/drivers/ide/rz1000.c
+++ b/drivers/ide/rz1000.c
@@ -22,34 +22,48 @@
22 22
23#define DRV_NAME "rz1000" 23#define DRV_NAME "rz1000"
24 24
25static void __devinit init_hwif_rz1000 (ide_hwif_t *hwif) 25static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
26{ 26{
27 struct pci_dev *dev = to_pci_dev(hwif->dev);
28 u16 reg; 27 u16 reg;
29 28
30 if (!pci_read_config_word (dev, 0x40, &reg) && 29 if (!pci_read_config_word (dev, 0x40, &reg) &&
31 !pci_write_config_word(dev, 0x40, reg & 0xdfff)) { 30 !pci_write_config_word(dev, 0x40, reg & 0xdfff)) {
32 printk(KERN_INFO "%s: disabled chipset read-ahead " 31 printk(KERN_INFO "%s: disabled chipset read-ahead "
33 "(buggy RZ1000/RZ1001)\n", hwif->name); 32 "(buggy RZ1000/RZ1001)\n", pci_name(dev));
33 return 0;
34 } else { 34 } else {
35 if (hwif->mate)
36 hwif->mate->serialized = hwif->serialized = 1;
37 hwif->host_flags |= IDE_HFLAG_NO_UNMASK_IRQS;
38 printk(KERN_INFO "%s: serialized, disabled unmasking " 35 printk(KERN_INFO "%s: serialized, disabled unmasking "
39 "(buggy RZ1000/RZ1001)\n", hwif->name); 36 "(buggy RZ1000/RZ1001)\n", pci_name(dev));
37 return 1;
40 } 38 }
41} 39}
42 40
43static const struct ide_port_info rz1000_chipset __devinitdata = { 41static const struct ide_port_info rz1000_chipset __devinitdata = {
44 .name = DRV_NAME, 42 .name = DRV_NAME,
45 .init_hwif = init_hwif_rz1000,
46 .chipset = ide_rz1000,
47 .host_flags = IDE_HFLAG_NO_DMA, 43 .host_flags = IDE_HFLAG_NO_DMA,
48}; 44};
49 45
50static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id) 46static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
51{ 47{
52 return ide_pci_init_one(dev, &rz1000_chipset, NULL); 48 struct ide_port_info d = rz1000_chipset;
49 int rc;
50
51 rc = pci_enable_device(dev);
52 if (rc)
53 return rc;
54
55 if (rz1000_disable_readahead(dev)) {
56 d.host_flags |= IDE_HFLAG_SERIALIZE;
57 d.host_flags |= IDE_HFLAG_NO_UNMASK_IRQS;
58 }
59
60 return ide_pci_init_one(dev, &d, NULL);
61}
62
63static void rz1000_remove(struct pci_dev *dev)
64{
65 ide_pci_remove(dev);
66 pci_disable_device(dev);
53} 67}
54 68
55static const struct pci_device_id rz1000_pci_tbl[] = { 69static const struct pci_device_id rz1000_pci_tbl[] = {
@@ -63,7 +77,7 @@ static struct pci_driver rz1000_pci_driver = {
63 .name = "RZ1000_IDE", 77 .name = "RZ1000_IDE",
64 .id_table = rz1000_pci_tbl, 78 .id_table = rz1000_pci_tbl,
65 .probe = rz1000_init_one, 79 .probe = rz1000_init_one,
66 .remove = ide_pci_remove, 80 .remove = rz1000_remove,
67}; 81};
68 82
69static int __init rz1000_ide_init(void) 83static int __init rz1000_ide_init(void)
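The rewritten rz1000 probe illustrates a generic trick now that the driver no longer pokes hwif->serialized directly: the const __devinitdata template stays minimal, and the probe routine works on a stack copy whose host_flags are adjusted according to what the hardware actually needs before it is handed to ide_pci_init_one(). In outline, with demo_* stand-ins for the real types and flags:

#define DEMO_HFLAG_SERIALIZE	(1 << 0)

struct demo_port_info {
	const char	*name;
	unsigned int	host_flags;
};

static const struct demo_port_info demo_template = {
	.name = "demo",
};

/* Probe-time quirk detection; returns nonzero if the workaround is needed. */
static int demo_needs_serialize(void)
{
	return 1;	/* e.g. a buggy chip revision was detected */
}

static int demo_probe(void)
{
	struct demo_port_info d = demo_template;	/* per-device copy */

	if (demo_needs_serialize())
		d.host_flags |= DEMO_HFLAG_SERIALIZE;

	/* ... register the host using &d instead of &demo_template ... */
	return 0;
}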
diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
index 75ea61526566..2a5ea90cf8b8 100644
--- a/drivers/ide/trm290.c
+++ b/drivers/ide/trm290.c
@@ -328,10 +328,10 @@ static struct ide_dma_ops trm290_dma_ops = {
328static const struct ide_port_info trm290_chipset __devinitdata = { 328static const struct ide_port_info trm290_chipset __devinitdata = {
329 .name = DRV_NAME, 329 .name = DRV_NAME,
330 .init_hwif = init_hwif_trm290, 330 .init_hwif = init_hwif_trm290,
331 .chipset = ide_trm290,
332 .port_ops = &trm290_port_ops, 331 .port_ops = &trm290_port_ops,
333 .dma_ops = &trm290_dma_ops, 332 .dma_ops = &trm290_dma_ops,
334 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 333 .host_flags = IDE_HFLAG_TRM290 |
334 IDE_HFLAG_NO_ATAPI_DMA |
335#if 0 /* play it safe for now */ 335#if 0 /* play it safe for now */
336 IDE_HFLAG_TRUST_BIOS_FOR_DMA | 336 IDE_HFLAG_TRUST_BIOS_FOR_DMA |
337#endif 337#endif
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index 9120063e8f87..13b63e7fa353 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -181,7 +181,7 @@ static void tx4938ide_input_data_swap(ide_drive_t *drive, struct request *rq,
181 181
182 while (count--) 182 while (count--)
183 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port)); 183 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
184 __ide_flush_dcache_range((unsigned long)buf, count * 2); 184 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
185} 185}
186 186
187static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq, 187static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq,
@@ -195,7 +195,7 @@ static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq,
195 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port); 195 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
196 ptr++; 196 ptr++;
197 } 197 }
198 __ide_flush_dcache_range((unsigned long)buf, count * 2); 198 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
199} 199}
200 200
201static const struct ide_tp_ops tx4938ide_tp_ops = { 201static const struct ide_tp_ops tx4938ide_tp_ops = {
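The tx4938ide fix (and the matching tx4939ide one further down) looks tiny but matters: by the time the flush runs, the while (count--) loop has already driven count past zero, so "count * 2" no longer describes the buffer. The new roundup(len, 2) is simply the number of bytes the PIO loop actually wrote. A compact illustration of the arithmetic, as a standalone sketch rather than the driver code itself:

#include <linux/kernel.h>	/* roundup() */

static unsigned int demo_bytes_to_flush(unsigned int len)
{
	unsigned int count = (len + 1) / 2;	/* 16-bit words transferred */

	while (count--)
		;	/* stand-in for the __raw_readw()/__raw_writew() copy */

	/*
	 * count has now wrapped around to UINT_MAX, so the old expression
	 * "count * 2" was meaningless at this point.  The bytes actually
	 * touched are the transfer length rounded up to a whole word.
	 */
	return roundup(len, 2);
}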
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index bafb7d1a22e2..97cd9e0f66f6 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -259,6 +259,12 @@ static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq)
259 bcount = 0x10000 - (cur_addr & 0xffff); 259 bcount = 0x10000 - (cur_addr & 0xffff);
260 if (bcount > cur_len) 260 if (bcount > cur_len)
261 bcount = cur_len; 261 bcount = cur_len;
262 /*
 263 * This workaround for a zero count seems to be required
 264 * (the standard ide_build_dmatable() does it too).
265 */
266 if ((bcount & 0xffff) == 0x0000)
267 bcount = 0x8000;
262 *table++ = bcount & 0xffff; 268 *table++ = bcount & 0xffff;
263 *table++ = cur_addr; 269 *table++ = cur_addr;
264 cur_addr += bcount; 270 cur_addr += bcount;
@@ -558,7 +564,7 @@ static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq,
558 564
559 while (count--) 565 while (count--)
560 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port)); 566 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
561 __ide_flush_dcache_range((unsigned long)buf, count * 2); 567 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
562} 568}
563 569
564static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq, 570static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq,
@@ -572,7 +578,7 @@ static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq,
572 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port); 578 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
573 ptr++; 579 ptr++;
574 } 580 }
575 __ide_flush_dcache_range((unsigned long)buf, count * 2); 581 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
576} 582}
577 583
578static const struct ide_tp_ops tx4939ide_tp_ops = { 584static const struct ide_tp_ops tx4939ide_tp_ops = {
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 1da076e0c917..e29978cf6197 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -107,18 +107,21 @@ static void umc_set_speeds(u8 speeds[])
107static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio) 107static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
108{ 108{
109 ide_hwif_t *hwif = drive->hwif; 109 ide_hwif_t *hwif = drive->hwif;
110 unsigned long flags; 110 ide_hwgroup_t *mate_hwgroup = hwif->mate ? hwif->mate->hwgroup : NULL;
111 unsigned long uninitialized_var(flags);
111 112
112 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", 113 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
113 drive->name, pio, pio_to_umc[pio]); 114 drive->name, pio, pio_to_umc[pio]);
114 spin_lock_irqsave(&ide_lock, flags); 115 if (mate_hwgroup)
115 if (hwif->mate && hwif->mate->hwgroup->handler) { 116 spin_lock_irqsave(&mate_hwgroup->lock, flags);
117 if (mate_hwgroup && mate_hwgroup->handler) {
116 printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n"); 118 printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
117 } else { 119 } else {
118 current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio]; 120 current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
119 umc_set_speeds(current_speeds); 121 umc_set_speeds(current_speeds);
120 } 122 }
121 spin_unlock_irqrestore(&ide_lock, flags); 123 if (mate_hwgroup)
124 spin_unlock_irqrestore(&mate_hwgroup->lock, flags);
122} 125}
123 126
124static const struct ide_port_ops umc8672_port_ops = { 127static const struct ide_port_ops umc8672_port_ops = {
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index a5dc78ae62d4..dd0db67bf8d7 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -37,6 +37,7 @@ config INFINIBAND_USER_MEM
37config INFINIBAND_ADDR_TRANS 37config INFINIBAND_ADDR_TRANS
38 bool 38 bool
39 depends on INET 39 depends on INET
40 depends on !(INFINIBAND = y && IPV6 = m)
40 default y 41 default y
41 42
42source "drivers/infiniband/hw/mthca/Kconfig" 43source "drivers/infiniband/hw/mthca/Kconfig"
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index d98b05b28262..ce511d8748ce 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -128,6 +128,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
128 ret = rdma_copy_addr(dev_addr, dev, NULL); 128 ret = rdma_copy_addr(dev_addr, dev, NULL);
129 dev_put(dev); 129 dev_put(dev);
130 break; 130 break;
131
132#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
131 case AF_INET6: 133 case AF_INET6:
132 for_each_netdev(&init_net, dev) { 134 for_each_netdev(&init_net, dev) {
133 if (ipv6_chk_addr(&init_net, 135 if (ipv6_chk_addr(&init_net,
@@ -138,8 +140,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
138 } 140 }
139 } 141 }
140 break; 142 break;
141 default: 143#endif
142 break;
143 } 144 }
144 return ret; 145 return ret;
145} 146}
@@ -179,10 +180,11 @@ static void addr_send_arp(struct sockaddr *dst_in)
179{ 180{
180 struct rtable *rt; 181 struct rtable *rt;
181 struct flowi fl; 182 struct flowi fl;
182 struct dst_entry *dst;
183 183
184 memset(&fl, 0, sizeof fl); 184 memset(&fl, 0, sizeof fl);
185 if (dst_in->sa_family == AF_INET) { 185
186 switch (dst_in->sa_family) {
187 case AF_INET:
186 fl.nl_u.ip4_u.daddr = 188 fl.nl_u.ip4_u.daddr =
187 ((struct sockaddr_in *) dst_in)->sin_addr.s_addr; 189 ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
188 190
@@ -191,8 +193,13 @@ static void addr_send_arp(struct sockaddr *dst_in)
191 193
192 neigh_event_send(rt->u.dst.neighbour, NULL); 194 neigh_event_send(rt->u.dst.neighbour, NULL);
193 ip_rt_put(rt); 195 ip_rt_put(rt);
196 break;
197
198#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
199 case AF_INET6:
200 {
201 struct dst_entry *dst;
194 202
195 } else {
196 fl.nl_u.ip6_u.daddr = 203 fl.nl_u.ip6_u.daddr =
197 ((struct sockaddr_in6 *) dst_in)->sin6_addr; 204 ((struct sockaddr_in6 *) dst_in)->sin6_addr;
198 205
@@ -202,6 +209,9 @@ static void addr_send_arp(struct sockaddr *dst_in)
202 209
203 neigh_event_send(dst->neighbour, NULL); 210 neigh_event_send(dst->neighbour, NULL);
204 dst_release(dst); 211 dst_release(dst);
212 break;
213 }
214#endif
205 } 215 }
206} 216}
207 217
@@ -254,6 +264,7 @@ out:
254 return ret; 264 return ret;
255} 265}
256 266
267#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
257static int addr6_resolve_remote(struct sockaddr_in6 *src_in, 268static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
258 struct sockaddr_in6 *dst_in, 269 struct sockaddr_in6 *dst_in,
259 struct rdma_dev_addr *addr) 270 struct rdma_dev_addr *addr)
@@ -282,6 +293,14 @@ static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
282 dst_release(dst); 293 dst_release(dst);
283 return ret; 294 return ret;
284} 295}
296#else
297static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
298 struct sockaddr_in6 *dst_in,
299 struct rdma_dev_addr *addr)
300{
301 return -EADDRNOTAVAIL;
302}
303#endif
285 304
286static int addr_resolve_remote(struct sockaddr *src_in, 305static int addr_resolve_remote(struct sockaddr *src_in,
287 struct sockaddr *dst_in, 306 struct sockaddr *dst_in,
@@ -340,7 +359,9 @@ static int addr_resolve_local(struct sockaddr *src_in,
340 struct net_device *dev; 359 struct net_device *dev;
341 int ret; 360 int ret;
342 361
343 if (dst_in->sa_family == AF_INET) { 362 switch (dst_in->sa_family) {
363 case AF_INET:
364 {
344 __be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr; 365 __be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr;
345 __be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr; 366 __be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
346 367
@@ -362,7 +383,12 @@ static int addr_resolve_local(struct sockaddr *src_in,
362 memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); 383 memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
363 } 384 }
364 dev_put(dev); 385 dev_put(dev);
365 } else { 386 break;
387 }
388
389#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
390 case AF_INET6:
391 {
366 struct in6_addr *a; 392 struct in6_addr *a;
367 393
368 for_each_netdev(&init_net, dev) 394 for_each_netdev(&init_net, dev)
@@ -390,6 +416,13 @@ static int addr_resolve_local(struct sockaddr *src_in,
390 if (!ret) 416 if (!ret)
391 memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN); 417 memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
392 } 418 }
419 break;
420 }
421#endif
422
423 default:
424 ret = -EADDRNOTAVAIL;
425 break;
393 } 426 }
394 427
395 return ret; 428 return ret;
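The addr.c changes all follow one pattern: real AF_INET6 handling is compiled in only when IPv6 is built-in or modular, and a stub that fails with -EADDRNOTAVAIL is provided otherwise, so the callers need no #ifdefs of their own (the Kconfig hunk additionally forbids the unbuildable INFINIBAND=y with IPV6=m combination). The skeleton of that pattern, with a hypothetical demo_resolve6() in place of addr6_resolve_remote():

#include <linux/errno.h>
#include <linux/in6.h>

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static int demo_resolve6(struct sockaddr_in6 *src, struct sockaddr_in6 *dst)
{
	/* the real IPv6 route/neighbour lookup would go here */
	return 0;
}
#else
static int demo_resolve6(struct sockaddr_in6 *src, struct sockaddr_in6 *dst)
{
	return -EADDRNOTAVAIL;	/* IPv6 not available in this build */
}
#endif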
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 8415ecce5c4c..a3c5af1d7ec0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -699,7 +699,7 @@ repoll:
699 } 699 }
700 700
701 wc->slid = be16_to_cpu(cqe->rlid); 701 wc->slid = be16_to_cpu(cqe->rlid);
702 wc->sl = be16_to_cpu(cqe->sl_vid >> 12); 702 wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
703 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); 703 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
704 wc->src_qp = g_mlpath_rqpn & 0xffffff; 704 wc->src_qp = g_mlpath_rqpn & 0xffffff;
705 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; 705 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
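The one-line mlx4 fix is an endianness bug worth spelling out: cqe->sl_vid is a big-endian 16-bit word with the 4-bit service level in its top bits, i.e. in the first byte on the wire. Shifting the raw __be16 and byte-swapping afterwards extracts bits from the wrong byte on little-endian hosts; the value has to be converted to CPU order first. A sketch:

#include <linux/types.h>
#include <asm/byteorder.h>

static u8 demo_extract_sl(__be16 sl_vid)
{
	/* wrong (old code): shifts the byte-swapped image of the word */
	/* return be16_to_cpu(sl_vid >> 12); */

	/* right (new code): convert to CPU order first, then shift */
	return be16_to_cpu(sl_vid) >> 12;
}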
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1e5b6446231d..12876392516e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -119,6 +119,14 @@ error:
119 iscsi_conn_failure(conn, rc); 119 iscsi_conn_failure(conn, rc);
120} 120}
121 121
122static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
123{
124 struct iscsi_iser_task *iser_task = task->dd_data;
125
126 task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header;
127 task->hdr_max = sizeof(iser_task->desc.iscsi_header);
128 return 0;
129}
122 130
123/** 131/**
124 * iscsi_iser_task_init - Initialize task 132 * iscsi_iser_task_init - Initialize task
@@ -180,25 +188,26 @@ static int
180iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, 188iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
181 struct iscsi_task *task) 189 struct iscsi_task *task)
182{ 190{
183 struct iscsi_data hdr; 191 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
192 struct iscsi_data hdr;
184 int error = 0; 193 int error = 0;
185 194
186 /* Send data-out PDUs while there's still unsolicited data to send */ 195 /* Send data-out PDUs while there's still unsolicited data to send */
187 while (task->unsol_count > 0) { 196 while (iscsi_task_has_unsol_data(task)) {
188 iscsi_prep_unsolicit_data_pdu(task, &hdr); 197 iscsi_prep_data_out_pdu(task, r2t, &hdr);
189 debug_scsi("Sending data-out: itt 0x%x, data count %d\n", 198 debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
190 hdr.itt, task->data_count); 199 hdr.itt, r2t->data_count);
191 200
192 /* the buffer description has been passed with the command */ 201 /* the buffer description has been passed with the command */
193 /* Send the command */ 202 /* Send the command */
194 error = iser_send_data_out(conn, task, &hdr); 203 error = iser_send_data_out(conn, task, &hdr);
195 if (error) { 204 if (error) {
196 task->unsol_datasn--; 205 r2t->datasn--;
197 goto iscsi_iser_task_xmit_unsol_data_exit; 206 goto iscsi_iser_task_xmit_unsol_data_exit;
198 } 207 }
199 task->unsol_count -= task->data_count; 208 r2t->sent += r2t->data_count;
200 debug_scsi("Need to send %d more as data-out PDUs\n", 209 debug_scsi("Need to send %d more as data-out PDUs\n",
201 task->unsol_count); 210 r2t->data_length - r2t->sent);
202 } 211 }
203 212
204iscsi_iser_task_xmit_unsol_data_exit: 213iscsi_iser_task_xmit_unsol_data_exit:
@@ -220,7 +229,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
220 229
221 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n", 230 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
222 task->itt, scsi_bufflen(task->sc), 231 task->itt, scsi_bufflen(task->sc),
223 task->imm_count, task->unsol_count); 232 task->imm_count, task->unsol_r2t.data_length);
224 } 233 }
225 234
226 debug_scsi("task deq [cid %d itt 0x%x]\n", 235 debug_scsi("task deq [cid %d itt 0x%x]\n",
@@ -235,7 +244,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
235 } 244 }
236 245
237 /* Send unsolicited data-out PDU(s) if necessary */ 246 /* Send unsolicited data-out PDU(s) if necessary */
238 if (task->unsol_count) 247 if (iscsi_task_has_unsol_data(task))
239 error = iscsi_iser_task_xmit_unsol_data(conn, task); 248 error = iscsi_iser_task_xmit_unsol_data(conn, task);
240 249
241 iscsi_iser_task_xmit_exit: 250 iscsi_iser_task_xmit_exit:
@@ -244,13 +253,15 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
244 return error; 253 return error;
245} 254}
246 255
247static void 256static void iscsi_iser_cleanup_task(struct iscsi_task *task)
248iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
249{ 257{
250 struct iscsi_iser_task *iser_task = task->dd_data; 258 struct iscsi_iser_task *iser_task = task->dd_data;
251 259
252 /* mgmt tasks do not need special cleanup */ 260 /*
253 if (!task->sc) 261 * mgmt tasks do not need special cleanup and we do not
262 * allocate anything in the init task callout
263 */
264 if (!task->sc || task->state == ISCSI_TASK_PENDING)
254 return; 265 return;
255 266
256 if (iser_task->status == ISER_TASK_STATUS_STARTED) { 267 if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -391,9 +402,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
391 struct iscsi_cls_session *cls_session; 402 struct iscsi_cls_session *cls_session;
392 struct iscsi_session *session; 403 struct iscsi_session *session;
393 struct Scsi_Host *shost; 404 struct Scsi_Host *shost;
394 int i;
395 struct iscsi_task *task;
396 struct iscsi_iser_task *iser_task;
397 struct iser_conn *ib_conn; 405 struct iser_conn *ib_conn;
398 406
399 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN); 407 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
@@ -430,13 +438,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
430 session = cls_session->dd_data; 438 session = cls_session->dd_data;
431 439
432 shost->can_queue = session->scsi_cmds_max; 440 shost->can_queue = session->scsi_cmds_max;
433 /* libiscsi setup itts, data and pool so just set desc fields */
434 for (i = 0; i < session->cmds_max; i++) {
435 task = session->cmds[i];
436 iser_task = task->dd_data;
437 task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
438 task->hdr_max = sizeof(iser_task->desc.iscsi_header);
439 }
440 return cls_session; 441 return cls_session;
441 442
442remove_host: 443remove_host:
@@ -652,6 +653,7 @@ static struct iscsi_transport iscsi_iser_transport = {
652 .init_task = iscsi_iser_task_init, 653 .init_task = iscsi_iser_task_init,
653 .xmit_task = iscsi_iser_task_xmit, 654 .xmit_task = iscsi_iser_task_xmit,
654 .cleanup_task = iscsi_iser_cleanup_task, 655 .cleanup_task = iscsi_iser_cleanup_task,
656 .alloc_pdu = iscsi_iser_pdu_alloc,
655 /* recovery */ 657 /* recovery */
656 .session_recovery_timedout = iscsi_session_recovery_timedout, 658 .session_recovery_timedout = iscsi_session_recovery_timedout,
657 659
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index ed1aff21b7ea..e209cb8dd948 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -353,8 +353,7 @@ int iser_send_command(struct iscsi_conn *conn,
353 unsigned long edtl; 353 unsigned long edtl;
354 int err = 0; 354 int err = 0;
355 struct iser_data_buf *data_buf; 355 struct iser_data_buf *data_buf;
356 356 struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
357 struct iscsi_cmd *hdr = task->hdr;
358 struct scsi_cmnd *sc = task->sc; 357 struct scsi_cmnd *sc = task->sc;
359 358
360 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { 359 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
@@ -393,7 +392,7 @@ int iser_send_command(struct iscsi_conn *conn,
393 err = iser_prepare_write_cmd(task, 392 err = iser_prepare_write_cmd(task,
394 task->imm_count, 393 task->imm_count,
395 task->imm_count + 394 task->imm_count +
396 task->unsol_count, 395 task->unsol_r2t.data_length,
397 edtl); 396 edtl);
398 if (err) 397 if (err)
399 goto send_command_error; 398 goto send_command_error;
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 69e674ecf19a..db22fd9b4cf2 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -101,7 +101,7 @@ static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
101 if (cpu_is_omap24xx()) { 101 if (cpu_is_omap24xx()) {
102 int i; 102 int i;
103 for (i = 0; i < omap_kp->rows; i++) 103 for (i = 0; i < omap_kp->rows; i++)
104 disable_irq(OMAP_GPIO_IRQ(row_gpios[i])); 104 disable_irq(gpio_to_irq(row_gpios[i]));
105 } else 105 } else
106 /* disable keyboard interrupt and schedule for handling */ 106 /* disable keyboard interrupt and schedule for handling */
107 omap_writew(1, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); 107 omap_writew(1, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
@@ -224,7 +224,7 @@ static void omap_kp_tasklet(unsigned long data)
224 if (cpu_is_omap24xx()) { 224 if (cpu_is_omap24xx()) {
225 int i; 225 int i;
226 for (i = 0; i < omap_kp_data->rows; i++) 226 for (i = 0; i < omap_kp_data->rows; i++)
227 enable_irq(OMAP_GPIO_IRQ(row_gpios[i])); 227 enable_irq(gpio_to_irq(row_gpios[i]));
228 } else { 228 } else {
229 omap_writew(0, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); 229 omap_writew(0, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
230 kp_cur_group = -1; 230 kp_cur_group = -1;
@@ -397,7 +397,7 @@ static int __init omap_kp_probe(struct platform_device *pdev)
397 omap_writew(0, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); 397 omap_writew(0, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
398 } else { 398 } else {
399 for (irq_idx = 0; irq_idx < omap_kp->rows; irq_idx++) { 399 for (irq_idx = 0; irq_idx < omap_kp->rows; irq_idx++) {
400 if (request_irq(OMAP_GPIO_IRQ(row_gpios[irq_idx]), 400 if (request_irq(gpio_to_irq(row_gpios[irq_idx]),
401 omap_kp_interrupt, 401 omap_kp_interrupt,
402 IRQF_TRIGGER_FALLING, 402 IRQF_TRIGGER_FALLING,
403 "omap-keypad", omap_kp) < 0) 403 "omap-keypad", omap_kp) < 0)
@@ -438,7 +438,7 @@ static int omap_kp_remove(struct platform_device *pdev)
438 gpio_free(col_gpios[i]); 438 gpio_free(col_gpios[i]);
439 for (i = 0; i < omap_kp->rows; i++) { 439 for (i = 0; i < omap_kp->rows; i++) {
440 gpio_free(row_gpios[i]); 440 gpio_free(row_gpios[i]);
441 free_irq(OMAP_GPIO_IRQ(row_gpios[i]), 0); 441 free_irq(gpio_to_irq(row_gpios[i]), 0);
442 } 442 }
443 } else { 443 } else {
444 omap_writew(1, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); 444 omap_writew(1, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 6d30c6d334c3..0d2fc64a5e1c 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -475,7 +475,7 @@ static int __devinit pxa27x_keypad_probe(struct platform_device *pdev)
475 goto failed_free_mem; 475 goto failed_free_mem;
476 } 476 }
477 477
478 keypad->clk = clk_get(&pdev->dev, "KBDCLK"); 478 keypad->clk = clk_get(&pdev->dev, NULL);
479 if (IS_ERR(keypad->clk)) { 479 if (IS_ERR(keypad->clk)) {
480 dev_err(&pdev->dev, "failed to get keypad clock\n"); 480 dev_err(&pdev->dev, "failed to get keypad clock\n");
481 error = PTR_ERR(keypad->clk); 481 error = PTR_ERR(keypad->clk);
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 27d70d326ff3..da3c3a5d2689 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -79,7 +79,7 @@ config SERIO_PARKBD
79 79
80config SERIO_RPCKBD 80config SERIO_RPCKBD
81 tristate "Acorn RiscPC keyboard controller" 81 tristate "Acorn RiscPC keyboard controller"
82 depends on ARCH_ACORN || ARCH_CLPS7500 82 depends on ARCH_ACORN
83 default y 83 default y
84 help 84 help
85 Say Y here if you have the Acorn RiscPC and want to use an AT 85 Say Y here if you have the Acorn RiscPC and want to use an AT
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index b9b7fc6ff1eb..e1ece89fe922 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -697,7 +697,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
697 struct ads7846 *ts = container_of(handle, struct ads7846, timer); 697 struct ads7846 *ts = container_of(handle, struct ads7846, timer);
698 int status = 0; 698 int status = 0;
699 699
700 spin_lock_irq(&ts->lock); 700 spin_lock(&ts->lock);
701 701
702 if (unlikely(!get_pendown_state(ts) || 702 if (unlikely(!get_pendown_state(ts) ||
703 device_suspended(&ts->spi->dev))) { 703 device_suspended(&ts->spi->dev))) {
@@ -728,7 +728,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
728 dev_err(&ts->spi->dev, "spi_async --> %d\n", status); 728 dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
729 } 729 }
730 730
731 spin_unlock_irq(&ts->lock); 731 spin_unlock(&ts->lock);
732 return HRTIMER_NORESTART; 732 return HRTIMER_NORESTART;
733} 733}
734 734
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index ba648750a8d9..1d11e2be9ef8 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -31,7 +31,7 @@
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/wm97xx.h> 32#include <linux/wm97xx.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <mach/pxa-regs.h> 34#include <mach/regs-ac97.h>
35 35
36#define VERSION "0.13" 36#define VERSION "0.13"
37 37
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 5faefeaf6790..f2c641e0bdde 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -164,7 +164,7 @@ void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt);
164void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt); 164void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
165 165
166/* page_tables.c: */ 166/* page_tables.c: */
167int init_guest_pagetable(struct lguest *lg, unsigned long pgtable); 167int init_guest_pagetable(struct lguest *lg);
168void free_guest_pagetable(struct lguest *lg); 168void free_guest_pagetable(struct lguest *lg);
169void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); 169void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
170void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); 170void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index a661bbdae3d6..915da6b8c924 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -250,7 +250,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
250 /* Figure out how many pages the ring will take, and map that memory */ 250 /* Figure out how many pages the ring will take, and map that memory */
251 lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT, 251 lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,
252 DIV_ROUND_UP(vring_size(lvq->config.num, 252 DIV_ROUND_UP(vring_size(lvq->config.num,
253 PAGE_SIZE), 253 LGUEST_VRING_ALIGN),
254 PAGE_SIZE)); 254 PAGE_SIZE));
255 if (!lvq->pages) { 255 if (!lvq->pages) {
256 err = -ENOMEM; 256 err = -ENOMEM;
@@ -259,8 +259,8 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
259 259
260 /* OK, tell virtio_ring.c to set up a virtqueue now we know its size 260 /* OK, tell virtio_ring.c to set up a virtqueue now we know its size
261 * and we've got a pointer to its pages. */ 261 * and we've got a pointer to its pages. */
262 vq = vring_new_virtqueue(lvq->config.num, vdev, lvq->pages, 262 vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
263 lg_notify, callback); 263 vdev, lvq->pages, lg_notify, callback);
264 if (!vq) { 264 if (!vq) {
265 err = -ENOMEM; 265 err = -ENOMEM;
266 goto unmap; 266 goto unmap;
@@ -272,7 +272,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
272 * the interrupt as a source of randomness: it'd be nice to have that 272 * the interrupt as a source of randomness: it'd be nice to have that
273 * back.. */ 273 * back.. */
274 err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, 274 err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
275 vdev->dev.bus_id, vq); 275 dev_name(&vdev->dev), vq);
276 if (err) 276 if (err)
277 goto destroy_vring; 277 goto destroy_vring;
278 278
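The lguest_device.c hunks track a virtio_ring API change: the ring is now laid out with an explicit alignment, LGUEST_VRING_ALIGN, instead of PAGE_SIZE, and vring_new_virtqueue() is told about that alignment as well. The number of pages to map is derived the same way as in lg_find_vq() above; as a small sketch:

#include <linux/kernel.h>
#include <linux/virtio_ring.h>
#include <asm/page.h>

/* Pages needed to map a ring of "num" descriptors at a given alignment. */
static unsigned int demo_ring_pages(unsigned int num, unsigned long align)
{
	return DIV_ROUND_UP(vring_size(num, align), PAGE_SIZE);
}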
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index e73a000473cc..34bc017b8b3c 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -146,7 +146,7 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
146 return 0; 146 return 0;
147} 147}
148 148
149/*L:020 The initialization write supplies 4 pointer sized (32 or 64 bit) 149/*L:020 The initialization write supplies 3 pointer sized (32 or 64 bit)
150 * values (in addition to the LHREQ_INITIALIZE value). These are: 150 * values (in addition to the LHREQ_INITIALIZE value). These are:
151 * 151 *
152 * base: The start of the Guest-physical memory inside the Launcher memory. 152 * base: The start of the Guest-physical memory inside the Launcher memory.
@@ -155,9 +155,6 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
155 * allowed to access. The Guest memory lives inside the Launcher, so it sets 155 * allowed to access. The Guest memory lives inside the Launcher, so it sets
156 * this to ensure the Guest can only reach its own memory. 156 * this to ensure the Guest can only reach its own memory.
157 * 157 *
158 * pgdir: The (Guest-physical) address of the top of the initial Guest
159 * pagetables (which are set up by the Launcher).
160 *
161 * start: The first instruction to execute ("eip" in x86-speak). 158 * start: The first instruction to execute ("eip" in x86-speak).
162 */ 159 */
163static int initialize(struct file *file, const unsigned long __user *input) 160static int initialize(struct file *file, const unsigned long __user *input)
@@ -166,7 +163,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
166 * Guest. */ 163 * Guest. */
167 struct lguest *lg; 164 struct lguest *lg;
168 int err; 165 int err;
169 unsigned long args[4]; 166 unsigned long args[3];
170 167
171 /* We grab the Big Lguest lock, which protects against multiple 168 /* We grab the Big Lguest lock, which protects against multiple
172 * simultaneous initializations. */ 169 * simultaneous initializations. */
@@ -192,14 +189,14 @@ static int initialize(struct file *file, const unsigned long __user *input)
192 lg->mem_base = (void __user *)args[0]; 189 lg->mem_base = (void __user *)args[0];
193 lg->pfn_limit = args[1]; 190 lg->pfn_limit = args[1];
194 191
195 /* This is the first cpu (cpu 0) and it will start booting at args[3] */ 192 /* This is the first cpu (cpu 0) and it will start booting at args[2] */
196 err = lg_cpu_start(&lg->cpus[0], 0, args[3]); 193 err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
197 if (err) 194 if (err)
198 goto release_guest; 195 goto release_guest;
199 196
200 /* Initialize the Guest's shadow page tables, using the toplevel 197 /* Initialize the Guest's shadow page tables, using the toplevel
201 * address the Launcher gave us. This allocates memory, so can fail. */ 198 * address the Launcher gave us. This allocates memory, so can fail. */
202 err = init_guest_pagetable(lg, args[2]); 199 err = init_guest_pagetable(lg);
203 if (err) 200 if (err)
204 goto free_regs; 201 goto free_regs;
205 202
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 81d0c6053447..576a8318221c 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -14,6 +14,7 @@
14#include <linux/percpu.h> 14#include <linux/percpu.h>
15#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
16#include <asm/uaccess.h> 16#include <asm/uaccess.h>
17#include <asm/bootparam.h>
17#include "lg.h" 18#include "lg.h"
18 19
19/*M:008 We hold reference to pages, which prevents them from being swapped. 20/*M:008 We hold reference to pages, which prevents them from being swapped.
@@ -581,15 +582,82 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
581 release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx); 582 release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
582} 583}
583 584
 585 /* Once we know how much memory we have, we can construct simple identity
 586 * mappings (which set virtual == physical) and linear mappings, which will
 587 * get the Guest far enough into the boot to create its own.
588 *
589 * We lay them out of the way, just below the initrd (which is why we need to
590 * know its size here). */
591static unsigned long setup_pagetables(struct lguest *lg,
592 unsigned long mem,
593 unsigned long initrd_size)
594{
595 pgd_t __user *pgdir;
596 pte_t __user *linear;
597 unsigned int mapped_pages, i, linear_pages, phys_linear;
598 unsigned long mem_base = (unsigned long)lg->mem_base;
599
600 /* We have mapped_pages frames to map, so we need
601 * linear_pages page tables to map them. */
602 mapped_pages = mem / PAGE_SIZE;
603 linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
604
605 /* We put the toplevel page directory page at the top of memory. */
606 pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);
607
608 /* Now we use the next linear_pages pages as pte pages */
609 linear = (void *)pgdir - linear_pages * PAGE_SIZE;
610
611 /* Linear mapping is easy: put every page's address into the
612 * mapping in order. */
613 for (i = 0; i < mapped_pages; i++) {
614 pte_t pte;
615 pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
616 if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
617 return -EFAULT;
618 }
619
620 /* The top level points to the linear page table pages above.
621 * We setup the identity and linear mappings here. */
622 phys_linear = (unsigned long)linear - mem_base;
623 for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
624 pgd_t pgd;
625 pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
626 (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
627
628 if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
629 || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
630 + i / PTRS_PER_PTE],
631 &pgd, sizeof(pgd)))
632 return -EFAULT;
633 }
634
635 /* We return the top level (guest-physical) address: remember where
636 * this is. */
637 return (unsigned long)pgdir - mem_base;
638}
639
584/*H:500 (vii) Setting up the page tables initially. 640/*H:500 (vii) Setting up the page tables initially.
585 * 641 *
586 * When a Guest is first created, the Launcher tells us where the toplevel of 642 * When a Guest is first created, the Launcher tells us where the toplevel of
587 * its first page table is. We set some things up here: */ 643 * its first page table is. We set some things up here: */
588int init_guest_pagetable(struct lguest *lg, unsigned long pgtable) 644int init_guest_pagetable(struct lguest *lg)
589{ 645{
646 u64 mem;
647 u32 initrd_size;
648 struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
649
650 /* Get the Guest memory size and the ramdisk size from the boot header
651 * located at lg->mem_base (Guest address 0). */
652 if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
653 || get_user(initrd_size, &boot->hdr.ramdisk_size))
654 return -EFAULT;
655
590 /* We start on the first shadow page table, and give it a blank PGD 656 /* We start on the first shadow page table, and give it a blank PGD
591 * page. */ 657 * page. */
592 lg->pgdirs[0].gpgdir = pgtable; 658 lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
659 if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
660 return lg->pgdirs[0].gpgdir;
593 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); 661 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
594 if (!lg->pgdirs[0].pgdir) 662 if (!lg->pgdirs[0].pgdir)
595 return -ENOMEM; 663 return -ENOMEM;
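setup_pagetables() above packs everything at the top of guest memory: one toplevel page-directory page just below the initrd, with the linear page-table pages immediately below it. The sizing arithmetic, pulled out with demo_* constants for a 32-bit non-PAE layout (an assumption made only for this example), is:

#include <linux/kernel.h>

#define DEMO_PAGE_SIZE		4096UL
#define DEMO_PTRS_PER_PTE	1024UL	/* 32-bit non-PAE page tables */

/* Lowest guest-physical address consumed by the initial page tables. */
static unsigned long demo_pgtable_base(unsigned long mem,
				       unsigned long initrd_size)
{
	unsigned long mapped_pages = mem / DEMO_PAGE_SIZE;
	unsigned long linear_pages = DIV_ROUND_UP(mapped_pages,
						  DEMO_PTRS_PER_PTE);
	unsigned long pgdir = mem - initrd_size - DEMO_PAGE_SIZE;

	/*
	 * Example: mem = 64 MB, initrd_size = 4 MB  ->  16384 mapped pages,
	 * 16 PTE pages; the PGD page sits at 60 MB - 4 KB and the PTE pages
	 * fill the 64 KB directly below it.
	 */
	return pgdir - linear_pages * DEMO_PAGE_SIZE;
}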
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ce26c84af064..3326750ec02c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1060,7 +1060,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1060 goto bad_page_pool; 1060 goto bad_page_pool;
1061 } 1061 }
1062 1062
1063 cc->bs = bioset_create(MIN_IOS, MIN_IOS); 1063 cc->bs = bioset_create(MIN_IOS, 0);
1064 if (!cc->bs) { 1064 if (!cc->bs) {
1065 ti->error = "Cannot allocate crypt bioset"; 1065 ti->error = "Cannot allocate crypt bioset";
1066 goto bad_bs; 1066 goto bad_bs;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2fd6d4450637..a34338567a2a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -56,7 +56,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
56 if (!client->pool) 56 if (!client->pool)
57 goto bad; 57 goto bad;
58 58
59 client->bios = bioset_create(16, 16); 59 client->bios = bioset_create(16, 0);
60 if (!client->bios) 60 if (!client->bios)
61 goto bad; 61 goto bad;
62 62
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 343094c3feeb..421c9f02d8ca 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1093,7 +1093,7 @@ static struct mapped_device *alloc_dev(int minor)
1093 if (!md->tio_pool) 1093 if (!md->tio_pool)
1094 goto bad_tio_pool; 1094 goto bad_tio_pool;
1095 1095
1096 md->bs = bioset_create(16, 16); 1096 md->bs = bioset_create(16, 0);
1097 if (!md->bs) 1097 if (!md->bs)
1098 goto bad_no_bioset; 1098 goto bad_no_bioset;
1099 1099
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index 4952aeb5dd80..d8229a0e9a9c 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -2391,6 +2391,67 @@ IR_KEYTAB_TYPE ir_codes_powercolor_real_angel[IR_KEYTAB_SIZE] = {
2391}; 2391};
2392EXPORT_SYMBOL_GPL(ir_codes_powercolor_real_angel); 2392EXPORT_SYMBOL_GPL(ir_codes_powercolor_real_angel);
2393 2393
2394/* Kworld Plus TV Analog Lite PCI IR
2395 Mauro Carvalho Chehab <mchehab@infradead.org>
2396 */
2397IR_KEYTAB_TYPE ir_codes_kworld_plus_tv_analog[IR_KEYTAB_SIZE] = {
2398 [0x0c] = KEY_PROG1, /* Kworld key */
2399 [0x16] = KEY_CLOSECD, /* -> ) */
2400 [0x1d] = KEY_POWER2,
2401
2402 [0x00] = KEY_1,
2403 [0x01] = KEY_2,
2404 [0x02] = KEY_3, /* Two keys have the same code: 3 and left */
2405 [0x03] = KEY_4, /* Two keys have the same code: 4 and right */
2406 [0x04] = KEY_5,
2407 [0x05] = KEY_6,
2408 [0x06] = KEY_7,
2409 [0x07] = KEY_8,
2410 [0x08] = KEY_9,
2411 [0x0a] = KEY_0,
2412
2413 [0x09] = KEY_AGAIN,
2414 [0x14] = KEY_MUTE,
2415
2416 [0x20] = KEY_UP,
2417 [0x21] = KEY_DOWN,
2418 [0x0b] = KEY_ENTER,
2419
2420 [0x10] = KEY_CHANNELUP,
2421 [0x11] = KEY_CHANNELDOWN,
2422
2423 /* Couldn't map key left/key right since those
2424 conflict with '3' and '4' scancodes
2425 I dunno what the original driver does
2426 */
2427
2428 [0x13] = KEY_VOLUMEUP,
2429 [0x12] = KEY_VOLUMEDOWN,
2430
2431 /* The lower part of the IR
2432 There are several duplicated keycodes there.
2433 Most of them conflict with digits.
2434 Add mappings just to the unused scancodes.
2435 Somehow, the original driver has a way to know,
2436 but this doesn't seem to be on some GPIO.
2437 Also, it is not related to the time between keyup
2438 and keydown.
2439 */
2440 [0x19] = KEY_PAUSE, /* Timeshift */
2441 [0x1a] = KEY_STOP,
2442 [0x1b] = KEY_RECORD,
2443
2444 [0x22] = KEY_TEXT,
2445
2446 [0x15] = KEY_AUDIO, /* ((*)) */
2447 [0x0f] = KEY_ZOOM,
2448 [0x1c] = KEY_SHUFFLE, /* snapshot */
2449
2450 [0x18] = KEY_RED, /* B */
2451 [0x23] = KEY_GREEN, /* C */
2452};
2453EXPORT_SYMBOL_GPL(ir_codes_kworld_plus_tv_analog);
2454
2394IR_KEYTAB_TYPE ir_codes_avermedia_a16d[IR_KEYTAB_SIZE] = { 2455IR_KEYTAB_TYPE ir_codes_avermedia_a16d[IR_KEYTAB_SIZE] = {
2395 [0x20] = KEY_LIST, 2456 [0x20] = KEY_LIST,
2396 [0x00] = KEY_POWER, 2457 [0x00] = KEY_POWER,
@@ -2511,3 +2572,35 @@ IR_KEYTAB_TYPE ir_codes_real_audio_220_32_keys[IR_KEYTAB_SIZE] = {
2511 2572
2512}; 2573};
2513EXPORT_SYMBOL_GPL(ir_codes_real_audio_220_32_keys); 2574EXPORT_SYMBOL_GPL(ir_codes_real_audio_220_32_keys);
2575
2576/* ATI TV Wonder HD 600 USB
2577 Devin Heitmueller <devin.heitmueller@gmail.com>
2578 */
2579IR_KEYTAB_TYPE ir_codes_ati_tv_wonder_hd_600[IR_KEYTAB_SIZE] = {
2580 [0x00] = KEY_RECORD, /* Row 1 */
2581 [0x01] = KEY_PLAYPAUSE,
2582 [0x02] = KEY_STOP,
2583 [0x03] = KEY_POWER,
2584 [0x04] = KEY_PREVIOUS, /* Row 2 */
2585 [0x05] = KEY_REWIND,
2586 [0x06] = KEY_FORWARD,
2587 [0x07] = KEY_NEXT,
2588 [0x08] = KEY_EPG, /* Row 3 */
2589 [0x09] = KEY_HOME,
2590 [0x0a] = KEY_MENU,
2591 [0x0b] = KEY_CHANNELUP,
2592 [0x0c] = KEY_BACK, /* Row 4 */
2593 [0x0d] = KEY_UP,
2594 [0x0e] = KEY_INFO,
2595 [0x0f] = KEY_CHANNELDOWN,
2596 [0x10] = KEY_LEFT, /* Row 5 */
2597 [0x11] = KEY_SELECT,
2598 [0x12] = KEY_RIGHT,
2599 [0x13] = KEY_VOLUMEUP,
2600 [0x14] = KEY_LAST, /* Row 6 */
2601 [0x15] = KEY_DOWN,
2602 [0x16] = KEY_MUTE,
2603 [0x17] = KEY_VOLUMEDOWN,
2604};
2605
2606EXPORT_SYMBOL_GPL(ir_codes_ati_tv_wonder_hd_600);
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 127b0526a727..7d844af88384 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -313,7 +313,7 @@ static int fops_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
313/* 313/*
314 DEB_EE(("inode:%p, file:%p, cmd:%d, arg:%li\n",inode, file, cmd, arg)); 314 DEB_EE(("inode:%p, file:%p, cmd:%d, arg:%li\n",inode, file, cmd, arg));
315*/ 315*/
316 return video_usercopy(inode, file, cmd, arg, saa7146_video_do_ioctl); 316 return video_usercopy(file, cmd, arg, saa7146_video_do_ioctl);
317} 317}
318 318
319static int fops_mmap(struct file *file, struct vm_area_struct * vma) 319static int fops_mmap(struct file *file, struct vm_area_struct * vma)
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index fe0bd55977e3..101b01dbb8ea 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -834,7 +834,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
834 * copying is done already, arg is a kernel pointer. 834 * copying is done already, arg is a kernel pointer.
835 */ 835 */
836 836
837static int __saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg) 837int saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
838{ 838{
839 struct saa7146_fh *fh = file->private_data; 839 struct saa7146_fh *fh = file->private_data;
840 struct saa7146_dev *dev = fh->dev; 840 struct saa7146_dev *dev = fh->dev;
@@ -1216,17 +1216,11 @@ static int __saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *a
1216#endif 1216#endif
1217 default: 1217 default:
1218 return v4l_compat_translate_ioctl(file, cmd, arg, 1218 return v4l_compat_translate_ioctl(file, cmd, arg,
1219 __saa7146_video_do_ioctl); 1219 saa7146_video_do_ioctl);
1220 } 1220 }
1221 return 0; 1221 return 0;
1222} 1222}
1223 1223
1224int saa7146_video_do_ioctl(struct inode *inode, struct file *file,
1225 unsigned int cmd, void *arg)
1226{
1227 return __saa7146_video_do_ioctl(file, cmd, arg);
1228}
1229
1230/*********************************************************************************/ 1224/*********************************************************************************/
1231/* buffer handling functions */ 1225/* buffer handling functions */
1232 1226
diff --git a/drivers/media/common/tuners/mxl5005s.c b/drivers/media/common/tuners/mxl5005s.c
index a8878244bb3c..31522d2e318e 100644
--- a/drivers/media/common/tuners/mxl5005s.c
+++ b/drivers/media/common/tuners/mxl5005s.c
@@ -3598,7 +3598,7 @@ static u16 MXL_GetInitRegister(struct dvb_frontend *fe, u8 *RegNum,
3598 76, 77, 91, 134, 135, 137, 147, 3598 76, 77, 91, 134, 135, 137, 147,
3599 156, 166, 167, 168, 25 }; 3599 156, 166, 167, 168, 25 };
3600 3600
3601 *count = sizeof(RegAddr) / sizeof(u8); 3601 *count = ARRAY_SIZE(RegAddr);
3602 3602
3603 status += MXL_BlockInit(fe); 3603 status += MXL_BlockInit(fe);
3604 3604
@@ -3630,7 +3630,7 @@ static u16 MXL_GetCHRegister(struct dvb_frontend *fe, u8 *RegNum, u8 *RegVal,
3630 */ 3630 */
3631#endif 3631#endif
3632 3632
3633 *count = sizeof(RegAddr) / sizeof(u8); 3633 *count = ARRAY_SIZE(RegAddr);
3634 3634
3635 for (i = 0 ; i < *count; i++) { 3635 for (i = 0 ; i < *count; i++) {
3636 RegNum[i] = RegAddr[i]; 3636 RegNum[i] = RegAddr[i];
@@ -3648,7 +3648,7 @@ static u16 MXL_GetCHRegister_ZeroIF(struct dvb_frontend *fe, u8 *RegNum,
3648 3648
3649 u8 RegAddr[] = {43, 136}; 3649 u8 RegAddr[] = {43, 136};
3650 3650
3651 *count = sizeof(RegAddr) / sizeof(u8); 3651 *count = ARRAY_SIZE(RegAddr);
3652 3652
3653 for (i = 0; i < *count; i++) { 3653 for (i = 0; i < *count; i++) {
3654 RegNum[i] = RegAddr[i]; 3654 RegNum[i] = RegAddr[i];
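The three mxl5005s hunks replace the open-coded sizeof(RegAddr)/sizeof(u8) with ARRAY_SIZE(). Both give the element count today only because the arrays happen to hold u8; ARRAY_SIZE divides by the size of the actual element, so it stays correct if the type ever changes. A minimal userspace illustration (ARRAY_SIZE spelled out the same way as the kernel macro, minus its compile-time array-type check):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	unsigned char  regs8[]  = { 43, 136 };
	unsigned short regs16[] = { 43, 136 };

	/* Correct for any element type: */
	printf("%zu %zu\n", ARRAY_SIZE(regs8), ARRAY_SIZE(regs16));  /* prints "2 2" */

	/* The old open-coded form silently breaks if the type is not u8: */
	printf("%zu\n", sizeof(regs16) / sizeof(unsigned char));     /* prints "4" */
	return 0;
}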
diff --git a/drivers/media/common/tuners/tda827x.c b/drivers/media/common/tuners/tda827x.c
index 4a74f65e759a..f4d931f14fad 100644
--- a/drivers/media/common/tuners/tda827x.c
+++ b/drivers/media/common/tuners/tda827x.c
@@ -80,10 +80,11 @@ static void tda827x_set_std(struct dvb_frontend *fe,
80 mode = "xx"; 80 mode = "xx";
81 } 81 }
82 82
83 if (params->mode == V4L2_TUNER_RADIO) 83 if (params->mode == V4L2_TUNER_RADIO) {
84 priv->sgIF = 88; /* if frequency is 5.5 MHz */ 84 priv->sgIF = 88; /* if frequency is 5.5 MHz */
85 85 dprintk("setting tda827x to radio FM\n");
86 dprintk("setting tda827x to system %s\n", mode); 86 } else
87 dprintk("setting tda827x to system %s\n", mode);
87} 88}
88 89
89 90
@@ -199,7 +200,7 @@ static int tda827xo_set_params(struct dvb_frontend *fe,
199 fe->ops.i2c_gate_ctrl(fe, 1); 200 fe->ops.i2c_gate_ctrl(fe, 1);
200 i2c_transfer(priv->i2c_adap, &msg, 1); 201 i2c_transfer(priv->i2c_adap, &msg, 1);
201 202
202 priv->frequency = tuner_freq - if_freq; // FIXME 203 priv->frequency = params->frequency;
203 priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0; 204 priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
204 205
205 return 0; 206 return 0;
@@ -304,7 +305,7 @@ static int tda827xo_set_analog_params(struct dvb_frontend *fe,
304 reg2[1] = 0x08; /* Vsync en */ 305 reg2[1] = 0x08; /* Vsync en */
305 i2c_transfer(priv->i2c_adap, &msg, 1); 306 i2c_transfer(priv->i2c_adap, &msg, 1);
306 307
307 priv->frequency = freq * 62500; 308 priv->frequency = params->frequency;
308 309
309 return 0; 310 return 0;
310} 311}
@@ -591,7 +592,7 @@ static int tda827xa_set_params(struct dvb_frontend *fe,
591 fe->ops.i2c_gate_ctrl(fe, 1); 592 fe->ops.i2c_gate_ctrl(fe, 1);
592 i2c_transfer(priv->i2c_adap, &msg, 1); 593 i2c_transfer(priv->i2c_adap, &msg, 1);
593 594
594 priv->frequency = tuner_freq - if_freq; // FIXME 595 priv->frequency = params->frequency;
595 priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0; 596 priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
596 597
597 return 0; 598 return 0;
@@ -691,7 +692,7 @@ static int tda827xa_set_analog_params(struct dvb_frontend *fe,
691 tuner_reg[1] = 0x19 + (priv->lpsel << 1); 692 tuner_reg[1] = 0x19 + (priv->lpsel << 1);
692 i2c_transfer(priv->i2c_adap, &msg, 1); 693 i2c_transfer(priv->i2c_adap, &msg, 1);
693 694
694 priv->frequency = freq * 62500; 695 priv->frequency = params->frequency;
695 696
696 return 0; 697 return 0;
697} 698}
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index c112bdd4e0f0..0ee79fd7c7a9 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -32,6 +32,9 @@ static int debug;
32module_param(debug, int, 0644); 32module_param(debug, int, 0644);
33MODULE_PARM_DESC(debug, "enable verbose debug messages"); 33MODULE_PARM_DESC(debug, "enable verbose debug messages");
34 34
35static int deemphasis_50;
36MODULE_PARM_DESC(deemphasis_50, "0 - 75us deemphasis; 1 - 50us deemphasis");
37
35/* ---------------------------------------------------------------------- */ 38/* ---------------------------------------------------------------------- */
36 39
37struct tda8290_priv { 40struct tda8290_priv {
@@ -139,9 +142,34 @@ static void set_audio(struct dvb_frontend *fe,
139 mode = "xx"; 142 mode = "xx";
140 } 143 }
141 144
142 tuner_dbg("setting tda829x to system %s\n", mode); 145 if (params->mode == V4L2_TUNER_RADIO) {
146 priv->tda8290_easy_mode = 0x01; /* Start with MN values */
147 tuner_dbg("setting to radio FM\n");
148 } else {
149 tuner_dbg("setting tda829x to system %s\n", mode);
150 }
143} 151}
144 152
153struct {
154 unsigned char seq[2];
155} fm_mode[] = {
156 { { 0x01, 0x81} }, /* Put device into expert mode */
157 { { 0x03, 0x48} }, /* Disable NOTCH and VIDEO filters */
158 { { 0x04, 0x04} }, /* Disable color carrier filter (SSIF) */
159 { { 0x05, 0x04} }, /* ADC headroom */
160 { { 0x06, 0x10} }, /* group delay flat */
161
162 { { 0x07, 0x00} }, /* use the same radio DTO values as a tda8295 */
163 { { 0x08, 0x00} },
164 { { 0x09, 0x80} },
165 { { 0x0a, 0xda} },
166 { { 0x0b, 0x4b} },
167 { { 0x0c, 0x68} },
168
169 { { 0x0d, 0x00} }, /* PLL off, no video carrier detect */
170 { { 0x14, 0x00} }, /* disable auto mute if no video */
171};
172
145static void tda8290_set_params(struct dvb_frontend *fe, 173static void tda8290_set_params(struct dvb_frontend *fe,
146 struct analog_parameters *params) 174 struct analog_parameters *params)
147{ 175{
@@ -178,15 +206,30 @@ static void tda8290_set_params(struct dvb_frontend *fe,
178 tuner_i2c_xfer_send(&priv->i2c_props, soft_reset, 2); 206 tuner_i2c_xfer_send(&priv->i2c_props, soft_reset, 2);
179 msleep(1); 207 msleep(1);
180 208
181 expert_mode[1] = priv->tda8290_easy_mode + 0x80; 209 if (params->mode == V4L2_TUNER_RADIO) {
182 tuner_i2c_xfer_send(&priv->i2c_props, expert_mode, 2); 210 int i;
183 tuner_i2c_xfer_send(&priv->i2c_props, gainset_off, 2); 211 unsigned char deemphasis[] = { 0x13, 1 };
184 tuner_i2c_xfer_send(&priv->i2c_props, if_agc_spd, 2); 212
185 if (priv->tda8290_easy_mode & 0x60) 213 /* FIXME: allow using a different deemphasis */
186 tuner_i2c_xfer_send(&priv->i2c_props, adc_head_9, 2); 214
187 else 215 if (deemphasis_50)
188 tuner_i2c_xfer_send(&priv->i2c_props, adc_head_6, 2); 216 deemphasis[1] = 2;
189 tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2); 217
218 for (i = 0; i < ARRAY_SIZE(fm_mode); i++)
219 tuner_i2c_xfer_send(&priv->i2c_props, fm_mode[i].seq, 2);
220
221 tuner_i2c_xfer_send(&priv->i2c_props, deemphasis, 2);
222 } else {
223 expert_mode[1] = priv->tda8290_easy_mode + 0x80;
224 tuner_i2c_xfer_send(&priv->i2c_props, expert_mode, 2);
225 tuner_i2c_xfer_send(&priv->i2c_props, gainset_off, 2);
226 tuner_i2c_xfer_send(&priv->i2c_props, if_agc_spd, 2);
227 if (priv->tda8290_easy_mode & 0x60)
228 tuner_i2c_xfer_send(&priv->i2c_props, adc_head_9, 2);
229 else
230 tuner_i2c_xfer_send(&priv->i2c_props, adc_head_6, 2);
231 tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2);
232 }
190 233
191 tda8290_i2c_bridge(fe, 1); 234 tda8290_i2c_bridge(fe, 1);
192 235
diff --git a/drivers/media/common/tuners/tda9887.c b/drivers/media/common/tuners/tda9887.c
index ff1788cc5d48..544cdbe88a6c 100644
--- a/drivers/media/common/tuners/tda9887.c
+++ b/drivers/media/common/tuners/tda9887.c
@@ -180,11 +180,10 @@ static struct tvnorm tvnorms[] = {
180 },{ 180 },{
181 .std = V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, 181 .std = V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
182 .name = "SECAM-BGH", 182 .name = "SECAM-BGH",
183 .b = ( cPositiveAmTV | 183 .b = ( cNegativeFmTV |
184 cQSS ), 184 cQSS ),
185 .c = ( cTopDefault), 185 .c = ( cTopDefault),
186 .e = ( cGating_36 | 186 .e = ( cAudioIF_5_5 |
187 cAudioIF_5_5 |
188 cVideoIF_38_90 ), 187 cVideoIF_38_90 ),
189 },{ 188 },{
190 .std = V4L2_STD_SECAM_L, 189 .std = V4L2_STD_SECAM_L,
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index b65e6803e6c6..1adce9ff52ce 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -28,6 +28,12 @@ static int debug;
28module_param(debug, int, 0644); 28module_param(debug, int, 0644);
29MODULE_PARM_DESC(debug, "enable verbose debug messages"); 29MODULE_PARM_DESC(debug, "enable verbose debug messages");
30 30
31static int no_poweroff;
32module_param(no_poweroff, int, 0644);
33MODULE_PARM_DESC(no_poweroff, "0 (default) powers the device off when not used.\n"
34	"1 keeps the device energized with the tuner ready at all times.\n"
35	" Faster, but consumes more power and keeps the device hotter.\n");
36
31static char audio_std[8]; 37static char audio_std[8];
32module_param_string(audio_std, audio_std, sizeof(audio_std), 0); 38module_param_string(audio_std, audio_std, sizeof(audio_std), 0);
33MODULE_PARM_DESC(audio_std, 39MODULE_PARM_DESC(audio_std,
@@ -1091,6 +1097,34 @@ static int xc2028_set_params(struct dvb_frontend *fe,
1091 T_DIGITAL_TV, type, 0, demod); 1097 T_DIGITAL_TV, type, 0, demod);
1092} 1098}
1093 1099
1100static int xc2028_sleep(struct dvb_frontend *fe)
1101{
1102 struct xc2028_data *priv = fe->tuner_priv;
1103 int rc = 0;
1104
1105 /* Avoid firmware reload on slow devices */
1106 if (no_poweroff)
1107 return 0;
1108
1109 tuner_dbg("Putting xc2028/3028 into poweroff mode.\n");
1110 if (debug > 1) {
1111 tuner_dbg("Printing sleep stack trace:\n");
1112 dump_stack();
1113 }
1114
1115 mutex_lock(&priv->lock);
1116
1117 if (priv->firm_version < 0x0202)
1118 rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00});
1119 else
1120 rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00});
1121
1122 priv->cur_fw.type = 0; /* need firmware reload */
1123
1124 mutex_unlock(&priv->lock);
1125
1126 return rc;
1127}
1094 1128
1095static int xc2028_dvb_release(struct dvb_frontend *fe) 1129static int xc2028_dvb_release(struct dvb_frontend *fe)
1096{ 1130{
@@ -1171,6 +1205,7 @@ static const struct dvb_tuner_ops xc2028_dvb_tuner_ops = {
1171 .get_frequency = xc2028_get_frequency, 1205 .get_frequency = xc2028_get_frequency,
1172 .get_rf_strength = xc2028_signal, 1206 .get_rf_strength = xc2028_signal,
1173 .set_params = xc2028_set_params, 1207 .set_params = xc2028_set_params,
1208 .sleep = xc2028_sleep,
1174}; 1209};
1175 1210
1176struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe, 1211struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe,
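The new no_poweroff parameter, together with the xc2028_sleep() hook added above, trades power for wake-up latency: with the default of 0 the firmware is reloaded after every sleep, with 1 the tuner stays powered and ready. If the slower wake-up matters more than the extra power draw, the option can be set persistently through the usual modprobe configuration; the file name below is only a convention, not something the driver mandates:

# /etc/modprobe.d/tuner-xc2028.conf
options tuner-xc2028 no_poweroff=1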
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index e12d13e0cbe9..493ce93caf43 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -36,10 +36,6 @@ static int debug;
36module_param(debug, int, 0644); 36module_param(debug, int, 0644);
37MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); 37MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
38 38
39static int xc5000_load_fw_on_attach;
40module_param_named(init_fw, xc5000_load_fw_on_attach, int, 0644);
41MODULE_PARM_DESC(init_fw, "Load firmware during driver initialization.");
42
43static DEFINE_MUTEX(xc5000_list_mutex); 39static DEFINE_MUTEX(xc5000_list_mutex);
44static LIST_HEAD(hybrid_tuner_instance_list); 40static LIST_HEAD(hybrid_tuner_instance_list);
45 41
@@ -1017,9 +1013,6 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
1017 memcpy(&fe->ops.tuner_ops, &xc5000_tuner_ops, 1013 memcpy(&fe->ops.tuner_ops, &xc5000_tuner_ops,
1018 sizeof(struct dvb_tuner_ops)); 1014 sizeof(struct dvb_tuner_ops));
1019 1015
1020 if (xc5000_load_fw_on_attach)
1021 xc5000_init(fe);
1022
1023 return fe; 1016 return fe;
1024fail: 1017fail:
1025 mutex_unlock(&xc5000_list_mutex); 1018 mutex_unlock(&xc5000_list_mutex);
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index 0bcd852576d6..40ebde53b3ce 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -2,6 +2,19 @@
2# DVB device configuration 2# DVB device configuration
3# 3#
4 4
5config DVB_DYNAMIC_MINORS
6 bool "Dynamic DVB minor allocation"
7 depends on DVB_CORE
8 default n
9 help
10 If you say Y here, the DVB subsystem will use dynamic minor
11 allocation for any device that uses the DVB major number.
12 This means that you can have more than 4 of a single type
13 of device (like demuxes and frontends) per adapter, but udev
14 will be required to manage the device nodes.
15
16 If you are unsure about this, say N here.
17
5menuconfig DVB_CAPTURE_DRIVERS 18menuconfig DVB_CAPTURE_DRIVERS
6 bool "DVB/ATSC adapters" 19 bool "DVB/ATSC adapters"
7 depends on DVB_CORE 20 depends on DVB_CORE
diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig
index b34301d56cd2..a8c6249c4099 100644
--- a/drivers/media/dvb/b2c2/Kconfig
+++ b/drivers/media/dvb/b2c2/Kconfig
@@ -14,6 +14,7 @@ config DVB_B2C2_FLEXCOP
14 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 14 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
15 select DVB_CX24123 if !DVB_FE_CUSTOMISE 15 select DVB_CX24123 if !DVB_FE_CUSTOMISE
16 select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE 16 select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE
17 select DVB_TUNER_CX24113 if !DVB_FE_CUSTOMISE
17 help 18 help
18 Support for the digital TV receiver chip made by B2C2 Inc. included in 19 Support for the digital TV receiver chip made by B2C2 Inc. included in
19 Technisats PCI cards and USB boxes. 20 Technisats PCI cards and USB boxes.
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index a21ce9edcc7e..f48f73aff195 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -19,7 +19,6 @@
19 * 19 *
20 */ 20 */
21 21
22#include <linux/version.h>
23#include <linux/i2c.h> 22#include <linux/i2c.h>
24#include <linux/init.h> 23#include <linux/init.h>
25#include <linux/kernel.h> 24#include <linux/kernel.h>
@@ -368,7 +367,7 @@ static int __devinit dm1105dvb_dma_map(struct dm1105dvb *dm1105dvb)
368{ 367{
369 dm1105dvb->ts_buf = pci_alloc_consistent(dm1105dvb->pdev, 6*DM1105_DMA_BYTES, &dm1105dvb->dma_addr); 368 dm1105dvb->ts_buf = pci_alloc_consistent(dm1105dvb->pdev, 6*DM1105_DMA_BYTES, &dm1105dvb->dma_addr);
370 369
371 return pci_dma_mapping_error(dm1105dvb->pdev, dm1105dvb->dma_addr); 370 return !dm1105dvb->ts_buf;
372} 371}
373 372
374static void dm1105dvb_dma_unmap(struct dm1105dvb *dm1105dvb) 373static void dm1105dvb_dma_unmap(struct dm1105dvb *dm1105dvb)
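The dm1105 hunk swaps pci_dma_mapping_error() for a plain NULL test: pci_alloc_consistent() hands back a coherent buffer (or NULL on failure) together with its DMA address, so there is no streaming mapping whose error state could be queried. A kernel-side sketch of the resulting allocate/free pattern; demo_dev and the demo_* helpers are illustrative stand-ins for the driver's own structures, not real API:

#include <linux/pci.h>
#include <linux/errno.h>

struct demo_dev {
	struct pci_dev *pdev;
	void *ts_buf;
	dma_addr_t dma_addr;
};

static int demo_dma_map(struct demo_dev *dev, size_t len)
{
	dev->ts_buf = pci_alloc_consistent(dev->pdev, len, &dev->dma_addr);
	return dev->ts_buf ? 0 : -ENOMEM;   /* NULL is the only failure indication */
}

static void demo_dma_unmap(struct demo_dev *dev, size_t len)
{
	pci_free_consistent(dev->pdev, len, dev->ts_buf, dev->dma_addr);
}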
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 7a421e9dba5a..171f9ca124f7 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -128,6 +128,7 @@ struct dvb_frontend_private {
128 unsigned int step_size; 128 unsigned int step_size;
129 int quality; 129 int quality;
130 unsigned int check_wrapped; 130 unsigned int check_wrapped;
131 enum dvbfe_search algo_status;
131}; 132};
132 133
133static void dvb_frontend_wakeup(struct dvb_frontend *fe); 134static void dvb_frontend_wakeup(struct dvb_frontend *fe);
@@ -516,6 +517,8 @@ static int dvb_frontend_thread(void *data)
516 struct dvb_frontend_private *fepriv = fe->frontend_priv; 517 struct dvb_frontend_private *fepriv = fe->frontend_priv;
517 unsigned long timeout; 518 unsigned long timeout;
518 fe_status_t s; 519 fe_status_t s;
520 enum dvbfe_algo algo;
521
519 struct dvb_frontend_parameters *params; 522 struct dvb_frontend_parameters *params;
520 523
521 dprintk("%s\n", __func__); 524 dprintk("%s\n", __func__);
@@ -562,23 +565,80 @@ restart:
562 565
563 /* do an iteration of the tuning loop */ 566 /* do an iteration of the tuning loop */
564 if (fe->ops.get_frontend_algo) { 567 if (fe->ops.get_frontend_algo) {
565 if (fe->ops.get_frontend_algo(fe) == FE_ALGO_HW) { 568 algo = fe->ops.get_frontend_algo(fe);
566 /* have we been asked to retune? */ 569 switch (algo) {
567 params = NULL; 570 case DVBFE_ALGO_HW:
571 dprintk("%s: Frontend ALGO = DVBFE_ALGO_HW\n", __func__);
572 params = NULL; /* have we been asked to RETUNE ? */
573
568 if (fepriv->state & FESTATE_RETUNE) { 574 if (fepriv->state & FESTATE_RETUNE) {
575 dprintk("%s: Retune requested, FESTATE_RETUNE\n", __func__);
569 params = &fepriv->parameters; 576 params = &fepriv->parameters;
570 fepriv->state = FESTATE_TUNED; 577 fepriv->state = FESTATE_TUNED;
571 } 578 }
572 579
573 fe->ops.tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s); 580 if (fe->ops.tune)
574 if (s != fepriv->status) { 581 fe->ops.tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s);
582
583 if (s != fepriv->status && !(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) {
584 dprintk("%s: state changed, adding current state\n", __func__);
575 dvb_frontend_add_event(fe, s); 585 dvb_frontend_add_event(fe, s);
576 fepriv->status = s; 586 fepriv->status = s;
577 } 587 }
578 } else 588 break;
589 case DVBFE_ALGO_SW:
590 dprintk("%s: Frontend ALGO = DVBFE_ALGO_SW\n", __func__);
579 dvb_frontend_swzigzag(fe); 591 dvb_frontend_swzigzag(fe);
580 } else 592 break;
593 case DVBFE_ALGO_CUSTOM:
594 params = NULL; /* have we been asked to RETUNE ? */
595 dprintk("%s: Frontend ALGO = DVBFE_ALGO_CUSTOM, state=%d\n", __func__, fepriv->state);
596 if (fepriv->state & FESTATE_RETUNE) {
597 dprintk("%s: Retune requested, FESTATE_RETUNE\n", __func__);
598 params = &fepriv->parameters;
599 fepriv->state = FESTATE_TUNED;
600 }
601 /* Case where we are going to search for a carrier
602 * User asked us to retune again for some reason, possibly
603 * requesting a search with a new set of parameters
604 */
605 if (fepriv->algo_status & DVBFE_ALGO_SEARCH_AGAIN) {
606 if (fe->ops.search) {
607 fepriv->algo_status = fe->ops.search(fe, &fepriv->parameters);
608 /* The search ran as requested; algo_status now
609 * holds the result flags returned by ops.search.
610 */
611 } else {
612 fepriv->algo_status &= ~DVBFE_ALGO_SEARCH_AGAIN;
613 }
614 }
615 /* Track the carrier if the search was successful */
616 if (fepriv->algo_status == DVBFE_ALGO_SEARCH_SUCCESS) {
617 if (fe->ops.track)
618 fe->ops.track(fe, &fepriv->parameters);
619 } else {
620 fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
621 fepriv->delay = HZ / 2;
622 }
623 fe->ops.read_status(fe, &s);
624 if (s != fepriv->status) {
625 dvb_frontend_add_event(fe, s); /* update event list */
626 fepriv->status = s;
627 if (!(s & FE_HAS_LOCK)) {
628 fepriv->delay = HZ / 10;
629 fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
630 } else {
631 fepriv->delay = 60 * HZ;
632 }
633 }
634 break;
635 default:
636 dprintk("%s: UNDEFINED ALGO !\n", __func__);
637 break;
638 }
639 } else {
581 dvb_frontend_swzigzag(fe); 640 dvb_frontend_swzigzag(fe);
641 }
582 } 642 }
583 643
584 if (dvb_powerdown_on_sleep) { 644 if (dvb_powerdown_on_sleep) {
@@ -1226,6 +1286,9 @@ int dtv_property_process_set(struct dvb_frontend *fe, struct dtv_property *tvp,
1226 dprintk("%s() Finalised property cache\n", __func__); 1286 dprintk("%s() Finalised property cache\n", __func__);
1227 dtv_property_cache_submit(fe); 1287 dtv_property_cache_submit(fe);
1228 1288
1289 /* Request the search algorithm to search */
1290 fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
1291
1229 r |= dvb_frontend_ioctl_legacy(inode, file, FE_SET_FRONTEND, 1292 r |= dvb_frontend_ioctl_legacy(inode, file, FE_SET_FRONTEND,
1230 &fepriv->parameters); 1293 &fepriv->parameters);
1231 break; 1294 break;
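The rewritten tuning loop above dispatches on get_frontend_algo(): DVBFE_ALGO_HW drivers simply get ops.tune() called with a non-NULL params pointer when a retune was requested, DVBFE_ALGO_SW keeps the software zigzag, and DVBFE_ALGO_CUSTOM defers to the new search/track callbacks. As a rough sketch, not a real driver, a demod that zigzags entirely in hardware would only have to provide something like the following (the mydemod_* names and helpers are hypothetical):

/* Hedged sketch of a DVBFE_ALGO_HW demod; mydemod_program_hw() and
 * mydemod_read_status() are hypothetical helpers, not a real API. */
static enum dvbfe_algo mydemod_get_frontend_algo(struct dvb_frontend *fe)
{
	return DVBFE_ALGO_HW;                   /* the thread will drive ops.tune() */
}

static int mydemod_tune(struct dvb_frontend *fe,
			struct dvb_frontend_parameters *params,
			unsigned int mode_flags,
			unsigned int *delay, fe_status_t *status)
{
	if (params)                             /* non-NULL means "retune requested" */
		mydemod_program_hw(fe, params); /* hypothetical helper */

	*delay = HZ / 5;                        /* ask to be polled again in ~200 ms */
	return mydemod_read_status(fe, status); /* hypothetical helper */
}

static struct dvb_frontend_ops mydemod_ops = {
	.get_frontend_algo = mydemod_get_frontend_algo,
	.tune              = mydemod_tune,
	/* ... info, set_frontend, read_status and friends as usual ... */
};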
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index db4a63b0a32e..e176da472d7a 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -69,6 +69,125 @@ struct analog_parameters {
69 u64 std; 69 u64 std;
70}; 70};
71 71
72enum dvbfe_modcod {
73 DVBFE_MODCOD_DUMMY_PLFRAME = 0,
74 DVBFE_MODCOD_QPSK_1_4,
75 DVBFE_MODCOD_QPSK_1_3,
76 DVBFE_MODCOD_QPSK_2_5,
77 DVBFE_MODCOD_QPSK_1_2,
78 DVBFE_MODCOD_QPSK_3_5,
79 DVBFE_MODCOD_QPSK_2_3,
80 DVBFE_MODCOD_QPSK_3_4,
81 DVBFE_MODCOD_QPSK_4_5,
82 DVBFE_MODCOD_QPSK_5_6,
83 DVBFE_MODCOD_QPSK_8_9,
84 DVBFE_MODCOD_QPSK_9_10,
85 DVBFE_MODCOD_8PSK_3_5,
86 DVBFE_MODCOD_8PSK_2_3,
87 DVBFE_MODCOD_8PSK_3_4,
88 DVBFE_MODCOD_8PSK_5_6,
89 DVBFE_MODCOD_8PSK_8_9,
90 DVBFE_MODCOD_8PSK_9_10,
91 DVBFE_MODCOD_16APSK_2_3,
92 DVBFE_MODCOD_16APSK_3_4,
93 DVBFE_MODCOD_16APSK_4_5,
94 DVBFE_MODCOD_16APSK_5_6,
95 DVBFE_MODCOD_16APSK_8_9,
96 DVBFE_MODCOD_16APSK_9_10,
97 DVBFE_MODCOD_32APSK_3_4,
98 DVBFE_MODCOD_32APSK_4_5,
99 DVBFE_MODCOD_32APSK_5_6,
100 DVBFE_MODCOD_32APSK_8_9,
101 DVBFE_MODCOD_32APSK_9_10,
102 DVBFE_MODCOD_RESERVED_1,
103 DVBFE_MODCOD_BPSK_1_3,
104 DVBFE_MODCOD_BPSK_1_4,
105 DVBFE_MODCOD_RESERVED_2
106};
107
108enum tuner_param {
109 DVBFE_TUNER_FREQUENCY = (1 << 0),
110 DVBFE_TUNER_TUNERSTEP = (1 << 1),
111 DVBFE_TUNER_IFFREQ = (1 << 2),
112 DVBFE_TUNER_BANDWIDTH = (1 << 3),
113 DVBFE_TUNER_REFCLOCK = (1 << 4),
114 DVBFE_TUNER_IQSENSE = (1 << 5),
115 DVBFE_TUNER_DUMMY = (1 << 31)
116};
117
118/*
119 * ALGO_HW: (Hardware Algorithm)
120 * ----------------------------------------------------------------
121 * Devices that support this algorithm do everything in hardware
122 * and no software support is needed to handle them.
123 * Requesting the device to LOCK is the only thing required; the
124 * device is supposed to do everything else in the hardware.
125 *
126 * ALGO_SW: (Software Algorithm)
127 * ----------------------------------------------------------------
128 * These are dumb devices that require software to do everything
129 *
130 * ALGO_CUSTOM: (Customizable Algorithm)
131 * ----------------------------------------------------------------
132 * Devices having this algorithm can be customized to have specific
133 * algorithms in the frontend driver, rather than simply doing a
134 * software zig-zag. In this case the zigzag may be hardware assisted
135 * or it may be completely done in hardware. In all cases, usage of
136 * this algorithm, in conjunction with the search and track
137 * callbacks, utilizes the driver specific algorithm.
138 *
139 * ALGO_RECOVERY: (Recovery Algorithm)
140 * ----------------------------------------------------------------
141 * These devices have AUTO recovery capabilities from LOCK failure
142 */
143enum dvbfe_algo {
144 DVBFE_ALGO_HW = (1 << 0),
145 DVBFE_ALGO_SW = (1 << 1),
146 DVBFE_ALGO_CUSTOM = (1 << 2),
147 DVBFE_ALGO_RECOVERY = (1 << 31)
148};
149
150struct tuner_state {
151 u32 frequency;
152 u32 tunerstep;
153 u32 ifreq;
154 u32 bandwidth;
155 u32 iqsense;
156 u32 refclock;
157};
158
159/*
160 * search callback possible return status
161 *
162 * DVBFE_ALGO_SEARCH_SUCCESS
163 * The frontend search algorithm completed and returned successfully
164 *
165 * DVBFE_ALGO_SEARCH_ASLEEP
166 * The frontend search algorithm is sleeping
167 *
168 * DVBFE_ALGO_SEARCH_FAILED
169 * The frontend search for a signal failed
170 *
171 * DVBFE_ALGO_SEARCH_INVALID
172 * The frontend search algorithm was probably supplied with invalid
173 * parameters and the search is an invalid one
174 *
175 * DVBFE_ALGO_SEARCH_ERROR
176 * The frontend search algorithm failed due to some error
177 *
178 * DVBFE_ALGO_SEARCH_AGAIN
179 * The frontend search algorithm was requested to search again
180 */
181enum dvbfe_search {
182 DVBFE_ALGO_SEARCH_SUCCESS = (1 << 0),
183 DVBFE_ALGO_SEARCH_ASLEEP = (1 << 1),
184 DVBFE_ALGO_SEARCH_FAILED = (1 << 2),
185 DVBFE_ALGO_SEARCH_INVALID = (1 << 3),
186 DVBFE_ALGO_SEARCH_AGAIN = (1 << 4),
187 DVBFE_ALGO_SEARCH_ERROR = (1 << 31),
188};
189
190
72struct dvb_tuner_ops { 191struct dvb_tuner_ops {
73 192
74 struct dvb_tuner_info info; 193 struct dvb_tuner_info info;
@@ -99,6 +218,13 @@ struct dvb_tuner_ops {
99 * tuners which require sophisticated tuning loops, controlling each parameter separately. */ 218 * tuners which require sophisticated tuning loops, controlling each parameter separately. */
100 int (*set_frequency)(struct dvb_frontend *fe, u32 frequency); 219 int (*set_frequency)(struct dvb_frontend *fe, u32 frequency);
101 int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth); 220 int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth);
221
222 /*
223 * These are provided separately from set_params in order to facilitate silicon
224 * tuners which require sophisticated tuning loops, controlling each parameter separately.
225 */
226 int (*set_state)(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state);
227 int (*get_state)(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state);
102}; 228};
103 229
104struct analog_demod_info { 230struct analog_demod_info {
@@ -142,7 +268,7 @@ struct dvb_frontend_ops {
142 unsigned int *delay, 268 unsigned int *delay,
143 fe_status_t *status); 269 fe_status_t *status);
144 /* get frontend tuning algorithm from the module */ 270 /* get frontend tuning algorithm from the module */
145 int (*get_frontend_algo)(struct dvb_frontend *fe); 271 enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe);
146 272
147 /* these two are only used for the swzigzag code */ 273 /* these two are only used for the swzigzag code */
148 int (*set_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params); 274 int (*set_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
@@ -167,6 +293,12 @@ struct dvb_frontend_ops {
167 int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable); 293 int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable);
168 int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire); 294 int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire);
169 295
296 /* These callbacks are for devices that implement their own
297 * tuning algorithms, rather than a simple swzigzag
298 */
299 enum dvbfe_search (*search)(struct dvb_frontend *fe, struct dvb_frontend_parameters *p);
300 int (*track)(struct dvb_frontend *fe, struct dvb_frontend_parameters *p);
301
170 struct dvb_tuner_ops tuner_ops; 302 struct dvb_tuner_ops tuner_ops;
171 struct analog_demod_ops analog_ops; 303 struct analog_demod_ops analog_ops;
172 304
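For DVBFE_ALGO_CUSTOM, the thread shown earlier calls ops.search() while DVBFE_ALGO_SEARCH_AGAIN is set and ops.track() once the status is DVBFE_ALGO_SEARCH_SUCCESS, polling read_status() for FE_HAS_LOCK in between. A hedged sketch of how a driver might wire these hooks up (the mydemod_* names and helpers are invented for illustration):

/* Hedged sketch of a DVBFE_ALGO_CUSTOM demod; mydemod_blind_scan() and
 * mydemod_fine_track() are hypothetical helpers, not a real API. */
static enum dvbfe_algo mydemod_get_frontend_algo(struct dvb_frontend *fe)
{
	return DVBFE_ALGO_CUSTOM;
}

static enum dvbfe_search mydemod_search(struct dvb_frontend *fe,
					struct dvb_frontend_parameters *p)
{
	if (mydemod_blind_scan(fe, p) == 0)       /* device-specific acquisition */
		return DVBFE_ALGO_SEARCH_SUCCESS; /* thread will call ->track() next */

	return DVBFE_ALGO_SEARCH_FAILED;          /* thread sets SEARCH_AGAIN and retries */
}

static int mydemod_track(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
{
	return mydemod_fine_track(fe);            /* keep the carrier once acquired */
}

static struct dvb_frontend_ops mydemod_ops = {
	.get_frontend_algo = mydemod_get_frontend_algo,
	.search            = mydemod_search,
	.track             = mydemod_track,
	/* read_status() is still needed: the thread polls it for FE_HAS_LOCK */
};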
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index a113744a56cc..6c571d9f011c 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -50,33 +50,27 @@ static const char * const dnames[] = {
50 "net", "osd" 50 "net", "osd"
51}; 51};
52 52
53#ifdef CONFIG_DVB_DYNAMIC_MINORS
54#define MAX_DVB_MINORS 256
55#define DVB_MAX_IDS MAX_DVB_MINORS
56#else
53#define DVB_MAX_IDS 4 57#define DVB_MAX_IDS 4
54#define nums2minor(num,type,id) ((num << 6) | (id << 4) | type) 58#define nums2minor(num,type,id) ((num << 6) | (id << 4) | type)
55#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64) 59#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64)
60#endif
56 61
57static struct class *dvb_class; 62static struct class *dvb_class;
58 63
59static struct dvb_device* dvbdev_find_device (int minor) 64static struct dvb_device *dvb_minors[MAX_DVB_MINORS];
60{ 65static DECLARE_RWSEM(minor_rwsem);
61 struct dvb_adapter *adap;
62
63 list_for_each_entry(adap, &dvb_adapter_list, list_head) {
64 struct dvb_device *dev;
65 list_for_each_entry(dev, &adap->device_list, list_head)
66 if (nums2minor(adap->num, dev->type, dev->id) == minor)
67 return dev;
68 }
69
70 return NULL;
71}
72
73 66
74static int dvb_device_open(struct inode *inode, struct file *file) 67static int dvb_device_open(struct inode *inode, struct file *file)
75{ 68{
76 struct dvb_device *dvbdev; 69 struct dvb_device *dvbdev;
77 70
78 lock_kernel(); 71 lock_kernel();
79 dvbdev = dvbdev_find_device (iminor(inode)); 72 down_read(&minor_rwsem);
73 dvbdev = dvb_minors[iminor(inode)];
80 74
81 if (dvbdev && dvbdev->fops) { 75 if (dvbdev && dvbdev->fops) {
82 int err = 0; 76 int err = 0;
@@ -92,9 +86,11 @@ static int dvb_device_open(struct inode *inode, struct file *file)
92 file->f_op = fops_get(old_fops); 86 file->f_op = fops_get(old_fops);
93 } 87 }
94 fops_put(old_fops); 88 fops_put(old_fops);
89 up_read(&minor_rwsem);
95 unlock_kernel(); 90 unlock_kernel();
96 return err; 91 return err;
97 } 92 }
93 up_read(&minor_rwsem);
98 unlock_kernel(); 94 unlock_kernel();
99 return -ENODEV; 95 return -ENODEV;
100} 96}
@@ -192,6 +188,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
192 struct dvb_device *dvbdev; 188 struct dvb_device *dvbdev;
193 struct file_operations *dvbdevfops; 189 struct file_operations *dvbdevfops;
194 struct device *clsdev; 190 struct device *clsdev;
191 int minor;
195 int id; 192 int id;
196 193
197 mutex_lock(&dvbdev_register_lock); 194 mutex_lock(&dvbdev_register_lock);
@@ -231,11 +228,31 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
231 228
232 list_add_tail (&dvbdev->list_head, &adap->device_list); 229 list_add_tail (&dvbdev->list_head, &adap->device_list);
233 230
231 down_write(&minor_rwsem);
232#ifdef CONFIG_DVB_DYNAMIC_MINORS
233 for (minor = 0; minor < MAX_DVB_MINORS; minor++)
234 if (dvb_minors[minor] == NULL)
235 break;
236
237 if (minor == MAX_DVB_MINORS) {
238 kfree(dvbdevfops);
239 kfree(dvbdev);
240 mutex_unlock(&dvbdev_register_lock);
241 return -EINVAL;
242 }
243#else
244 minor = nums2minor(adap->num, type, id);
245#endif
246
247 dvbdev->minor = minor;
248 dvb_minors[minor] = dvbdev;
249 up_write(&minor_rwsem);
250
234 mutex_unlock(&dvbdev_register_lock); 251 mutex_unlock(&dvbdev_register_lock);
235 252
236 clsdev = device_create(dvb_class, adap->device, 253 clsdev = device_create(dvb_class, adap->device,
237 MKDEV(DVB_MAJOR, nums2minor(adap->num, type, id)), 254 MKDEV(DVB_MAJOR, minor),
238 NULL, "dvb%d.%s%d", adap->num, dnames[type], id); 255 dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
239 if (IS_ERR(clsdev)) { 256 if (IS_ERR(clsdev)) {
240 printk(KERN_ERR "%s: failed to create device dvb%d.%s%d (%ld)\n", 257 printk(KERN_ERR "%s: failed to create device dvb%d.%s%d (%ld)\n",
241 __func__, adap->num, dnames[type], id, PTR_ERR(clsdev)); 258 __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
@@ -243,8 +260,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
243 } 260 }
244 261
245 dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n", 262 dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
246 adap->num, dnames[type], id, nums2minor(adap->num, type, id), 263 adap->num, dnames[type], id, minor, minor);
247 nums2minor(adap->num, type, id));
248 264
249 return 0; 265 return 0;
250} 266}
@@ -256,8 +272,11 @@ void dvb_unregister_device(struct dvb_device *dvbdev)
256 if (!dvbdev) 272 if (!dvbdev)
257 return; 273 return;
258 274
259 device_destroy(dvb_class, MKDEV(DVB_MAJOR, nums2minor(dvbdev->adapter->num, 275 down_write(&minor_rwsem);
260 dvbdev->type, dvbdev->id))); 276 dvb_minors[dvbdev->minor] = NULL;
277 up_write(&minor_rwsem);
278
279 device_destroy(dvb_class, MKDEV(DVB_MAJOR, dvbdev->minor));
261 280
262 list_del (&dvbdev->list_head); 281 list_del (&dvbdev->list_head);
263 kfree (dvbdev->fops); 282 kfree (dvbdev->fops);
@@ -413,6 +432,15 @@ out:
413 return err; 432 return err;
414} 433}
415 434
435static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env)
436{
437 struct dvb_device *dvbdev = dev_get_drvdata(dev);
438
439 add_uevent_var(env, "DVB_DEVICE_NUM=%d", dvbdev->id);
440 add_uevent_var(env, "DVB_ADAPTER_NUM=%d", dvbdev->adapter->num);
441 return 0;
442}
443
416static int __init init_dvbdev(void) 444static int __init init_dvbdev(void)
417{ 445{
418 int retval; 446 int retval;
@@ -434,6 +462,7 @@ static int __init init_dvbdev(void)
434 retval = PTR_ERR(dvb_class); 462 retval = PTR_ERR(dvb_class);
435 goto error; 463 goto error;
436 } 464 }
465 dvb_class->dev_uevent = dvb_uevent;
437 return 0; 466 return 0;
438 467
439error: 468error:
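With CONFIG_DVB_DYNAMIC_MINORS the minor is simply the first free slot in the 256-entry dvb_minors[] table, so it no longer encodes adapter, type and id, and the device nodes have to be named from udev (helped by the new uevent variables above). Without the option, nums2minor() still packs the three values into one byte exactly as the macro does, which is where the limits of 4 ids per type and 4 adapters' worth of static minors come from. A standalone illustration of that packing (type 3 being the frontend device in the full dnames[] table is shown only as an example):

#include <stdio.h>

/* Standalone illustration of the static minor layout:
 * minor = (adapter << 6) | (id << 4) | type, i.e. ids 0-3 per device
 * type and 64 minors per adapter. */
static unsigned int nums2minor(unsigned int num, unsigned int type, unsigned int id)
{
	return (num << 6) | (id << 4) | type;
}

int main(void)
{
	/* e.g. adapter 1, type 3 (the frontend device), id 2 */
	unsigned int minor = nums2minor(1, 3, 2);

	printf("minor=%u -> adapter=%u id=%u type=%u\n",
	       minor, minor >> 6, (minor >> 4) & 0x3, minor & 0xf);
	return 0;
}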
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index 574e336bac35..dca49cf962e8 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -74,6 +74,7 @@ struct dvb_device {
74 struct file_operations *fops; 74 struct file_operations *fops;
75 struct dvb_adapter *adapter; 75 struct dvb_adapter *adapter;
76 int type; 76 int type;
77 int minor;
77 u32 id; 78 u32 id;
78 79
79 /* in theory, 'users' can vanish now, 80 /* in theory, 'users' can vanish now,
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index e9ab0249d133..e1e9aa5c6b84 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -733,9 +733,19 @@ static int af9015_read_config(struct usb_device *udev)
733 af9015_config.ir_table_size = 733 af9015_config.ir_table_size =
734 ARRAY_SIZE(af9015_ir_table_mygictv); 734 ARRAY_SIZE(af9015_ir_table_mygictv);
735 break; 735 break;
736 case AF9015_REMOTE_DIGITTRADE_DVB_T:
737 af9015_properties[i].rc_key_map =
738 af9015_rc_keys_digittrade;
739 af9015_properties[i].rc_key_map_size =
740 ARRAY_SIZE(af9015_rc_keys_digittrade);
741 af9015_config.ir_table =
742 af9015_ir_table_digittrade;
743 af9015_config.ir_table_size =
744 ARRAY_SIZE(af9015_ir_table_digittrade);
745 break;
736 } 746 }
737 } else { 747 } else {
738 switch (udev->descriptor.idVendor) { 748 switch (le16_to_cpu(udev->descriptor.idVendor)) {
739 case USB_VID_LEADTEK: 749 case USB_VID_LEADTEK:
740 af9015_properties[i].rc_key_map = 750 af9015_properties[i].rc_key_map =
741 af9015_rc_keys_leadtek; 751 af9015_rc_keys_leadtek;
@@ -748,7 +758,7 @@ static int af9015_read_config(struct usb_device *udev)
748 break; 758 break;
749 case USB_VID_VISIONPLUS: 759 case USB_VID_VISIONPLUS:
750 if (udev->descriptor.idProduct == 760 if (udev->descriptor.idProduct ==
751 USB_PID_AZUREWAVE_AD_TU700) { 761 cpu_to_le16(USB_PID_AZUREWAVE_AD_TU700)) {
752 af9015_properties[i].rc_key_map = 762 af9015_properties[i].rc_key_map =
753 af9015_rc_keys_twinhan; 763 af9015_rc_keys_twinhan;
754 af9015_properties[i].rc_key_map_size = 764 af9015_properties[i].rc_key_map_size =
@@ -800,6 +810,16 @@ static int af9015_read_config(struct usb_device *udev)
800 ARRAY_SIZE(af9015_ir_table_msi); 810 ARRAY_SIZE(af9015_ir_table_msi);
801 } 811 }
802 break; 812 break;
813 case USB_VID_AVERMEDIA:
814 af9015_properties[i].rc_key_map =
815 af9015_rc_keys_avermedia;
816 af9015_properties[i].rc_key_map_size =
817 ARRAY_SIZE(af9015_rc_keys_avermedia);
818 af9015_config.ir_table =
819 af9015_ir_table_avermedia;
820 af9015_config.ir_table_size =
821 ARRAY_SIZE(af9015_ir_table_avermedia);
822 break;
803 } 823 }
804 } 824 }
805 } 825 }
@@ -1191,6 +1211,7 @@ static struct usb_device_id af9015_usb_table[] = {
1191 {USB_DEVICE(USB_VID_TELESTAR, USB_PID_TELESTAR_STARSTICK_2)}, 1211 {USB_DEVICE(USB_VID_TELESTAR, USB_PID_TELESTAR_STARSTICK_2)},
1192 {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A309)}, 1212 {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A309)},
1193/* 15 */{USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGI_VOX_MINI_III)}, 1213/* 15 */{USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGI_VOX_MINI_III)},
1214 {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U)},
1194 {0}, 1215 {0},
1195}; 1216};
1196MODULE_DEVICE_TABLE(usb, af9015_usb_table); 1217MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1343,7 +1364,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1343 1364
1344 .i2c_algo = &af9015_i2c_algo, 1365 .i2c_algo = &af9015_i2c_algo,
1345 1366
1346 .num_device_descs = 6, 1367 .num_device_descs = 7,
1347 .devices = { 1368 .devices = {
1348 { 1369 {
1349 .name = "Xtensions XD-380", 1370 .name = "Xtensions XD-380",
@@ -1375,6 +1396,12 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1375 .cold_ids = {&af9015_usb_table[15], NULL}, 1396 .cold_ids = {&af9015_usb_table[15], NULL},
1376 .warm_ids = {NULL}, 1397 .warm_ids = {NULL},
1377 }, 1398 },
1399 {
1400 .name = "KWorld USB DVB-T TV Stick II " \
1401 "(VS-DVB-T 395U)",
1402 .cold_ids = {&af9015_usb_table[16], NULL},
1403 .warm_ids = {NULL},
1404 },
1378 } 1405 }
1379 } 1406 }
1380}; 1407};
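The af9015 hunks wrap idVendor/idProduct in le16_to_cpu()/cpu_to_le16(): USB descriptor fields are little-endian on the wire, so comparing them directly against host-order constants only works on little-endian machines. A standalone illustration of the conversion (the vendor id value is just an example, and le16_to_cpu_demo() mirrors the byte swap that le16_to_cpu() performs on a big-endian host):

#include <stdio.h>
#include <stdint.h>

static uint16_t le16_to_cpu_demo(const uint8_t raw[2])
{
	return (uint16_t)(raw[0] | (raw[1] << 8));       /* wire order is LSB first */
}

int main(void)
{
	const uint8_t wire_idVendor[2] = { 0xa4, 0x15 }; /* 0x15a4 on the wire */

	printf("vendor id = 0x%04x\n", le16_to_cpu_demo(wire_idVendor));
	return 0;
}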
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index 6c3c97293316..21c7782f4889 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -123,6 +123,7 @@ enum af9015_remote {
123 AF9015_REMOTE_A_LINK_DTU_M, 123 AF9015_REMOTE_A_LINK_DTU_M,
124 AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3, 124 AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
125 AF9015_REMOTE_MYGICTV_U718, 125 AF9015_REMOTE_MYGICTV_U718,
126 AF9015_REMOTE_DIGITTRADE_DVB_T,
126}; 127};
127 128
128/* Leadtek WinFast DTV Dongle Gold */ 129/* Leadtek WinFast DTV Dongle Gold */
@@ -520,4 +521,143 @@ static u8 af9015_ir_table_kworld[] = {
520 0x86, 0x6b, 0x23, 0xdc, 0x45, 0x07, 0x00, 521 0x86, 0x6b, 0x23, 0xdc, 0x45, 0x07, 0x00,
521}; 522};
522 523
524/* AverMedia Volar X */
525static struct dvb_usb_rc_key af9015_rc_keys_avermedia[] = {
526 { 0x05, 0x3d, KEY_PROG1 }, /* SOURCE */
527 { 0x05, 0x12, KEY_POWER }, /* POWER */
528 { 0x05, 0x1e, KEY_1 }, /* 1 */
529 { 0x05, 0x1f, KEY_2 }, /* 2 */
530 { 0x05, 0x20, KEY_3 }, /* 3 */
531 { 0x05, 0x21, KEY_4 }, /* 4 */
532 { 0x05, 0x22, KEY_5 }, /* 5 */
533 { 0x05, 0x23, KEY_6 }, /* 6 */
534 { 0x05, 0x24, KEY_7 }, /* 7 */
535 { 0x05, 0x25, KEY_8 }, /* 8 */
536 { 0x05, 0x26, KEY_9 }, /* 9 */
537 { 0x05, 0x3f, KEY_LEFT }, /* L / DISPLAY */
538 { 0x05, 0x27, KEY_0 }, /* 0 */
539 { 0x05, 0x0f, KEY_RIGHT }, /* R / CH RTN */
540 { 0x05, 0x18, KEY_PROG2 }, /* SNAP SHOT */
541 { 0x05, 0x1c, KEY_PROG3 }, /* 16-CH PREV */
542 { 0x05, 0x2d, KEY_VOLUMEDOWN }, /* VOL DOWN */
543 { 0x05, 0x3e, KEY_ZOOM }, /* FULL SCREEN */
544 { 0x05, 0x2e, KEY_VOLUMEUP }, /* VOL UP */
545 { 0x05, 0x10, KEY_MUTE }, /* MUTE */
546 { 0x05, 0x04, KEY_AUDIO }, /* AUDIO */
547 { 0x05, 0x15, KEY_RECORD }, /* RECORD */
548 { 0x05, 0x11, KEY_PLAY }, /* PLAY */
549 { 0x05, 0x16, KEY_STOP }, /* STOP */
550 { 0x05, 0x0c, KEY_PLAYPAUSE }, /* TIMESHIFT / PAUSE */
551 { 0x05, 0x05, KEY_BACK }, /* << / RED */
552 { 0x05, 0x09, KEY_FORWARD }, /* >> / YELLOW */
553 { 0x05, 0x17, KEY_TEXT }, /* TELETEXT */
554 { 0x05, 0x0a, KEY_EPG }, /* EPG */
555 { 0x05, 0x13, KEY_MENU }, /* MENU */
556
557 { 0x05, 0x0e, KEY_CHANNELUP }, /* CH UP */
558 { 0x05, 0x0d, KEY_CHANNELDOWN }, /* CH DOWN */
559 { 0x05, 0x19, KEY_FIRST }, /* |<< / GREEN */
560 { 0x05, 0x08, KEY_LAST }, /* >>| / BLUE */
561};
562
563static u8 af9015_ir_table_avermedia[] = {
564 0x02, 0xfd, 0x00, 0xff, 0x12, 0x05, 0x00,
565 0x02, 0xfd, 0x01, 0xfe, 0x3d, 0x05, 0x00,
566 0x02, 0xfd, 0x03, 0xfc, 0x17, 0x05, 0x00,
567 0x02, 0xfd, 0x04, 0xfb, 0x0a, 0x05, 0x00,
568 0x02, 0xfd, 0x05, 0xfa, 0x1e, 0x05, 0x00,
569 0x02, 0xfd, 0x06, 0xf9, 0x1f, 0x05, 0x00,
570 0x02, 0xfd, 0x07, 0xf8, 0x20, 0x05, 0x00,
571 0x02, 0xfd, 0x09, 0xf6, 0x21, 0x05, 0x00,
572 0x02, 0xfd, 0x0a, 0xf5, 0x22, 0x05, 0x00,
573 0x02, 0xfd, 0x0b, 0xf4, 0x23, 0x05, 0x00,
574 0x02, 0xfd, 0x0d, 0xf2, 0x24, 0x05, 0x00,
575 0x02, 0xfd, 0x0e, 0xf1, 0x25, 0x05, 0x00,
576 0x02, 0xfd, 0x0f, 0xf0, 0x26, 0x05, 0x00,
577 0x02, 0xfd, 0x11, 0xee, 0x27, 0x05, 0x00,
578 0x02, 0xfd, 0x08, 0xf7, 0x04, 0x05, 0x00,
579 0x02, 0xfd, 0x0c, 0xf3, 0x3e, 0x05, 0x00,
580 0x02, 0xfd, 0x10, 0xef, 0x1c, 0x05, 0x00,
581 0x02, 0xfd, 0x12, 0xed, 0x3f, 0x05, 0x00,
582 0x02, 0xfd, 0x13, 0xec, 0x0f, 0x05, 0x00,
583 0x02, 0xfd, 0x14, 0xeb, 0x10, 0x05, 0x00,
584 0x02, 0xfd, 0x15, 0xea, 0x13, 0x05, 0x00,
585 0x02, 0xfd, 0x17, 0xe8, 0x18, 0x05, 0x00,
586 0x02, 0xfd, 0x18, 0xe7, 0x11, 0x05, 0x00,
587 0x02, 0xfd, 0x19, 0xe6, 0x15, 0x05, 0x00,
588 0x02, 0xfd, 0x1a, 0xe5, 0x0c, 0x05, 0x00,
589 0x02, 0xfd, 0x1b, 0xe4, 0x16, 0x05, 0x00,
590 0x02, 0xfd, 0x1c, 0xe3, 0x09, 0x05, 0x00,
591 0x02, 0xfd, 0x1d, 0xe2, 0x05, 0x05, 0x00,
592 0x02, 0xfd, 0x1e, 0xe1, 0x2d, 0x05, 0x00,
593 0x02, 0xfd, 0x1f, 0xe0, 0x2e, 0x05, 0x00,
594 0x03, 0xfc, 0x00, 0xff, 0x08, 0x05, 0x00,
595 0x03, 0xfc, 0x01, 0xfe, 0x19, 0x05, 0x00,
596 0x03, 0xfc, 0x02, 0xfd, 0x0d, 0x05, 0x00,
597 0x03, 0xfc, 0x03, 0xfc, 0x0e, 0x05, 0x00,
598};
599
600/* Digittrade DVB-T USB Stick */
601static struct dvb_usb_rc_key af9015_rc_keys_digittrade[] = {
602 { 0x01, 0x0f, KEY_LAST }, /* RETURN */
603 { 0x05, 0x17, KEY_TEXT }, /* TELETEXT */
604 { 0x01, 0x08, KEY_EPG }, /* EPG */
605 { 0x05, 0x13, KEY_POWER }, /* POWER */
606 { 0x01, 0x09, KEY_ZOOM }, /* FULLSCREEN */
607 { 0x00, 0x40, KEY_AUDIO }, /* DUAL SOUND */
608 { 0x00, 0x2c, KEY_PRINT }, /* SNAPSHOT */
609 { 0x05, 0x16, KEY_SUBTITLE }, /* SUBTITLE */
610 { 0x00, 0x52, KEY_CHANNELUP }, /* CH Up */
611 { 0x00, 0x51, KEY_CHANNELDOWN },/* Ch Dn */
612 { 0x00, 0x57, KEY_VOLUMEUP }, /* Vol Up */
613 { 0x00, 0x56, KEY_VOLUMEDOWN }, /* Vol Dn */
614 { 0x01, 0x10, KEY_MUTE }, /* MUTE */
615 { 0x00, 0x27, KEY_0 },
616 { 0x00, 0x1e, KEY_1 },
617 { 0x00, 0x1f, KEY_2 },
618 { 0x00, 0x20, KEY_3 },
619 { 0x00, 0x21, KEY_4 },
620 { 0x00, 0x22, KEY_5 },
621 { 0x00, 0x23, KEY_6 },
622 { 0x00, 0x24, KEY_7 },
623 { 0x00, 0x25, KEY_8 },
624 { 0x00, 0x26, KEY_9 },
625 { 0x01, 0x17, KEY_PLAYPAUSE }, /* TIMESHIFT */
626 { 0x01, 0x15, KEY_RECORD }, /* RECORD */
627 { 0x03, 0x13, KEY_PLAY }, /* PLAY */
628 { 0x01, 0x16, KEY_STOP }, /* STOP */
629 { 0x01, 0x13, KEY_PAUSE }, /* PAUSE */
630};
631
632static u8 af9015_ir_table_digittrade[] = {
633 0x00, 0xff, 0x06, 0xf9, 0x13, 0x05, 0x00,
634 0x00, 0xff, 0x4d, 0xb2, 0x17, 0x01, 0x00,
635 0x00, 0xff, 0x1f, 0xe0, 0x2c, 0x00, 0x00,
636 0x00, 0xff, 0x0a, 0xf5, 0x15, 0x01, 0x00,
637 0x00, 0xff, 0x0e, 0xf1, 0x16, 0x01, 0x00,
638 0x00, 0xff, 0x09, 0xf6, 0x09, 0x01, 0x00,
639 0x00, 0xff, 0x01, 0xfe, 0x08, 0x01, 0x00,
640 0x00, 0xff, 0x05, 0xfa, 0x10, 0x01, 0x00,
641 0x00, 0xff, 0x02, 0xfd, 0x56, 0x00, 0x00,
642 0x00, 0xff, 0x40, 0xbf, 0x57, 0x00, 0x00,
643 0x00, 0xff, 0x19, 0xe6, 0x52, 0x00, 0x00,
644 0x00, 0xff, 0x17, 0xe8, 0x51, 0x00, 0x00,
645 0x00, 0xff, 0x10, 0xef, 0x0f, 0x01, 0x00,
646 0x00, 0xff, 0x54, 0xab, 0x27, 0x00, 0x00,
647 0x00, 0xff, 0x1b, 0xe4, 0x1e, 0x00, 0x00,
648 0x00, 0xff, 0x11, 0xee, 0x1f, 0x00, 0x00,
649 0x00, 0xff, 0x15, 0xea, 0x20, 0x00, 0x00,
650 0x00, 0xff, 0x12, 0xed, 0x21, 0x00, 0x00,
651 0x00, 0xff, 0x16, 0xe9, 0x22, 0x00, 0x00,
652 0x00, 0xff, 0x4c, 0xb3, 0x23, 0x00, 0x00,
653 0x00, 0xff, 0x48, 0xb7, 0x24, 0x00, 0x00,
654 0x00, 0xff, 0x04, 0xfb, 0x25, 0x00, 0x00,
655 0x00, 0xff, 0x00, 0xff, 0x26, 0x00, 0x00,
656 0x00, 0xff, 0x1e, 0xe1, 0x13, 0x03, 0x00,
657 0x00, 0xff, 0x1a, 0xe5, 0x13, 0x01, 0x00,
658 0x00, 0xff, 0x03, 0xfc, 0x17, 0x05, 0x00,
659 0x00, 0xff, 0x0d, 0xf2, 0x16, 0x05, 0x00,
660 0x00, 0xff, 0x1d, 0xe2, 0x40, 0x00, 0x00,
661};
662
523#endif 663#endif
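Each entry in the af9015_rc_keys_* tables above pairs an address byte and a command byte with an input-layer keycode, and the dvb-usb core walks the table to translate decoded remote packets. A standalone sketch of that lookup, using a local struct that mirrors the { custom, data, keycode } layout of the entries (the real struct dvb_usb_rc_key lives in dvb-usb.h; the KEY_* values are hard-coded here only so the example compiles on its own):

#include <stdio.h>
#include <stdint.h>

#define KEY_POWER 116          /* values as in <linux/input.h> */
#define KEY_MUTE  113

struct demo_rc_key {
	uint8_t  custom;       /* remote address byte */
	uint8_t  data;         /* command byte        */
	uint32_t keycode;
};

static const struct demo_rc_key demo_map[] = {
	{ 0x05, 0x12, KEY_POWER },
	{ 0x05, 0x10, KEY_MUTE  },
};

static int demo_lookup(uint8_t custom, uint8_t data)
{
	size_t i;

	for (i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
		if (demo_map[i].custom == custom && demo_map[i].data == data)
			return demo_map[i].keycode;
	return -1;             /* unknown scancode */
}

int main(void)
{
	printf("0x05/0x12 -> keycode %d\n", demo_lookup(0x05, 0x12));
	return 0;
}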
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index cd2edbcaa097..5017f08b14a6 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -153,7 +153,7 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
153 int num) 153 int num)
154{ 154{
155 struct dvb_usb_device *d = i2c_get_adapdata(adap); 155 struct dvb_usb_device *d = i2c_get_adapdata(adap);
156 int ret, inc, i = 0; 156 int ret = 0, inc, i = 0;
157 157
158 if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 158 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
159 return -EAGAIN; 159 return -EAGAIN;
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
index 3ac9f74e9fbf..80e37a0d0892 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
@@ -32,7 +32,6 @@
32 32
33/* debug */ 33/* debug */
34int dvb_usb_cinergyt2_debug; 34int dvb_usb_cinergyt2_debug;
35int disable_remote;
36 35
37module_param_named(debug, dvb_usb_cinergyt2_debug, int, 0644); 36module_param_named(debug, dvb_usb_cinergyt2_debug, int, 0644);
38MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 " 37MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 "
@@ -45,7 +44,7 @@ struct cinergyt2_state {
45}; 44};
46 45
47/* We are missing a release hook with usb_device data */ 46/* We are missing a release hook with usb_device data */
48struct dvb_usb_device *cinergyt2_usb_device; 47static struct dvb_usb_device *cinergyt2_usb_device;
49 48
50static struct dvb_usb_device_properties cinergyt2_properties; 49static struct dvb_usb_device_properties cinergyt2_properties;
51 50
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2.h b/drivers/media/dvb/dvb-usb/cinergyT2.h
index 11d79eb384c8..84efe03771eb 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2.h
+++ b/drivers/media/dvb/dvb-usb/cinergyT2.h
@@ -70,11 +70,11 @@ struct dvbt_get_status_msg {
70 uint8_t bandwidth; 70 uint8_t bandwidth;
71 uint16_t tps; 71 uint16_t tps;
72 uint8_t flags; 72 uint8_t flags;
73 uint16_t gain; 73 __le16 gain;
74 uint8_t snr; 74 uint8_t snr;
75 uint32_t viterbi_error_rate; 75 __le32 viterbi_error_rate;
76 uint32_t rs_error_rate; 76 uint32_t rs_error_rate;
77 uint32_t uncorrected_block_count; 77 __le32 uncorrected_block_count;
78 uint8_t lock_bits; 78 uint8_t lock_bits;
79 uint8_t prev_lock_bits; 79 uint8_t prev_lock_bits;
80} __attribute__((packed)); 80} __attribute__((packed));
@@ -82,9 +82,9 @@ struct dvbt_get_status_msg {
82 82
83struct dvbt_set_parameters_msg { 83struct dvbt_set_parameters_msg {
84 uint8_t cmd; 84 uint8_t cmd;
85 uint32_t freq; 85 __le32 freq;
86 uint8_t bandwidth; 86 uint8_t bandwidth;
87 uint16_t tps; 87 __le16 tps;
88 uint8_t flags; 88 uint8_t flags;
89} __attribute__((packed)); 89} __attribute__((packed));
90 90
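Annotating the cinergyT2 message fields as __le16/__le32 documents the wire format and lets sparse warn whenever a field is used without the byte swap. A kernel-side sketch of reading such a struct (it assumes cinergyT2.h is on the include path for struct dvbt_get_status_msg; demo_parse_status() is illustrative, not part of the driver):

#include <linux/types.h>
#include <linux/kernel.h>
#include "cinergyT2.h"        /* struct dvbt_get_status_msg */

/* Endian-annotated fields go through le16_to_cpu()/le32_to_cpu() before
 * use; sparse now warns if one is mixed with plain host-order integers. */
static void demo_parse_status(const struct dvbt_get_status_msg *m,
			      u16 *gain, u32 *ucblocks)
{
	*gain     = le16_to_cpu(m->gain);                    /* __le16 field */
	*ucblocks = le32_to_cpu(m->uncorrected_block_count); /* __le32 field */
}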
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 7380b94b3b36..a4fca3fca5ee 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -96,6 +96,7 @@
96#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0 96#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
97#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1 97#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
98#define USB_PID_KWORLD_399U 0xe399 98#define USB_PID_KWORLD_399U 0xe399
99#define USB_PID_KWORLD_395U 0xe396
99#define USB_PID_KWORLD_PC160_2T 0xc160 100#define USB_PID_KWORLD_PC160_2T 0xc160
100#define USB_PID_KWORLD_VSTREAM_COLD 0x17de 101#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
101#define USB_PID_KWORLD_VSTREAM_WARM 0x17df 102#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index 6286fbbe7fb5..c65f273ff313 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -9,7 +9,6 @@
9* 9*
10* see Documentation/dvb/README.dvb-usb for more information 10* see Documentation/dvb/README.dvb-usb for more information
11*/ 11*/
12#include <linux/version.h>
13#include "dw2102.h" 12#include "dw2102.h"
14#include "si21xx.h" 13#include "si21xx.h"
15#include "stv0299.h" 14#include "stv0299.h"
@@ -27,6 +26,10 @@
27#define USB_PID_DW2104 0x2104 26#define USB_PID_DW2104 0x2104
28#endif 27#endif
29 28
29#ifndef USB_PID_CINERGY_S
30#define USB_PID_CINERGY_S 0x0064
31#endif
32
30#define DW210X_READ_MSG 0 33#define DW210X_READ_MSG 0
31#define DW210X_WRITE_MSG 1 34#define DW210X_WRITE_MSG 1
32 35
@@ -578,6 +581,7 @@ static struct usb_device_id dw2102_table[] = {
578 {USB_DEVICE(USB_VID_CYPRESS, 0x2101)}, 581 {USB_DEVICE(USB_VID_CYPRESS, 0x2101)},
579 {USB_DEVICE(USB_VID_CYPRESS, 0x2104)}, 582 {USB_DEVICE(USB_VID_CYPRESS, 0x2104)},
580 {USB_DEVICE(0x9022, 0xd650)}, 583 {USB_DEVICE(0x9022, 0xd650)},
584 {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
581 { } 585 { }
582}; 586};
583 587
@@ -647,6 +651,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
647 dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, 651 dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
648 DW210X_WRITE_MSG); 652 DW210X_WRITE_MSG);
649 break; 653 break;
654 case USB_PID_CINERGY_S:
650 case USB_PID_DW2102: 655 case USB_PID_DW2102:
651 dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, 656 dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
652 DW210X_WRITE_MSG); 657 DW210X_WRITE_MSG);
@@ -655,7 +660,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
655 /* check STV0299 frontend */ 660 /* check STV0299 frontend */
656 dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2, 661 dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2,
657 DW210X_READ_MSG); 662 DW210X_READ_MSG);
658 if (reset16[0] == 0xa1) { 663 if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) {
659 dw2102_properties.i2c_algo = &dw2102_i2c_algo; 664 dw2102_properties.i2c_algo = &dw2102_i2c_algo;
660 dw2102_properties.adapter->tuner_attach = &dw2102_tuner_attach; 665 dw2102_properties.adapter->tuner_attach = &dw2102_tuner_attach;
661 break; 666 break;
@@ -726,7 +731,7 @@ static struct dvb_usb_device_properties dw2102_properties = {
726 }, 731 },
727 } 732 }
728 }, 733 },
729 .num_device_descs = 2, 734 .num_device_descs = 3,
730 .devices = { 735 .devices = {
731 {"DVBWorld DVB-S 2102 USB2.0", 736 {"DVBWorld DVB-S 2102 USB2.0",
732 {&dw2102_table[0], NULL}, 737 {&dw2102_table[0], NULL},
@@ -736,6 +741,10 @@ static struct dvb_usb_device_properties dw2102_properties = {
736 {&dw2102_table[1], NULL}, 741 {&dw2102_table[1], NULL},
737 {NULL}, 742 {NULL},
738 }, 743 },
744 {"TerraTec Cinergy S USB",
745 {&dw2102_table[4], NULL},
746 {NULL},
747 },
739 } 748 }
740}; 749};
741 750
diff --git a/drivers/media/dvb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
index 262a858c3068..20eadf9318e0 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
@@ -25,6 +25,20 @@ struct gp8psk_fe_state {
25 unsigned long status_check_interval; 25 unsigned long status_check_interval;
26}; 26};
27 27
28static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe)
29{
30 struct gp8psk_fe_state *st = fe->demodulator_priv;
31 u8 status;
32 gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1);
33 return status & bmDCtuned;
34}
35
36static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode)
37{
38 struct gp8psk_fe_state *state = fe->demodulator_priv;
39 return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0);
40}
41
28static int gp8psk_fe_update_status(struct gp8psk_fe_state *st) 42static int gp8psk_fe_update_status(struct gp8psk_fe_state *st)
29{ 43{
30 u8 buf[6]; 44 u8 buf[6];
@@ -99,39 +113,114 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
99 return 0; 113 return 0;
100} 114}
101 115
116static int gp8psk_fe_set_property(struct dvb_frontend *fe,
117 struct dtv_property *tvp)
118{
119 deb_fe("%s(..)\n", __func__);
120 return 0;
121}
122
123static int gp8psk_fe_get_property(struct dvb_frontend *fe,
124 struct dtv_property *tvp)
125{
126 deb_fe("%s(..)\n", __func__);
127 return 0;
128}
129
130
102static int gp8psk_fe_set_frontend(struct dvb_frontend* fe, 131static int gp8psk_fe_set_frontend(struct dvb_frontend* fe,
103 struct dvb_frontend_parameters *fep) 132 struct dvb_frontend_parameters *fep)
104{ 133{
105 struct gp8psk_fe_state *state = fe->demodulator_priv; 134 struct gp8psk_fe_state *state = fe->demodulator_priv;
135 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
106 u8 cmd[10]; 136 u8 cmd[10];
107 u32 freq = fep->frequency * 1000; 137 u32 freq = fep->frequency * 1000;
138 int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct);
139
140 deb_fe("%s()\n", __func__);
108 141
109 cmd[4] = freq & 0xff; 142 cmd[4] = freq & 0xff;
110 cmd[5] = (freq >> 8) & 0xff; 143 cmd[5] = (freq >> 8) & 0xff;
111 cmd[6] = (freq >> 16) & 0xff; 144 cmd[6] = (freq >> 16) & 0xff;
112 cmd[7] = (freq >> 24) & 0xff; 145 cmd[7] = (freq >> 24) & 0xff;
113 146
114 switch(fe->ops.info.type) { 147 switch (c->delivery_system) {
115 case FE_QPSK: 148 case SYS_DVBS:
116 cmd[0] = fep->u.qpsk.symbol_rate & 0xff; 149 /* Only QPSK is supported for DVB-S */
117 cmd[1] = (fep->u.qpsk.symbol_rate >> 8) & 0xff; 150 if (c->modulation != QPSK) {
118 cmd[2] = (fep->u.qpsk.symbol_rate >> 16) & 0xff; 151 deb_fe("%s: unsupported modulation selected (%d)\n",
119 cmd[3] = (fep->u.qpsk.symbol_rate >> 24) & 0xff; 152 __func__, c->modulation);
120 cmd[8] = ADV_MOD_DVB_QPSK; 153 return -EOPNOTSUPP;
121 cmd[9] = 0x03; /*ADV_MOD_FEC_XXX*/ 154 }
155 c->fec_inner = FEC_AUTO;
122 break; 156 break;
157 case SYS_DVBS2:
158 deb_fe("%s: DVB-S2 delivery system selected\n", __func__);
159 break;
160
123 default: 161 default:
124 // other modes are unsuported right now 162 deb_fe("%s: unsupported delivery system selected (%d)\n",
125 cmd[0] = 0; 163 __func__, c->delivery_system);
126 cmd[1] = 0; 164 return -EOPNOTSUPP;
127 cmd[2] = 0; 165 }
128 cmd[3] = 0; 166
129 cmd[8] = 0; 167 cmd[0] = c->symbol_rate & 0xff;
168 cmd[1] = (c->symbol_rate >> 8) & 0xff;
169 cmd[2] = (c->symbol_rate >> 16) & 0xff;
170 cmd[3] = (c->symbol_rate >> 24) & 0xff;
171 switch (c->modulation) {
172 case QPSK:
173 if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
174 if (gp8psk_tuned_to_DCII(fe))
175 gp8psk_bcm4500_reload(state->d);
176 switch (c->fec_inner) {
177 case FEC_1_2:
178 cmd[9] = 0; break;
179 case FEC_2_3:
180 cmd[9] = 1; break;
181 case FEC_3_4:
182 cmd[9] = 2; break;
183 case FEC_5_6:
184 cmd[9] = 3; break;
185 case FEC_7_8:
186 cmd[9] = 4; break;
187 case FEC_AUTO:
188 cmd[9] = 5; break;
189 default:
190 cmd[9] = 5; break;
191 }
192 cmd[8] = ADV_MOD_DVB_QPSK;
193 break;
194 case PSK_8: /* PSK_8 is for compatibility with DN */
195 cmd[8] = ADV_MOD_TURBO_8PSK;
196 switch (c->fec_inner) {
197 case FEC_2_3:
198 cmd[9] = 0; break;
199 case FEC_3_4:
200 cmd[9] = 1; break;
201 case FEC_3_5:
202 cmd[9] = 2; break;
203 case FEC_5_6:
204 cmd[9] = 3; break;
205 case FEC_8_9:
206 cmd[9] = 4; break;
207 default:
208 cmd[9] = 0; break;
209 }
210 break;
211 case QAM_16: /* QAM_16 is for compatibility with DN */
212 cmd[8] = ADV_MOD_TURBO_16QAM;
130 cmd[9] = 0; 213 cmd[9] = 0;
131 break; 214 break;
215 default: /* Unknown modulation */
216 deb_fe("%s: unsupported modulation selected (%d)\n",
217 __func__, c->modulation);
218 return -EOPNOTSUPP;
132 } 219 }
133 220
134 gp8psk_usb_out_op(state->d,TUNE_8PSK,0,0,cmd,10); 221 if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
222 gp8psk_set_tuner_mode(fe, 0);
223 gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10);
135 224
136 state->lock = 0; 225 state->lock = 0;
137 state->next_status_check = jiffies; 226 state->next_status_check = jiffies;
@@ -140,13 +229,6 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend* fe,
140 return 0; 229 return 0;
141} 230}
142 231
143static int gp8psk_fe_get_frontend(struct dvb_frontend* fe,
144 struct dvb_frontend_parameters *fep)
145{
146 return 0;
147}
148
149
150static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe, 232static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe,
151 struct dvb_diseqc_master_cmd *m) 233 struct dvb_diseqc_master_cmd *m)
152{ 234{
@@ -261,9 +343,13 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
261 .symbol_rate_max = 45000000, 343 .symbol_rate_max = 45000000,
262 .symbol_rate_tolerance = 500, /* ppm */ 344 .symbol_rate_tolerance = 500, /* ppm */
263 .caps = FE_CAN_INVERSION_AUTO | 345 .caps = FE_CAN_INVERSION_AUTO |
264 FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | 346 FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
265 FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | 347 FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
266 FE_CAN_QPSK 348 /*
349 * FE_CAN_QAM_16 is for compatibility
350 * (Myth incorrectly detects Turbo-QPSK as plain QAM-16)
351 */
352 FE_CAN_QPSK | FE_CAN_QAM_16
267 }, 353 },
268 354
269 .release = gp8psk_fe_release, 355 .release = gp8psk_fe_release,
@@ -271,8 +357,10 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
271 .init = NULL, 357 .init = NULL,
272 .sleep = NULL, 358 .sleep = NULL,
273 359
360 .set_property = gp8psk_fe_set_property,
361 .get_property = gp8psk_fe_get_property,
274 .set_frontend = gp8psk_fe_set_frontend, 362 .set_frontend = gp8psk_fe_set_frontend,
275 .get_frontend = gp8psk_fe_get_frontend, 363
276 .get_tune_settings = gp8psk_fe_get_tune_settings, 364 .get_tune_settings = gp8psk_fe_get_tune_settings,
277 365
278 .read_status = gp8psk_fe_read_status, 366 .read_status = gp8psk_fe_read_status,
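
The rewritten gp8psk_fe_set_frontend() above assembles a 10-byte tune command from the DTV property cache: bytes 0-3 carry the symbol rate, bytes 4-7 the frequency, byte 8 the advanced-modulation code and byte 9 the FEC code. The nested switch on c->fec_inner is effectively a small lookup table; the sketch below reproduces the QPSK branch of that mapping as standalone C. The enum here is local to the example and does not reuse the kernel's fe_code_rate_t numbering.

/*
 * Standalone sketch of the QPSK FEC -> cmd[9] mapping from
 * gp8psk_fe_set_frontend() above. Unknown rates fall back to the
 * "auto" code (5), exactly as the driver's default: branch does.
 */
#include <stdio.h>

enum fec_rate { FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8, FEC_AUTO, FEC_NUM };

static unsigned char qpsk_fec_code(enum fec_rate fec)
{
	static const unsigned char code[FEC_NUM] = {
		[FEC_1_2] = 0, [FEC_2_3] = 1, [FEC_3_4] = 2,
		[FEC_5_6] = 3, [FEC_7_8] = 4, [FEC_AUTO] = 5,
	};
	return (fec < FEC_NUM) ? code[fec] : 5;
}

int main(void)
{
	printf("FEC 3/4 -> cmd[9] = %u\n", qpsk_fec_code(FEC_3_4));
	printf("FEC auto -> cmd[9] = %u\n", qpsk_fec_code(FEC_AUTO));
	return 0;
}
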
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index d965a923f391..c1da962cc886 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -174,6 +174,22 @@ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
174 return 0; 174 return 0;
175} 175}
176 176
177int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
178{
179 u8 buf;
180 int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
181 /* Turn off 8psk power */
182 if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1))
183 return -EINVAL;
184 /* Turn On 8psk power */
185 if (gp8psk_usb_in_op(d, BOOT_8PSK, 1, 0, &buf, 1))
186 return -EINVAL;
187 /* load BCM4500 firmware */
188 if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
189 if (gp8psk_load_bcm4500fw(d))
 190 return -EINVAL;
191 return 0;
192}
177 193
178static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) 194static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
179{ 195{
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.h b/drivers/media/dvb/dvb-usb/gp8psk.h
index e5cd8149c23d..e83a57506cfa 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.h
+++ b/drivers/media/dvb/dvb-usb/gp8psk.h
@@ -92,5 +92,6 @@ extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d);
92extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen); 92extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen);
93extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, 93extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
94 u16 index, u8 *b, int blen); 94 u16 index, u8 *b, int blen);
95extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d);
95 96
96#endif 97#endif
diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c
index da93b9e982c0..9da2cc95ca13 100644
--- a/drivers/media/dvb/dvb-usb/usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/usb-urb.c
@@ -156,7 +156,8 @@ static int usb_bulk_urb_init(struct usb_data_stream *stream)
156 stream->props.u.bulk.buffersize, 156 stream->props.u.bulk.buffersize,
157 usb_urb_complete, stream); 157 usb_urb_complete, stream);
158 158
159 stream->urb_list[i]->transfer_flags = 0; 159 stream->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
160 stream->urb_list[i]->transfer_dma = stream->dma_addr[i];
160 stream->urbs_initialized++; 161 stream->urbs_initialized++;
161 } 162 }
162 return 0; 163 return 0;
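
The usb-urb.c change above stops usbcore from DMA-mapping the bulk stream buffers on every submission: dvb-usb already allocated them as coherent, pre-mapped buffers, so each URB is flagged URB_NO_TRANSFER_DMA_MAP and handed the stored transfer_dma address instead. The fragment below is a minimal sketch of that pattern for a single bulk-in URB, written against the 2.6.2x-era USB API (usb_buffer_alloc/usb_buffer_free were later renamed usb_alloc_coherent/usb_free_coherent); the endpoint number and buffer size are arbitrary example values.

/*
 * Sketch: submit one bulk-in URB using a coherent buffer that is already
 * DMA-mapped, so the USB core must not map it again. Endpoint 0x82 and
 * the 4 KiB size are arbitrary example values.
 */
#include <linux/usb.h>

static int example_submit_premapped(struct usb_device *udev,
				    usb_complete_t complete_fn, void *ctx)
{
	dma_addr_t dma;
	struct urb *urb;
	void *buf = usb_buffer_alloc(udev, 4096, GFP_KERNEL, &dma);

	if (!buf)
		return -ENOMEM;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		usb_buffer_free(udev, 4096, buf, dma);
		return -ENOMEM;
	}

	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 0x82),
			  buf, 4096, complete_fn, ctx);

	/* tell usbcore the buffer is pre-mapped; hand it the DMA handle */
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	urb->transfer_dma = dma;

	return usb_submit_urb(urb, GFP_KERNEL);
}
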
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 96b93e21a84b..00269560793a 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -12,6 +12,25 @@ config DVB_FE_CUSTOMISE
12 12
13 If unsure say N. 13 If unsure say N.
14 14
15comment "Multistandard (satellite) frontends"
16 depends on DVB_CORE
17
18config DVB_STB0899
19 tristate "STB0899 based"
20 depends on DVB_CORE && I2C
21 default m if DVB_FE_CUSTOMISE
22 help
23 A DVB-S/S2/DSS Multistandard demodulator. Say Y when you want
 24 to support frontends based on this demodulator.
25
26config DVB_STB6100
27 tristate "STB6100 based tuners"
28 depends on DVB_CORE && I2C
29 default m if DVB_FE_CUSTOMISE
30 help
31 A Silicon tuner from ST used in conjunction with the STB0899
32 demodulator. Say Y when you want to support this tuner.
33
15comment "DVB-S (satellite) frontends" 34comment "DVB-S (satellite) frontends"
16 depends on DVB_CORE 35 depends on DVB_CORE
17 36
@@ -78,6 +97,13 @@ config DVB_TDA10086
78 help 97 help
79 A DVB-S tuner module. Say Y when you want to support this frontend. 98 A DVB-S tuner module. Say Y when you want to support this frontend.
80 99
100config DVB_TDA8261
101 tristate "Philips TDA8261 based"
102 depends on DVB_CORE && I2C
103 default m if DVB_FE_CUSTOMISE
104 help
105 A DVB-S tuner module. Say Y when you want to support this frontend.
106
81config DVB_VES1X93 107config DVB_VES1X93
82 tristate "VLSI VES1893 or VES1993 based" 108 tristate "VLSI VES1893 or VES1993 based"
83 depends on DVB_CORE && I2C 109 depends on DVB_CORE && I2C
@@ -92,6 +118,14 @@ config DVB_TUNER_ITD1000
92 help 118 help
93 A DVB-S tuner module. Say Y when you want to support this frontend. 119 A DVB-S tuner module. Say Y when you want to support this frontend.
94 120
121config DVB_TUNER_CX24113
122 tristate "Conexant CX24113/CX24128 tuner for DVB-S/DSS"
123 depends on DVB_CORE && I2C
124 default m if DVB_FE_CUSTOMISE
125 help
126 A DVB-S tuner module. Say Y when you want to support this frontend.
127
128
95config DVB_TDA826X 129config DVB_TDA826X
96 tristate "Philips TDA826X silicon tuner" 130 tristate "Philips TDA826X silicon tuner"
97 depends on DVB_CORE && I2C 131 depends on DVB_CORE && I2C
@@ -345,6 +379,14 @@ config DVB_LGDT330X
345 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want 379 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
346 to support this frontend. 380 to support this frontend.
347 381
382config DVB_LGDT3304
383 tristate "LG Electronics LGDT3304"
384 depends on DVB_CORE && I2C
385 default m if DVB_FE_CUSTOMISE
386 help
387 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
388 to support this frontend.
389
348config DVB_S5H1409 390config DVB_S5H1409
349 tristate "Samsung S5H1409 based" 391 tristate "Samsung S5H1409 based"
350 depends on DVB_CORE && I2C 392 depends on DVB_CORE && I2C
@@ -369,6 +411,17 @@ config DVB_S5H1411
369 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want 411 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
370 to support this frontend. 412 to support this frontend.
371 413
414comment "ISDB-T (terrestrial) frontends"
415 depends on DVB_CORE
416
417config DVB_S921
418 tristate "Sharp S921 tuner"
419 depends on DVB_CORE && I2C
420 default m if DVB_FE_CUSTOMISE
421 help
 422 An ISDB-T DQPSK, QPSK, 16QAM and 64QAM 1seg tuner module.
423 Say Y when you want to support this frontend.
424
372comment "Digital terrestrial only tuners/PLL" 425comment "Digital terrestrial only tuners/PLL"
373 depends on DVB_CORE 426 depends on DVB_CORE
374 427
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index aba79f4a63a7..af7bdf0ad4c7 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -5,8 +5,13 @@
5EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ 5EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
6EXTRA_CFLAGS += -Idrivers/media/common/tuners/ 6EXTRA_CFLAGS += -Idrivers/media/common/tuners/
7 7
8s921-objs := s921_module.o s921_core.o
9stb0899-objs = stb0899_drv.o stb0899_algo.o
10
8obj-$(CONFIG_DVB_PLL) += dvb-pll.o 11obj-$(CONFIG_DVB_PLL) += dvb-pll.o
9obj-$(CONFIG_DVB_STV0299) += stv0299.o 12obj-$(CONFIG_DVB_STV0299) += stv0299.o
13obj-$(CONFIG_DVB_STB0899) += stb0899.o
14obj-$(CONFIG_DVB_STB6100) += stb6100.o
10obj-$(CONFIG_DVB_SP8870) += sp8870.o 15obj-$(CONFIG_DVB_SP8870) += sp8870.o
11obj-$(CONFIG_DVB_CX22700) += cx22700.o 16obj-$(CONFIG_DVB_CX22700) += cx22700.o
12obj-$(CONFIG_DVB_CX24110) += cx24110.o 17obj-$(CONFIG_DVB_CX24110) += cx24110.o
@@ -35,18 +40,21 @@ obj-$(CONFIG_DVB_OR51132) += or51132.o
35obj-$(CONFIG_DVB_BCM3510) += bcm3510.o 40obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
36obj-$(CONFIG_DVB_S5H1420) += s5h1420.o 41obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
37obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o 42obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
43obj-$(CONFIG_DVB_LGDT3304) += lgdt3304.o
38obj-$(CONFIG_DVB_CX24123) += cx24123.o 44obj-$(CONFIG_DVB_CX24123) += cx24123.o
39obj-$(CONFIG_DVB_LNBP21) += lnbp21.o 45obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
40obj-$(CONFIG_DVB_ISL6405) += isl6405.o 46obj-$(CONFIG_DVB_ISL6405) += isl6405.o
41obj-$(CONFIG_DVB_ISL6421) += isl6421.o 47obj-$(CONFIG_DVB_ISL6421) += isl6421.o
42obj-$(CONFIG_DVB_TDA10086) += tda10086.o 48obj-$(CONFIG_DVB_TDA10086) += tda10086.o
43obj-$(CONFIG_DVB_TDA826X) += tda826x.o 49obj-$(CONFIG_DVB_TDA826X) += tda826x.o
50obj-$(CONFIG_DVB_TDA8261) += tda8261.o
44obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o 51obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
45obj-$(CONFIG_DVB_TUA6100) += tua6100.o 52obj-$(CONFIG_DVB_TUA6100) += tua6100.o
46obj-$(CONFIG_DVB_S5H1409) += s5h1409.o 53obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
47obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o 54obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
48obj-$(CONFIG_DVB_AU8522) += au8522.o 55obj-$(CONFIG_DVB_AU8522) += au8522.o
49obj-$(CONFIG_DVB_TDA10048) += tda10048.o 56obj-$(CONFIG_DVB_TDA10048) += tda10048.o
57obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
50obj-$(CONFIG_DVB_S5H1411) += s5h1411.o 58obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
51obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o 59obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o
52obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o 60obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
@@ -55,3 +63,5 @@ obj-$(CONFIG_DVB_CX24116) += cx24116.o
55obj-$(CONFIG_DVB_SI21XX) += si21xx.o 63obj-$(CONFIG_DVB_SI21XX) += si21xx.o
56obj-$(CONFIG_DVB_STV0288) += stv0288.o 64obj-$(CONFIG_DVB_STV0288) += stv0288.o
57obj-$(CONFIG_DVB_STB6000) += stb6000.o 65obj-$(CONFIG_DVB_STB6000) += stb6000.o
66obj-$(CONFIG_DVB_S921) += s921.o
67
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index 692b68a9e73b..b2b50fb4cfd3 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -223,12 +223,12 @@ static int af9013_set_coeff(struct af9013_state *state, fe_bandwidth_t bw)
223 int ret = 0; 223 int ret = 0;
224 u8 i = 0; 224 u8 i = 0;
225 u8 buf[24]; 225 u8 buf[24];
226 u32 ns_coeff1_2048nu; 226 u32 uninitialized_var(ns_coeff1_2048nu);
227 u32 ns_coeff1_8191nu; 227 u32 uninitialized_var(ns_coeff1_8191nu);
228 u32 ns_coeff1_8192nu; 228 u32 uninitialized_var(ns_coeff1_8192nu);
229 u32 ns_coeff1_8193nu; 229 u32 uninitialized_var(ns_coeff1_8193nu);
230 u32 ns_coeff2_2k; 230 u32 uninitialized_var(ns_coeff2_2k);
231 u32 ns_coeff2_8k; 231 u32 uninitialized_var(ns_coeff2_8k);
232 232
233 deb_info("%s: adc_clock:%d bw:%d\n", __func__, 233 deb_info("%s: adc_clock:%d bw:%d\n", __func__,
234 state->config.adc_clock, bw); 234 state->config.adc_clock, bw);
@@ -1009,7 +1009,7 @@ static int af9013_update_snr(struct dvb_frontend *fe)
1009 int ret; 1009 int ret;
1010 u8 buf[3], i, len; 1010 u8 buf[3], i, len;
1011 u32 quant = 0; 1011 u32 quant = 0;
1012 struct snr_table *snr_table; 1012 struct snr_table *uninitialized_var(snr_table);
1013 1013
1014 /* check if quantizer ready (for snr) */ 1014 /* check if quantizer ready (for snr) */
1015 ret = af9013_read_reg_bits(state, 0xd2e1, 3, 1, &buf[0]); 1015 ret = af9013_read_reg_bits(state, 0xd2e1, 3, 1, &buf[0]);
diff --git a/drivers/media/dvb/frontends/cx24113.c b/drivers/media/dvb/frontends/cx24113.c
new file mode 100644
index 000000000000..f6e7b0380a5a
--- /dev/null
+++ b/drivers/media/dvb/frontends/cx24113.c
@@ -0,0 +1,616 @@
1/*
2 * Driver for Conexant CX24113/CX24128 Tuner (Satellite)
3 *
4 * Copyright (C) 2007-8 Patrick Boettcher <pb@linuxtv.org>
5 *
6 * Developed for BBTI / Technisat
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 *
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/slab.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/init.h>
28
29#include "dvb_frontend.h"
30#include "cx24113.h"
31
32static int debug;
33
34#define info(args...) do { printk(KERN_INFO "CX24113: " args); } while (0)
35#define err(args...) do { printk(KERN_ERR "CX24113: " args); } while (0)
36
37#define dprintk(args...) \
38 do { \
39 if (debug) { \
40 printk(KERN_DEBUG "CX24113: %s: ", __func__); \
41 printk(args); \
42 } \
43 } while (0)
44
45struct cx24113_state {
46 struct i2c_adapter *i2c;
47 const struct cx24113_config *config;
48
49#define REV_CX24113 0x23
50 u8 rev;
51 u8 ver;
52
53 u8 icp_mode:1;
54
55#define ICP_LEVEL1 0
56#define ICP_LEVEL2 1
57#define ICP_LEVEL3 2
58#define ICP_LEVEL4 3
59 u8 icp_man:2;
60 u8 icp_auto_low:2;
61 u8 icp_auto_mlow:2;
62 u8 icp_auto_mhi:2;
63 u8 icp_auto_hi:2;
64 u8 icp_dig;
65
66#define LNA_MIN_GAIN 0
67#define LNA_MID_GAIN 1
68#define LNA_MAX_GAIN 2
69 u8 lna_gain:2;
70
71 u8 acp_on:1;
72
73 u8 vco_mode:2;
74 u8 vco_shift:1;
75#define VCOBANDSEL_6 0x80
76#define VCOBANDSEL_5 0x01
77#define VCOBANDSEL_4 0x02
78#define VCOBANDSEL_3 0x04
79#define VCOBANDSEL_2 0x08
80#define VCOBANDSEL_1 0x10
81 u8 vco_band;
82
83#define VCODIV4 4
84#define VCODIV2 2
85 u8 vcodiv;
86
87 u8 bs_delay:4;
88 u16 bs_freqcnt:13;
89 u16 bs_rdiv;
90 u8 prescaler_mode:1;
91
92 u8 rfvga_bias_ctrl;
93
94 s16 tuner_gain_thres;
95 u8 gain_level;
96
97 u32 frequency;
98
99 u8 refdiv;
100
101 u8 Fwindow_enabled;
102};
103
104static int cx24113_writereg(struct cx24113_state *state, int reg, int data)
105{
106 u8 buf[] = { reg, data };
107 struct i2c_msg msg = { .addr = state->config->i2c_addr,
108 .flags = 0, .buf = buf, .len = 2 };
109 int err = i2c_transfer(state->i2c, &msg, 1);
110 if (err != 1) {
111 printk(KERN_DEBUG "%s: writereg error(err == %i, reg == 0x%02x,"
112 " data == 0x%02x)\n", __func__, err, reg, data);
113 return err;
114 }
115
116 return 0;
117}
118
119static int cx24113_readreg(struct cx24113_state *state, u8 reg)
120{
121 int ret;
122 u8 b;
123 struct i2c_msg msg[] = {
124 { .addr = state->config->i2c_addr,
125 .flags = 0, .buf = &reg, .len = 1 },
126 { .addr = state->config->i2c_addr,
127 .flags = I2C_M_RD, .buf = &b, .len = 1 }
128 };
129
130 ret = i2c_transfer(state->i2c, msg, 2);
131
132 if (ret != 2) {
133 printk(KERN_DEBUG "%s: reg=0x%x (error=%d)\n",
134 __func__, reg, ret);
135 return ret;
136 }
137
138 return b;
139}
140
141static void cx24113_set_parameters(struct cx24113_state *state)
142{
143 u8 r;
144
145 r = cx24113_readreg(state, 0x10) & 0x82;
146 r |= state->icp_mode;
147 r |= state->icp_man << 4;
148 r |= state->icp_dig << 2;
149 r |= state->prescaler_mode << 5;
150 cx24113_writereg(state, 0x10, r);
151
152 r = (state->icp_auto_low << 0) | (state->icp_auto_mlow << 2)
153 | (state->icp_auto_mhi << 4) | (state->icp_auto_hi << 6);
154 cx24113_writereg(state, 0x11, r);
155
156 if (state->rev == REV_CX24113) {
157 r = cx24113_readreg(state, 0x20) & 0xec;
158 r |= state->lna_gain;
159 r |= state->rfvga_bias_ctrl << 4;
160 cx24113_writereg(state, 0x20, r);
161 }
162
163 r = cx24113_readreg(state, 0x12) & 0x03;
164 r |= state->acp_on << 2;
165 r |= state->bs_delay << 4;
166 cx24113_writereg(state, 0x12, r);
167
168 r = cx24113_readreg(state, 0x18) & 0x40;
169 r |= state->vco_shift;
170 if (state->vco_band == VCOBANDSEL_6)
171 r |= (1 << 7);
172 else
173 r |= (state->vco_band << 1);
174 cx24113_writereg(state, 0x18, r);
175
176 r = cx24113_readreg(state, 0x14) & 0x20;
177 r |= (state->vco_mode << 6) | ((state->bs_freqcnt >> 8) & 0x1f);
178 cx24113_writereg(state, 0x14, r);
179 cx24113_writereg(state, 0x15, (state->bs_freqcnt & 0xff));
180
181 cx24113_writereg(state, 0x16, (state->bs_rdiv >> 4) & 0xff);
182 r = (cx24113_readreg(state, 0x17) & 0x0f) |
183 ((state->bs_rdiv & 0x0f) << 4);
184 cx24113_writereg(state, 0x17, r);
185}
186
187#define VGA_0 0x00
188#define VGA_1 0x04
189#define VGA_2 0x02
190#define VGA_3 0x06
191#define VGA_4 0x01
192#define VGA_5 0x05
193#define VGA_6 0x03
194#define VGA_7 0x07
195
196#define RFVGA_0 0x00
197#define RFVGA_1 0x01
198#define RFVGA_2 0x02
199#define RFVGA_3 0x03
200
201static int cx24113_set_gain_settings(struct cx24113_state *state,
202 s16 power_estimation)
203{
204 u8 ampout = cx24113_readreg(state, 0x1d) & 0xf0,
205 vga = cx24113_readreg(state, 0x1f) & 0x3f,
206 rfvga = cx24113_readreg(state, 0x20) & 0xf3;
207 u8 gain_level = power_estimation >= state->tuner_gain_thres;
208
209 dprintk("power estimation: %d, thres: %d, gain_level: %d/%d\n",
210 power_estimation, state->tuner_gain_thres,
211 state->gain_level, gain_level);
212
213 if (gain_level == state->gain_level)
214 return 0; /* nothing to be done */
215
216 ampout |= 0xf;
217
218 if (gain_level) {
219 rfvga |= RFVGA_0 << 2;
220 vga |= (VGA_7 << 3) | VGA_7;
221 } else {
222 rfvga |= RFVGA_2 << 2;
223 vga |= (VGA_6 << 3) | VGA_2;
224 }
225 state->gain_level = gain_level;
226
227 cx24113_writereg(state, 0x1d, ampout);
228 cx24113_writereg(state, 0x1f, vga);
229 cx24113_writereg(state, 0x20, rfvga);
230
231 return 1; /* did something */
232}
233
234static int cx24113_set_Fref(struct cx24113_state *state, u8 high)
235{
236 u8 xtal = cx24113_readreg(state, 0x02);
237 if (state->rev == 0x43 && state->vcodiv == VCODIV4)
238 high = 1;
239
240 xtal &= ~0x2;
241 if (high)
242 xtal |= high << 1;
243 return cx24113_writereg(state, 0x02, xtal);
244}
245
246static int cx24113_enable(struct cx24113_state *state, u8 enable)
247{
248 u8 r21 = (cx24113_readreg(state, 0x21) & 0xc0) | enable;
249 if (state->rev == REV_CX24113)
250 r21 |= (1 << 1);
251 return cx24113_writereg(state, 0x21, r21);
252}
253
254static int cx24113_set_bandwidth(struct cx24113_state *state, u32 bandwidth_khz)
255{
256 u8 r;
257
258 if (bandwidth_khz <= 19000)
259 r = 0x03 << 6;
260 else if (bandwidth_khz <= 25000)
261 r = 0x02 << 6;
262 else
263 r = 0x01 << 6;
264
265 dprintk("bandwidth to be set: %d\n", bandwidth_khz);
266 bandwidth_khz *= 10;
267 bandwidth_khz -= 10000;
268 bandwidth_khz /= 1000;
269 bandwidth_khz += 5;
270 bandwidth_khz /= 10;
271
272 dprintk("bandwidth: %d %d\n", r >> 6, bandwidth_khz);
273
274 r |= bandwidth_khz & 0x3f;
275
276 return cx24113_writereg(state, 0x1e, r);
277}
278
279static int cx24113_set_clk_inversion(struct cx24113_state *state, u8 on)
280{
281 u8 r = (cx24113_readreg(state, 0x10) & 0x7f) | ((on & 0x1) << 7);
282 return cx24113_writereg(state, 0x10, r);
283}
284
285static int cx24113_get_status(struct dvb_frontend *fe, u32 *status)
286{
287 struct cx24113_state *state = fe->tuner_priv;
288 u8 r = (cx24113_readreg(state, 0x10) & 0x02) >> 1;
289 if (r)
290 *status |= TUNER_STATUS_LOCKED;
291 dprintk("PLL locked: %d\n", r);
292 return 0;
293}
294
295static u8 cx24113_set_ref_div(struct cx24113_state *state, u8 refdiv)
296{
297 if (state->rev == 0x43 && state->vcodiv == VCODIV4)
298 refdiv = 2;
299 return state->refdiv = refdiv;
300}
301
302static void cx24113_calc_pll_nf(struct cx24113_state *state, u16 *n, s32 *f)
303{
304 s32 N;
305 s64 F;
306 u8 R, r;
307 u8 vcodiv;
308 u8 factor;
309 s32 freq_hz = state->frequency * 1000;
310
311 if (state->config->xtal_khz < 20000)
312 factor = 1;
313 else
314 factor = 2;
315
316 if (state->rev == REV_CX24113) {
317 if (state->frequency >= 1100000)
318 vcodiv = VCODIV2;
319 else
320 vcodiv = VCODIV4;
321 } else {
322 if (state->frequency >= 1165000)
323 vcodiv = VCODIV2;
324 else
325 vcodiv = VCODIV4;
326 }
327 state->vcodiv = vcodiv;
328
329 dprintk("calculating N/F for %dHz with vcodiv %d\n", freq_hz, vcodiv);
330 R = 0;
331 do {
332 R = cx24113_set_ref_div(state, R + 1);
333
334 /* calculate tuner PLL settings: */
335 N = (freq_hz / 100 * vcodiv) * R;
336 N /= (state->config->xtal_khz) * factor * 2;
337 N += 5; /* For round up. */
338 N /= 10;
339 N -= 32;
340 } while (N < 6 && R < 3);
341
342 if (N < 6) {
343 err("strange frequency: N < 6\n");
344 return;
345 }
346 F = freq_hz;
347 F *= (u64) (R * vcodiv * 262144);
348 dprintk("1 N: %d, F: %lld, R: %d\n", N, (long long)F, R);
349 do_div(F, state->config->xtal_khz*1000 * factor * 2);
350 dprintk("2 N: %d, F: %lld, R: %d\n", N, (long long)F, R);
351 F -= (N + 32) * 262144;
352
353 dprintk("3 N: %d, F: %lld, R: %d\n", N, (long long)F, R);
354
355 if (state->Fwindow_enabled) {
356 if (F > (262144 / 2 - 1638))
357 F = 262144 / 2 - 1638;
358 if (F < (-262144 / 2 + 1638))
359 F = -262144 / 2 + 1638;
360 if ((F < 3277 && F > 0) || (F > -3277 && F < 0)) {
361 F = 0;
362 r = cx24113_readreg(state, 0x10);
363 cx24113_writereg(state, 0x10, r | (1 << 6));
364 }
365 }
366 dprintk("4 N: %d, F: %lld, R: %d\n", N, (long long)F, R);
367
368 *n = (u16) N;
369 *f = (s32) F;
370}
371
372
373static void cx24113_set_nfr(struct cx24113_state *state, u16 n, s32 f, u8 r)
374{
375 u8 reg;
376 cx24113_writereg(state, 0x19, (n >> 1) & 0xff);
377
378 reg = ((n & 0x1) << 7) | ((f >> 11) & 0x7f);
379 cx24113_writereg(state, 0x1a, reg);
380
381 cx24113_writereg(state, 0x1b, (f >> 3) & 0xff);
382
383 reg = cx24113_readreg(state, 0x1c) & 0x1f;
384 cx24113_writereg(state, 0x1c, reg | ((f & 0x7) << 5));
385
386 cx24113_set_Fref(state, r - 1);
387}
388
389static int cx24113_set_frequency(struct cx24113_state *state, u32 frequency)
390{
391 u8 r = 1; /* or 2 */
392 u16 n = 6;
393 s32 f = 0;
394
395 r = cx24113_readreg(state, 0x14);
396 cx24113_writereg(state, 0x14, r & 0x3f);
397
398 r = cx24113_readreg(state, 0x10);
399 cx24113_writereg(state, 0x10, r & 0xbf);
400
401 state->frequency = frequency;
402
403 dprintk("tuning to frequency: %d\n", frequency);
404
405 cx24113_calc_pll_nf(state, &n, &f);
406 cx24113_set_nfr(state, n, f, state->refdiv);
407
408 r = cx24113_readreg(state, 0x18) & 0xbf;
409 if (state->vcodiv != VCODIV2)
410 r |= 1 << 6;
411 cx24113_writereg(state, 0x18, r);
412
413 /* The need for this sleep is not clear. But helps in some cases */
414 msleep(5);
415
416 r = cx24113_readreg(state, 0x1c) & 0xef;
417 cx24113_writereg(state, 0x1c, r | (1 << 4));
418 return 0;
419}
420
421static int cx24113_init(struct dvb_frontend *fe)
422{
423 struct cx24113_state *state = fe->tuner_priv;
424 int ret;
425
426 state->tuner_gain_thres = -50;
427 state->gain_level = 255; /* to force a gain-setting initialization */
428 state->icp_mode = 0;
429
430 if (state->config->xtal_khz < 11000) {
431 state->icp_auto_hi = ICP_LEVEL4;
432 state->icp_auto_mhi = ICP_LEVEL4;
433 state->icp_auto_mlow = ICP_LEVEL3;
434 state->icp_auto_low = ICP_LEVEL3;
435 } else {
436 state->icp_auto_hi = ICP_LEVEL4;
437 state->icp_auto_mhi = ICP_LEVEL4;
438 state->icp_auto_mlow = ICP_LEVEL3;
439 state->icp_auto_low = ICP_LEVEL2;
440 }
441
442 state->icp_dig = ICP_LEVEL3;
443 state->icp_man = ICP_LEVEL1;
444 state->acp_on = 1;
445 state->vco_mode = 0;
446 state->vco_shift = 0;
447 state->vco_band = VCOBANDSEL_1;
448 state->bs_delay = 8;
449 state->bs_freqcnt = 0x0fff;
450 state->bs_rdiv = 0x0fff;
451 state->prescaler_mode = 0;
452 state->lna_gain = LNA_MAX_GAIN;
453 state->rfvga_bias_ctrl = 1;
454 state->Fwindow_enabled = 1;
455
456 cx24113_set_Fref(state, 0);
457 cx24113_enable(state, 0x3d);
458 cx24113_set_parameters(state);
459
460 cx24113_set_gain_settings(state, -30);
461
462 cx24113_set_bandwidth(state, 18025);
463 cx24113_set_clk_inversion(state, 1);
464
465 if (state->config->xtal_khz >= 40000)
466 ret = cx24113_writereg(state, 0x02,
467 (cx24113_readreg(state, 0x02) & 0xfb) | (1 << 2));
468 else
469 ret = cx24113_writereg(state, 0x02,
470 (cx24113_readreg(state, 0x02) & 0xfb) | (0 << 2));
471
472 return ret;
473}
474
475static int cx24113_set_params(struct dvb_frontend *fe,
476 struct dvb_frontend_parameters *p)
477{
478 struct cx24113_state *state = fe->tuner_priv;
479 /* for a ROLL-OFF factor of 0.35, 0.2: 600, 0.25: 625 */
480 u32 roll_off = 675;
481 u32 bw;
482
483 bw = ((p->u.qpsk.symbol_rate/100) * roll_off) / 1000;
484 bw += (10000000/100) + 5;
485 bw /= 10;
486 bw += 1000;
487 cx24113_set_bandwidth(state, bw);
488
489 cx24113_set_frequency(state, p->frequency);
490 msleep(5);
491 return cx24113_get_status(fe, &bw);
492}
493
494static s8 cx24113_agc_table[2][10] = {
495 {-54, -41, -35, -30, -25, -21, -16, -10, -6, -2},
496 {-39, -35, -30, -25, -19, -15, -11, -5, 1, 9},
497};
498
499void cx24113_agc_callback(struct dvb_frontend *fe)
500{
501 struct cx24113_state *state = fe->tuner_priv;
502 s16 s, i;
503 if (!fe->ops.read_signal_strength)
504 return;
505
506 do {
507 /* this only works with the current CX24123 implementation */
508 fe->ops.read_signal_strength(fe, (u16 *) &s);
509 s >>= 8;
510 dprintk("signal strength: %d\n", s);
511 for (i = 0; i < sizeof(cx24113_agc_table[0]); i++)
512 if (cx24113_agc_table[state->gain_level][i] > s)
513 break;
514 s = -25 - i*5;
515 } while (cx24113_set_gain_settings(state, s));
516}
517EXPORT_SYMBOL(cx24113_agc_callback);
518
519static int cx24113_get_frequency(struct dvb_frontend *fe, u32 *frequency)
520{
521 struct cx24113_state *state = fe->tuner_priv;
522 *frequency = state->frequency;
523 return 0;
524}
525
526static int cx24113_release(struct dvb_frontend *fe)
527{
528 struct cx24113_state *state = fe->tuner_priv;
529 dprintk("\n");
530 fe->tuner_priv = NULL;
531 kfree(state);
532 return 0;
533}
534
535static const struct dvb_tuner_ops cx24113_tuner_ops = {
536 .info = {
537 .name = "Conexant CX24113",
538 .frequency_min = 950000,
539 .frequency_max = 2150000,
540 .frequency_step = 125,
541 },
542
543 .release = cx24113_release,
544
545 .init = cx24113_init,
546 .sleep = NULL,
547
548 .set_params = cx24113_set_params,
549 .get_frequency = cx24113_get_frequency,
550 .get_bandwidth = NULL,
551 .get_status = cx24113_get_status,
552};
553
554struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
555 const struct cx24113_config *config, struct i2c_adapter *i2c)
556{
557 /* allocate memory for the internal state */
558 struct cx24113_state *state =
559 kzalloc(sizeof(struct cx24113_state), GFP_KERNEL);
560 int rc;
561 if (state == NULL) {
562 err("Unable to kmalloc\n");
563 goto error;
564 }
565
566 /* setup the state */
567 state->config = config;
568 state->i2c = i2c;
569
570 info("trying to detect myself\n");
571
572 /* making a dummy read, because of some expected troubles
573 * after power on */
574 cx24113_readreg(state, 0x00);
575
576 rc = cx24113_readreg(state, 0x00);
577 if (rc < 0) {
578 info("CX24113 not found.\n");
579 goto error;
580 }
581 state->rev = rc;
582
583 switch (rc) {
584 case 0x43:
585 info("detected CX24113 variant\n");
586 break;
587 case REV_CX24113:
588 info("sucessfully detected\n");
589 break;
590 default:
591 err("unsupported device id: %x\n", state->rev);
592 goto error;
593 }
594 state->ver = cx24113_readreg(state, 0x01);
595 info("version: %x\n", state->ver);
596
597 /* create dvb_frontend */
598 memcpy(&fe->ops.tuner_ops, &cx24113_tuner_ops,
599 sizeof(struct dvb_tuner_ops));
600 fe->tuner_priv = state;
601 return fe;
602
603error:
604 kfree(state);
605
606 return NULL;
607}
608EXPORT_SYMBOL(cx24113_attach);
609
610module_param(debug, int, 0644);
611MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
612
613MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>");
 614MODULE_DESCRIPTION("DVB Frontend module for Conexant CX24113/CX24128 hardware");
615MODULE_LICENSE("GPL");
616
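
cx24113_calc_pll_nf() above boils down to an integer PLL calculation: given the reference divider R, the VCO divider and the crystal frequency, it derives a divider N (stored with a -32 offset) and a signed fractional part F scaled by 2^18 = 262144. The standalone sketch below mirrors that arithmetic for a hypothetical 10.111 MHz crystal and a 1200 MHz tuning frequency, both example values rather than figures from any particular board, so the rounding steps can be followed in isolation.

/*
 * Standalone reimplementation of the integer math in cx24113_calc_pll_nf(),
 * for one hypothetical operating point. xtal_khz and freq_khz are example
 * inputs; factor=1 corresponds to crystals below 20 MHz, vcodiv=2 to
 * frequencies >= 1100 MHz on the CX24113 revision.
 */
#include <stdio.h>

int main(void)
{
	const unsigned xtal_khz = 10111;	/* example crystal */
	const unsigned freq_khz = 1200000;	/* example tuning frequency */
	const unsigned factor = 1, vcodiv = 2, R = 1;

	long long freq_hz = (long long)freq_khz * 1000;
	long long N, F;

	N = (freq_hz / 100 * vcodiv) * R;
	N /= (long long)xtal_khz * factor * 2;
	N = (N + 5) / 10 - 32;			/* round, then remove the offset */

	F = freq_hz * R * vcodiv * 262144;	/* fractional part, scaled by 2^18 */
	F /= (long long)xtal_khz * 1000 * factor * 2;
	F -= (N + 32) * 262144;

	printf("N = %lld, F = %lld\n", N, F);
	return 0;
}
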
diff --git a/drivers/media/dvb/frontends/cx24113.h b/drivers/media/dvb/frontends/cx24113.h
index 5ab3dd11076b..5de0f7ffd8d2 100644
--- a/drivers/media/dvb/frontends/cx24113.h
+++ b/drivers/media/dvb/frontends/cx24113.h
@@ -16,7 +16,7 @@
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22#ifndef CX24113_H 22#ifndef CX24113_H
@@ -30,9 +30,13 @@ struct cx24113_config {
30 u32 xtal_khz; 30 u32 xtal_khz;
31}; 31};
32 32
33/* TODO: #if defined(CONFIG_DVB_TUNER_CX24113) || \ 33#if defined(CONFIG_DVB_TUNER_CX24113) || \
34 * (defined(CONFIG_DVB_TUNER_CX24113_MODULE) && defined(MODULE)) */ 34 (defined(CONFIG_DVB_TUNER_CX24113_MODULE) && defined(MODULE))
35extern struct dvb_frontend *cx24113_attach(struct dvb_frontend *,
36 const struct cx24113_config *config, struct i2c_adapter *i2c);
35 37
38extern void cx24113_agc_callback(struct dvb_frontend *fe);
39#else
36static inline struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe, 40static inline struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
37 const struct cx24113_config *config, struct i2c_adapter *i2c) 41 const struct cx24113_config *config, struct i2c_adapter *i2c)
38{ 42{
@@ -44,5 +48,6 @@ static inline void cx24113_agc_callback(struct dvb_frontend *fe)
44{ 48{
45 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 49 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
46} 50}
51#endif
47 52
48#endif /* CX24113_H */ 53#endif /* CX24113_H */
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index b144b308a4dd..9b6c89e93f16 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -106,7 +106,7 @@ MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
106#define CX24116_HAS_SYNCLOCK (0x08) 106#define CX24116_HAS_SYNCLOCK (0x08)
107#define CX24116_HAS_UNKNOWN1 (0x10) 107#define CX24116_HAS_UNKNOWN1 (0x10)
108#define CX24116_HAS_UNKNOWN2 (0x20) 108#define CX24116_HAS_UNKNOWN2 (0x20)
109#define CX24116_STATUS_MASK (0x3f) 109#define CX24116_STATUS_MASK (0x0f)
110#define CX24116_SIGNAL_MASK (0xc0) 110#define CX24116_SIGNAL_MASK (0xc0)
111 111
112#define CX24116_DISEQC_TONEOFF (0) /* toneburst never sent */ 112#define CX24116_DISEQC_TONEOFF (0) /* toneburst never sent */
@@ -160,6 +160,7 @@ struct cx24116_tuning {
160 fe_spectral_inversion_t inversion; 160 fe_spectral_inversion_t inversion;
161 fe_code_rate_t fec; 161 fe_code_rate_t fec;
162 162
163 fe_delivery_system_t delsys;
163 fe_modulation_t modulation; 164 fe_modulation_t modulation;
164 fe_pilot_t pilot; 165 fe_pilot_t pilot;
165 fe_rolloff_t rolloff; 166 fe_rolloff_t rolloff;
@@ -411,14 +412,15 @@ struct cx24116_modfec {
411}; 412};
412 413
413static int cx24116_lookup_fecmod(struct cx24116_state *state, 414static int cx24116_lookup_fecmod(struct cx24116_state *state,
414 fe_modulation_t m, fe_code_rate_t f) 415 fe_delivery_system_t d, fe_modulation_t m, fe_code_rate_t f)
415{ 416{
416 int i, ret = -EOPNOTSUPP; 417 int i, ret = -EOPNOTSUPP;
417 418
418 dprintk("%s(0x%02x,0x%02x)\n", __func__, m, f); 419 dprintk("%s(0x%02x,0x%02x)\n", __func__, m, f);
419 420
420 for (i = 0; i < ARRAY_SIZE(CX24116_MODFEC_MODES); i++) { 421 for (i = 0; i < ARRAY_SIZE(CX24116_MODFEC_MODES); i++) {
421 if ((m == CX24116_MODFEC_MODES[i].modulation) && 422 if ((d == CX24116_MODFEC_MODES[i].delivery_system) &&
423 (m == CX24116_MODFEC_MODES[i].modulation) &&
422 (f == CX24116_MODFEC_MODES[i].fec)) { 424 (f == CX24116_MODFEC_MODES[i].fec)) {
423 ret = i; 425 ret = i;
424 break; 426 break;
@@ -429,13 +431,13 @@ static int cx24116_lookup_fecmod(struct cx24116_state *state,
429} 431}
430 432
431static int cx24116_set_fec(struct cx24116_state *state, 433static int cx24116_set_fec(struct cx24116_state *state,
432 fe_modulation_t mod, fe_code_rate_t fec) 434 fe_delivery_system_t delsys, fe_modulation_t mod, fe_code_rate_t fec)
433{ 435{
434 int ret = 0; 436 int ret = 0;
435 437
436 dprintk("%s(0x%02x,0x%02x)\n", __func__, mod, fec); 438 dprintk("%s(0x%02x,0x%02x)\n", __func__, mod, fec);
437 439
438 ret = cx24116_lookup_fecmod(state, mod, fec); 440 ret = cx24116_lookup_fecmod(state, delsys, mod, fec);
439 441
440 if (ret < 0) 442 if (ret < 0)
441 return ret; 443 return ret;
@@ -679,7 +681,8 @@ static int cx24116_read_status(struct dvb_frontend *fe, fe_status_t *status)
679{ 681{
680 struct cx24116_state *state = fe->demodulator_priv; 682 struct cx24116_state *state = fe->demodulator_priv;
681 683
682 int lock = cx24116_readreg(state, CX24116_REG_SSTATUS); 684 int lock = cx24116_readreg(state, CX24116_REG_SSTATUS) &
685 CX24116_STATUS_MASK;
683 686
684 dprintk("%s: status = 0x%02x\n", __func__, lock); 687 dprintk("%s: status = 0x%02x\n", __func__, lock);
685 688
@@ -1205,7 +1208,7 @@ static int cx24116_set_frontend(struct dvb_frontend *fe,
1205 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 1208 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
1206 struct cx24116_cmd cmd; 1209 struct cx24116_cmd cmd;
1207 fe_status_t tunerstat; 1210 fe_status_t tunerstat;
1208 int i, status, ret, retune; 1211 int i, status, ret, retune = 1;
1209 1212
1210 dprintk("%s()\n", __func__); 1213 dprintk("%s()\n", __func__);
1211 1214
@@ -1222,7 +1225,6 @@ static int cx24116_set_frontend(struct dvb_frontend *fe,
1222 1225
1223 /* Pilot doesn't exist in DVB-S, turn bit off */ 1226 /* Pilot doesn't exist in DVB-S, turn bit off */
1224 state->dnxt.pilot_val = CX24116_PILOT_OFF; 1227 state->dnxt.pilot_val = CX24116_PILOT_OFF;
1225 retune = 1;
1226 1228
1227 /* DVB-S only supports 0.35 */ 1229 /* DVB-S only supports 0.35 */
1228 if (c->rolloff != ROLLOFF_35) { 1230 if (c->rolloff != ROLLOFF_35) {
@@ -1250,7 +1252,7 @@ static int cx24116_set_frontend(struct dvb_frontend *fe,
1250 case PILOT_AUTO: /* Not supported but emulated */ 1252 case PILOT_AUTO: /* Not supported but emulated */
1251 state->dnxt.pilot_val = (c->modulation == QPSK) 1253 state->dnxt.pilot_val = (c->modulation == QPSK)
1252 ? CX24116_PILOT_OFF : CX24116_PILOT_ON; 1254 ? CX24116_PILOT_OFF : CX24116_PILOT_ON;
1253 retune = 2; 1255 retune++;
1254 break; 1256 break;
1255 case PILOT_OFF: 1257 case PILOT_OFF:
1256 state->dnxt.pilot_val = CX24116_PILOT_OFF; 1258 state->dnxt.pilot_val = CX24116_PILOT_OFF;
@@ -1287,6 +1289,7 @@ static int cx24116_set_frontend(struct dvb_frontend *fe,
1287 __func__, c->delivery_system); 1289 __func__, c->delivery_system);
1288 return -EOPNOTSUPP; 1290 return -EOPNOTSUPP;
1289 } 1291 }
1292 state->dnxt.delsys = c->delivery_system;
1290 state->dnxt.modulation = c->modulation; 1293 state->dnxt.modulation = c->modulation;
1291 state->dnxt.frequency = c->frequency; 1294 state->dnxt.frequency = c->frequency;
1292 state->dnxt.pilot = c->pilot; 1295 state->dnxt.pilot = c->pilot;
@@ -1297,7 +1300,7 @@ static int cx24116_set_frontend(struct dvb_frontend *fe,
1297 return ret; 1300 return ret;
1298 1301
1299 /* FEC_NONE/AUTO for DVB-S2 is not supported and detected here */ 1302 /* FEC_NONE/AUTO for DVB-S2 is not supported and detected here */
1300 ret = cx24116_set_fec(state, c->modulation, c->fec_inner); 1303 ret = cx24116_set_fec(state, c->delivery_system, c->modulation, c->fec_inner);
1301 if (ret != 0) 1304 if (ret != 0)
1302 return ret; 1305 return ret;
1303 1306
@@ -1308,6 +1311,7 @@ static int cx24116_set_frontend(struct dvb_frontend *fe,
1308 /* discard the 'current' tuning parameters and prepare to tune */ 1311 /* discard the 'current' tuning parameters and prepare to tune */
1309 cx24116_clone_params(fe); 1312 cx24116_clone_params(fe);
1310 1313
1314 dprintk("%s: delsys = %d\n", __func__, state->dcur.delsys);
1311 dprintk("%s: modulation = %d\n", __func__, state->dcur.modulation); 1315 dprintk("%s: modulation = %d\n", __func__, state->dcur.modulation);
1312 dprintk("%s: frequency = %d\n", __func__, state->dcur.frequency); 1316 dprintk("%s: frequency = %d\n", __func__, state->dcur.frequency);
1313 dprintk("%s: pilot = %d (val = 0x%02x)\n", __func__, 1317 dprintk("%s: pilot = %d (val = 0x%02x)\n", __func__,
@@ -1427,6 +1431,23 @@ tuned: /* Set/Reset B/W */
1427 return ret; 1431 return ret;
1428} 1432}
1429 1433
1434static int cx24116_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *params,
1435 unsigned int mode_flags, unsigned int *delay, fe_status_t *status)
1436{
1437 *delay = HZ / 5;
1438 if (params) {
1439 int ret = cx24116_set_frontend(fe, params);
1440 if (ret)
1441 return ret;
1442 }
1443 return cx24116_read_status(fe, status);
1444}
1445
1446static int cx24116_get_algo(struct dvb_frontend *fe)
1447{
1448 return DVBFE_ALGO_HW;
1449}
1450
1430static struct dvb_frontend_ops cx24116_ops = { 1451static struct dvb_frontend_ops cx24116_ops = {
1431 1452
1432 .info = { 1453 .info = {
@@ -1458,6 +1479,8 @@ static struct dvb_frontend_ops cx24116_ops = {
1458 .set_voltage = cx24116_set_voltage, 1479 .set_voltage = cx24116_set_voltage,
1459 .diseqc_send_master_cmd = cx24116_send_diseqc_msg, 1480 .diseqc_send_master_cmd = cx24116_send_diseqc_msg,
1460 .diseqc_send_burst = cx24116_diseqc_send_burst, 1481 .diseqc_send_burst = cx24116_diseqc_send_burst,
1482 .get_frontend_algo = cx24116_get_algo,
1483 .tune = cx24116_tune,
1461 1484
1462 .set_property = cx24116_set_property, 1485 .set_property = cx24116_set_property,
1463 .get_property = cx24116_get_property, 1486 .get_property = cx24116_get_property,
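
The cx24116 changes above thread the delivery system through the MODFEC lookup, so the same (modulation, FEC) pair can resolve to different firmware entries for DVB-S and DVB-S2, and they register a hardware tuning algorithm (DVBFE_ALGO_HW) plus a tune() hook so dvb-core delegates retuning to the demodulator instead of running its software zigzag. The sketch below shows the extended three-key lookup in standalone form; the table rows are made-up placeholders, not the driver's real CX24116_MODFEC_MODES contents.

/*
 * Standalone sketch of a (delivery system, modulation, FEC) -> index lookup,
 * mirroring the extended cx24116_lookup_fecmod() above. Table rows are
 * illustrative only.
 */
#include <stdio.h>

enum delsys { SYS_DVBS, SYS_DVBS2 };
enum mod    { QPSK, PSK_8 };
enum fec    { FEC_1_2, FEC_2_3, FEC_3_4 };

struct modfec { enum delsys d; enum mod m; enum fec f; };

static const struct modfec modes[] = {
	{ SYS_DVBS,  QPSK,  FEC_1_2 },
	{ SYS_DVBS,  QPSK,  FEC_2_3 },
	{ SYS_DVBS2, QPSK,  FEC_2_3 },
	{ SYS_DVBS2, PSK_8, FEC_3_4 },
};

static int lookup_fecmod(enum delsys d, enum mod m, enum fec f)
{
	unsigned i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (modes[i].d == d && modes[i].m == m && modes[i].f == f)
			return (int)i;
	return -1;	/* stands in for -EOPNOTSUPP */
}

int main(void)
{
	printf("DVB-S  QPSK 2/3 -> %d\n", lookup_fecmod(SYS_DVBS, QPSK, FEC_2_3));
	printf("DVB-S2 QPSK 2/3 -> %d\n", lookup_fecmod(SYS_DVBS2, QPSK, FEC_2_3));
	return 0;
}
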
diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h
index 3e8126857127..aab8112e2db2 100644
--- a/drivers/media/dvb/frontends/dib7000p.h
+++ b/drivers/media/dvb/frontends/dib7000p.h
@@ -66,7 +66,8 @@ struct i2c_adapter *dib7000p_get_i2c_master(struct dvb_frontend *fe,
66 return NULL; 66 return NULL;
67} 67}
68 68
69extern int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, 69static inline
70int dib7000p_i2c_enumeration(struct i2c_adapter *i2c,
70 int no_of_demods, u8 default_addr, 71 int no_of_demods, u8 default_addr,
71 struct dib7000p_config cfg[]) 72 struct dib7000p_config cfg[])
72{ 73{
@@ -74,13 +75,15 @@ extern int dib7000p_i2c_enumeration(struct i2c_adapter *i2c,
74 return -ENODEV; 75 return -ENODEV;
75} 76}
76 77
77extern int dib7000p_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val) 78static inline
79int dib7000p_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
78{ 80{
79 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 81 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
80 return -ENODEV; 82 return -ENODEV;
81} 83}
82 84
83extern int dib7000p_set_wbd_ref(struct dvb_frontend *fe, u16 value) 85static inline
86int dib7000p_set_wbd_ref(struct dvb_frontend *fe, u16 value)
84{ 87{
85 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 88 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
86 return -ENODEV; 89 return -ENODEV;
diff --git a/drivers/media/dvb/frontends/drx397xD.c b/drivers/media/dvb/frontends/drx397xD.c
index b9ca5c8d2dd9..ec4e08dbc699 100644
--- a/drivers/media/dvb/frontends/drx397xD.c
+++ b/drivers/media/dvb/frontends/drx397xD.c
@@ -39,7 +39,7 @@ static const char mod_name[] = "drx397xD";
39#define F_SET_0D4h 2 39#define F_SET_0D4h 2
40 40
41enum fw_ix { 41enum fw_ix {
42#define _FW_ENTRY(a, b) b 42#define _FW_ENTRY(a, b, c) b
43#include "drx397xD_fw.h" 43#include "drx397xD_fw.h"
44}; 44};
45 45
@@ -72,11 +72,11 @@ static struct {
72 int refcnt; 72 int refcnt;
73 const u8 *data[ARRAY_SIZE(blob_name)]; 73 const u8 *data[ARRAY_SIZE(blob_name)];
74} fw[] = { 74} fw[] = {
75#define _FW_ENTRY(a, b) { \ 75#define _FW_ENTRY(a, b, c) { \
76 .name = a, \ 76 .name = a, \
77 .file = 0, \ 77 .file = 0, \
78 .lock = RW_LOCK_UNLOCKED, \ 78 .lock = __RW_LOCK_UNLOCKED(fw[c].lock), \
79 .refcnt = 0, \ 79 .refcnt = 0, \
80 .data = { } } 80 .data = { } }
81#include "drx397xD_fw.h" 81#include "drx397xD_fw.h"
82}; 82};
diff --git a/drivers/media/dvb/frontends/drx397xD_fw.h b/drivers/media/dvb/frontends/drx397xD_fw.h
index 01de02a81cd4..c8b44c1e807f 100644
--- a/drivers/media/dvb/frontends/drx397xD_fw.h
+++ b/drivers/media/dvb/frontends/drx397xD_fw.h
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20#ifdef _FW_ENTRY 20#ifdef _FW_ENTRY
21 _FW_ENTRY("drx397xD.A2.fw", DRXD_FW_A2 = 0 ), 21 _FW_ENTRY("drx397xD.A2.fw", DRXD_FW_A2 = 0, DRXD_FW_A2 ),
22 _FW_ENTRY("drx397xD.B1.fw", DRXD_FW_B1 ), 22 _FW_ENTRY("drx397xD.B1.fw", DRXD_FW_B1, DRXD_FW_B1 ),
23#undef _FW_ENTRY 23#undef _FW_ENTRY
24#endif /* _FW_ENTRY */ 24#endif /* _FW_ENTRY */
25 25
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index ea058153ebfa..9f6349964cda 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -311,7 +311,7 @@ static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
311 .count = 4, 311 .count = 4,
312 .entries = { 312 .entries = {
313 { 1250000, 500, 0xc4, 0x00}, 313 { 1250000, 500, 0xc4, 0x00},
314 { 1550000, 500, 0xc4, 0x40}, 314 { 1450000, 500, 0xc4, 0x40},
315 { 2050000, 500, 0xc4, 0x80}, 315 { 2050000, 500, 0xc4, 0x80},
316 { 2150000, 500, 0xc4, 0xc0}, 316 { 2150000, 500, 0xc4, 0xc0},
317 }, 317 },
diff --git a/drivers/media/dvb/frontends/lgdt3304.c b/drivers/media/dvb/frontends/lgdt3304.c
new file mode 100644
index 000000000000..469ace5692c6
--- /dev/null
+++ b/drivers/media/dvb/frontends/lgdt3304.c
@@ -0,0 +1,378 @@
1/*
 2 * Driver for the LG LGDT3304 ATSC demodulator
3 *
4 * Copyright (C) 2008 Markus Rechberger <mrechberger@sundtek.de>
5 *
6 */
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/delay.h>
11#include "dvb_frontend.h"
12#include "lgdt3304.h"
13
14static unsigned int debug = 0;
15module_param(debug, int, 0644);
16MODULE_PARM_DESC(debug,"lgdt3304 debugging (default off)");
17
 18#define dprintk(fmt, args...) do { if (debug) \
 19 printk("lgdt3304 debug: " fmt, ##args); } while (0)
20
21struct lgdt3304_state
22{
23 struct dvb_frontend frontend;
24 fe_modulation_t current_modulation;
25 __u32 snr;
26 __u32 current_frequency;
27 __u8 addr;
28 struct i2c_adapter *i2c;
29};
30
31static int i2c_write_demod_bytes (struct dvb_frontend *fe, __u8 *buf, int len)
32{
33 struct lgdt3304_state *state = fe->demodulator_priv;
34 struct i2c_msg i2cmsgs = {
35 .addr = state->addr,
36 .flags = 0,
37 .len = 3,
38 .buf = buf
39 };
40 int i;
41 int err;
42
43 for (i=0; i<len-1; i+=3){
44 if((err = i2c_transfer(state->i2c, &i2cmsgs, 1))<0) {
45 printk("%s i2c_transfer error %d\n", __FUNCTION__, err);
46 if (err < 0)
47 return err;
48 else
49 return -EREMOTEIO;
50 }
51 i2cmsgs.buf += 3;
52 }
53 return 0;
54}
55
56static int lgdt3304_i2c_read_reg(struct dvb_frontend *fe, unsigned int reg)
57{
58 struct lgdt3304_state *state = fe->demodulator_priv;
59 struct i2c_msg i2cmsgs[2];
60 int ret;
61 __u8 buf;
62
63 __u8 regbuf[2] = { reg>>8, reg&0xff };
64
65 i2cmsgs[0].addr = state->addr;
66 i2cmsgs[0].flags = 0;
67 i2cmsgs[0].len = 2;
68 i2cmsgs[0].buf = regbuf;
69
70 i2cmsgs[1].addr = state->addr;
71 i2cmsgs[1].flags = I2C_M_RD;
72 i2cmsgs[1].len = 1;
73 i2cmsgs[1].buf = &buf;
74
75 if((ret = i2c_transfer(state->i2c, i2cmsgs, 2))<0) {
76 printk("%s i2c_transfer error %d\n", __FUNCTION__, ret);
77 return ret;
78 }
79
80 return buf;
81}
82
83static int lgdt3304_i2c_write_reg(struct dvb_frontend *fe, int reg, int val)
84{
85 struct lgdt3304_state *state = fe->demodulator_priv;
86 char buffer[3] = { reg>>8, reg&0xff, val };
87 int ret;
88
89 struct i2c_msg i2cmsgs = {
90 .addr = state->addr,
91 .flags = 0,
92 .len = 3,
93 .buf=buffer
94 };
95 ret = i2c_transfer(state->i2c, &i2cmsgs, 1);
96 if (ret != 1) {
97 printk("%s i2c_transfer error %d\n", __FUNCTION__, ret);
98 return ret;
99 }
100
101 return 0;
102}
103
104
105static int lgdt3304_soft_Reset(struct dvb_frontend *fe)
106{
107 lgdt3304_i2c_write_reg(fe, 0x0002, 0x9a);
108 lgdt3304_i2c_write_reg(fe, 0x0002, 0x9b);
109 mdelay(200);
110 return 0;
111}
112
113static int lgdt3304_set_parameters(struct dvb_frontend *fe, struct dvb_frontend_parameters *param) {
114 int err = 0;
115
116 static __u8 lgdt3304_vsb8_data[] = {
117 /* 16bit , 8bit */
118 /* regs , val */
119 0x00, 0x00, 0x02,
120 0x00, 0x00, 0x13,
121 0x00, 0x0d, 0x02,
122 0x00, 0x0e, 0x02,
123 0x00, 0x12, 0x32,
124 0x00, 0x13, 0xc4,
125 0x01, 0x12, 0x17,
126 0x01, 0x13, 0x15,
127 0x01, 0x14, 0x18,
128 0x01, 0x15, 0xff,
129 0x01, 0x16, 0x2c,
130 0x02, 0x14, 0x67,
131 0x02, 0x24, 0x8d,
132 0x04, 0x27, 0x12,
133 0x04, 0x28, 0x4f,
134 0x03, 0x08, 0x80,
135 0x03, 0x09, 0x00,
136 0x03, 0x0d, 0x00,
137 0x03, 0x0e, 0x1c,
138 0x03, 0x14, 0xe1,
139 0x05, 0x0e, 0x5b,
140 };
141
142 /* not yet tested .. */
143 static __u8 lgdt3304_qam64_data[] = {
144 /* 16bit , 8bit */
145 /* regs , val */
146 0x00, 0x00, 0x18,
147 0x00, 0x0d, 0x02,
148 //0x00, 0x0e, 0x02,
149 0x00, 0x12, 0x2a,
150 0x00, 0x13, 0x00,
151 0x03, 0x14, 0xe3,
152 0x03, 0x0e, 0x1c,
153 0x03, 0x08, 0x66,
154 0x03, 0x09, 0x66,
155 0x03, 0x0a, 0x08,
156 0x03, 0x0b, 0x9b,
157 0x05, 0x0e, 0x5b,
158 };
159
160
161 /* tested with KWorld a340 */
162 static __u8 lgdt3304_qam256_data[] = {
163 /* 16bit , 8bit */
164 /* regs , val */
165 0x00, 0x00, 0x01, //0x19,
166 0x00, 0x12, 0x2a,
167 0x00, 0x13, 0x80,
168 0x00, 0x0d, 0x02,
169 0x03, 0x14, 0xe3,
170
171 0x03, 0x0e, 0x1c,
172 0x03, 0x08, 0x66,
173 0x03, 0x09, 0x66,
174 0x03, 0x0a, 0x08,
175 0x03, 0x0b, 0x9b,
176
177 0x03, 0x0d, 0x14,
178 //0x05, 0x0e, 0x5b,
179 0x01, 0x06, 0x4a,
180 0x01, 0x07, 0x3d,
181 0x01, 0x08, 0x70,
182 0x01, 0x09, 0xa3,
183
184 0x05, 0x04, 0xfd,
185
186 0x00, 0x0d, 0x82,
187
188 0x05, 0x0e, 0x5b,
189
190 0x05, 0x0e, 0x5b,
191
192 0x00, 0x02, 0x9a,
193
194 0x00, 0x02, 0x9b,
195
196 0x00, 0x00, 0x01,
197 0x00, 0x12, 0x2a,
198 0x00, 0x13, 0x80,
199 0x00, 0x0d, 0x02,
200 0x03, 0x14, 0xe3,
201
202 0x03, 0x0e, 0x1c,
203 0x03, 0x08, 0x66,
204 0x03, 0x09, 0x66,
205 0x03, 0x0a, 0x08,
206 0x03, 0x0b, 0x9b,
207
208 0x03, 0x0d, 0x14,
209 0x01, 0x06, 0x4a,
210 0x01, 0x07, 0x3d,
211 0x01, 0x08, 0x70,
212 0x01, 0x09, 0xa3,
213
214 0x05, 0x04, 0xfd,
215
216 0x00, 0x0d, 0x82,
217
218 0x05, 0x0e, 0x5b,
219 };
220
221 struct lgdt3304_state *state = fe->demodulator_priv;
222 if (state->current_modulation != param->u.vsb.modulation) {
223 switch(param->u.vsb.modulation) {
224 case VSB_8:
225 err = i2c_write_demod_bytes(fe, lgdt3304_vsb8_data,
226 sizeof(lgdt3304_vsb8_data));
227 break;
228 case QAM_64:
229 err = i2c_write_demod_bytes(fe, lgdt3304_qam64_data,
230 sizeof(lgdt3304_qam64_data));
231 break;
232 case QAM_256:
233 err = i2c_write_demod_bytes(fe, lgdt3304_qam256_data,
234 sizeof(lgdt3304_qam256_data));
235 break;
236 default:
237 break;
238 }
239
240 if (err) {
241 printk("%s error setting modulation\n", __FUNCTION__);
242 } else {
243 state->current_modulation = param->u.vsb.modulation;
244 }
245 }
246 state->current_frequency = param->frequency;
247
248 lgdt3304_soft_Reset(fe);
249
250
251 if (fe->ops.tuner_ops.set_params)
252 fe->ops.tuner_ops.set_params(fe, param);
253
254 return 0;
255}
256
257static int lgdt3304_init(struct dvb_frontend *fe) {
258 return 0;
259}
260
261static int lgdt3304_sleep(struct dvb_frontend *fe) {
262 return 0;
263}
264
265
266static int lgdt3304_read_status(struct dvb_frontend *fe, fe_status_t *status)
267{
268 struct lgdt3304_state *state = fe->demodulator_priv;
269 int r011d;
270 int qam_lck;
271
272 *status = 0;
273 dprintk("lgdt read status\n");
274
275 r011d = lgdt3304_i2c_read_reg(fe, 0x011d);
276
277 dprintk("%02x\n", r011d);
278
279 switch(state->current_modulation) {
280 case VSB_8:
281 if (r011d & 0x80) {
282 dprintk("VSB Locked\n");
283 *status |= FE_HAS_CARRIER;
284 *status |= FE_HAS_LOCK;
285 *status |= FE_HAS_SYNC;
286 *status |= FE_HAS_SIGNAL;
287 }
288 break;
289 case QAM_64:
290 case QAM_256:
291 qam_lck = r011d & 0x7;
292 switch(qam_lck) {
293 case 0x0: dprintk("Unlock\n");
294 break;
295 case 0x4: dprintk("1st Lock in acquisition state\n");
296 break;
297 case 0x6: dprintk("2nd Lock in acquisition state\n");
298 break;
299 case 0x7: dprintk("Final Lock in good reception state\n");
300 *status |= FE_HAS_CARRIER;
301 *status |= FE_HAS_LOCK;
302 *status |= FE_HAS_SYNC;
303 *status |= FE_HAS_SIGNAL;
304 break;
305 }
306 break;
307 default:
308 printk("%s unhandled modulation\n", __FUNCTION__);
309 }
310
311
312 return 0;
313}
314
315static int lgdt3304_read_ber(struct dvb_frontend *fe, __u32 *ber)
316{
317 dprintk("read ber\n");
318 return 0;
319}
320
321static int lgdt3304_read_snr(struct dvb_frontend *fe, __u16 *snr)
322{
323 dprintk("read snr\n");
324 return 0;
325}
326
327static int lgdt3304_read_ucblocks(struct dvb_frontend *fe, __u32 *ucblocks)
328{
329 dprintk("read ucblocks\n");
330 return 0;
331}
332
333static void lgdt3304_release(struct dvb_frontend *fe)
334{
335 struct lgdt3304_state *state = (struct lgdt3304_state *)fe->demodulator_priv;
336 kfree(state);
337}
338
339static struct dvb_frontend_ops demod_lgdt3304={
340 .info = {
341 .name = "LG 3304",
342 .type = FE_ATSC,
343 .frequency_min = 54000000,
344 .frequency_max = 858000000,
345 .frequency_stepsize = 62500,
346 .symbol_rate_min = 5056941,
347 .symbol_rate_max = 10762000,
348 .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
349 },
350 .init = lgdt3304_init,
351 .sleep = lgdt3304_sleep,
352 .set_frontend = lgdt3304_set_parameters,
353 .read_snr = lgdt3304_read_snr,
354 .read_ber = lgdt3304_read_ber,
355 .read_status = lgdt3304_read_status,
356 .read_ucblocks = lgdt3304_read_ucblocks,
357 .release = lgdt3304_release,
358};
359
360struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
361 struct i2c_adapter *i2c)
362{
363
364 struct lgdt3304_state *state;
365 state = kzalloc(sizeof(struct lgdt3304_state), GFP_KERNEL);
 366 if (!state) return NULL;
367 state->addr = config->i2c_address;
368 state->i2c = i2c;
369
370 memcpy(&state->frontend.ops, &demod_lgdt3304, sizeof(struct dvb_frontend_ops));
371 state->frontend.demodulator_priv = state;
372 return &state->frontend;
373}
374
375EXPORT_SYMBOL_GPL(lgdt3304_attach);
376MODULE_AUTHOR("Markus Rechberger <mrechberger@empiatech.com>");
 377MODULE_DESCRIPTION("LGE LGDT3304 ATSC/QAM demodulator driver");
378MODULE_LICENSE("GPL");
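
The lgdt3304 init tables above are flat byte arrays read three bytes at a time: a 16-bit register address (high byte first) followed by an 8-bit value, which i2c_write_demod_bytes() sends as one three-byte I2C write per entry. The standalone sketch below just decodes such a triplet table to make the layout explicit; the two sample entries are copied from lgdt3304_vsb8_data[].

/*
 * Decode a register table laid out as {reg_hi, reg_lo, value} triplets,
 * the format walked by i2c_write_demod_bytes() above. The two sample
 * entries are taken from lgdt3304_vsb8_data[].
 */
#include <stdio.h>

int main(void)
{
	static const unsigned char table[] = {
		0x00, 0x00, 0x02,	/* reg 0x0000 <- 0x02 */
		0x00, 0x12, 0x32,	/* reg 0x0012 <- 0x32 */
	};
	unsigned i;

	for (i = 0; i + 2 < sizeof(table); i += 3) {
		unsigned reg = (table[i] << 8) | table[i + 1];
		printf("write reg 0x%04x = 0x%02x\n", reg, table[i + 2]);
	}
	return 0;
}
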
diff --git a/drivers/media/dvb/frontends/lgdt3304.h b/drivers/media/dvb/frontends/lgdt3304.h
new file mode 100644
index 000000000000..fc409fe59acb
--- /dev/null
+++ b/drivers/media/dvb/frontends/lgdt3304.h
@@ -0,0 +1,45 @@
1/*
 2 * Driver for the LG LGDT3304 ATSC demodulator
3 *
4 * Copyright (C) 2008 Markus Rechberger <mrechberger@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef LGDT3304_H
23#define LGDT3304_H
24
25#include <linux/dvb/frontend.h>
26
27struct lgdt3304_config
28{
29 /* demodulator's I2C address */
30 u8 i2c_address;
31};
32
33#if defined(CONFIG_DVB_LGDT3304) || (defined(CONFIG_DVB_LGDT3304_MODULE) && defined(MODULE))
34extern struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
35 struct i2c_adapter *i2c);
36#else
37static inline struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
38 struct i2c_adapter *i2c)
39{
40 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
41 return NULL;
42}
43#endif /* CONFIG_DVB_LGDT */
44
45#endif /* LGDT3304_H */
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
index 40644aacffcb..66e2dd6d6fe4 100644
--- a/drivers/media/dvb/frontends/s5h1411.c
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -874,6 +874,9 @@ struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
874 874 /* Note: Leaving the I2C gate open here. */
875 875 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 1);
876 876
877 /* Put the device into low-power mode until first use */
878 s5h1411_set_powerstate(&state->frontend, 1);
879
877 880 return &state->frontend;
878 881
879 882error:
diff --git a/drivers/media/dvb/frontends/s921_core.c b/drivers/media/dvb/frontends/s921_core.c
new file mode 100644
index 000000000000..974b52be9aea
--- /dev/null
+++ b/drivers/media/dvb/frontends/s921_core.c
@@ -0,0 +1,216 @@
1/*
2 * Driver for the Sharp S921 ISDB-T demodulator
3 *
4 * Copyright (C) 2008 Markus Rechberger <mrechberger@sundtek.de>
5 *
6 */
7
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/delay.h>
12#include "s921_core.h"
13
14static int s921_isdb_init(struct s921_isdb_t *dev);
15static int s921_isdb_set_parameters(struct s921_isdb_t *dev, struct s921_isdb_t_transmission_mode_params *params);
16static int s921_isdb_tune(struct s921_isdb_t *dev, struct s921_isdb_t_tune_params *params);
17static int s921_isdb_get_status(struct s921_isdb_t *dev, void *data);
18
19static u8 init_table[]={ 0x01, 0x40, 0x02, 0x00, 0x03, 0x40, 0x04, 0x01,
20 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, 0x08, 0x00,
21 0x09, 0x00, 0x0a, 0x00, 0x0b, 0x5a, 0x0c, 0x00,
22 0x0d, 0x00, 0x0f, 0x00, 0x13, 0x1b, 0x14, 0x80,
23 0x15, 0x40, 0x17, 0x70, 0x18, 0x01, 0x19, 0x12,
24 0x1a, 0x01, 0x1b, 0x12, 0x1c, 0xa0, 0x1d, 0x00,
25 0x1e, 0x0a, 0x1f, 0x08, 0x20, 0x40, 0x21, 0xff,
26 0x22, 0x4c, 0x23, 0x4e, 0x24, 0x4c, 0x25, 0x00,
27 0x26, 0x00, 0x27, 0xf4, 0x28, 0x60, 0x29, 0x88,
28 0x2a, 0x40, 0x2b, 0x40, 0x2c, 0xff, 0x2d, 0x00,
29 0x2e, 0xff, 0x2f, 0x00, 0x30, 0x20, 0x31, 0x06,
30 0x32, 0x0c, 0x34, 0x0f, 0x37, 0xfe, 0x38, 0x00,
31 0x39, 0x63, 0x3a, 0x10, 0x3b, 0x10, 0x47, 0x00,
32 0x49, 0xe5, 0x4b, 0x00, 0x50, 0xc0, 0x52, 0x20,
33 0x54, 0x5a, 0x55, 0x5b, 0x56, 0x40, 0x57, 0x70,
34 0x5c, 0x50, 0x5d, 0x00, 0x62, 0x17, 0x63, 0x2f,
35 0x64, 0x6f, 0x68, 0x00, 0x69, 0x89, 0x6a, 0x00,
36 0x6b, 0x00, 0x6c, 0x00, 0x6d, 0x00, 0x6e, 0x00,
37 0x70, 0x00, 0x71, 0x00, 0x75, 0x00, 0x76, 0x30,
38 0x77, 0x01, 0xaf, 0x00, 0xb0, 0xa0, 0xb2, 0x3d,
39 0xb3, 0x25, 0xb4, 0x8b, 0xb5, 0x4b, 0xb6, 0x3f,
40 0xb7, 0xff, 0xb8, 0xff, 0xb9, 0xfc, 0xba, 0x00,
41 0xbb, 0x00, 0xbc, 0x00, 0xd0, 0x30, 0xe4, 0x84,
42 0xf0, 0x48, 0xf1, 0x19, 0xf2, 0x5a, 0xf3, 0x8e,
43 0xf4, 0x2d, 0xf5, 0x07, 0xf6, 0x5a, 0xf7, 0xba,
44 0xf8, 0xd7 };
45
46static u8 c_table[]={ 0x58, 0x8a, 0x7b, 0x59, 0x8c, 0x7b, 0x5a, 0x8e, 0x5b,
47 0x5b, 0x90, 0x5b, 0x5c, 0x92, 0x5b, 0x5d, 0x94, 0x5b,
48 0x5e, 0x96, 0x5b, 0x5f, 0x98, 0x3b, 0x60, 0x9a, 0x3b,
49 0x61, 0x9c, 0x3b, 0x62, 0x9e, 0x3b, 0x63, 0xa0, 0x3b,
50 0x64, 0xa2, 0x1b, 0x65, 0xa4, 0x1b, 0x66, 0xa6, 0x1b,
51 0x67, 0xa8, 0x1b, 0x68, 0xaa, 0x1b, 0x69, 0xac, 0x1b,
52 0x6a, 0xae, 0x1b, 0x6b, 0xb0, 0x1b, 0x6c, 0xb2, 0x1b,
53 0x6d, 0xb4, 0xfb, 0x6e, 0xb6, 0xfb, 0x6f, 0xb8, 0xfb,
54 0x70, 0xba, 0xfb, 0x71, 0xbc, 0xdb, 0x72, 0xbe, 0xdb,
55 0x73, 0xc0, 0xdb, 0x74, 0xc2, 0xdb, 0x75, 0xc4, 0xdb,
56 0x76, 0xc6, 0xdb, 0x77, 0xc8, 0xbb, 0x78, 0xca, 0xbb,
57 0x79, 0xcc, 0xbb, 0x7a, 0xce, 0xbb, 0x7b, 0xd0, 0xbb,
58 0x7c, 0xd2, 0xbb, 0x7d, 0xd4, 0xbb, 0x7e, 0xd6, 0xbb,
59 0x7f, 0xd8, 0xbb, 0x80, 0xda, 0x9b, 0x81, 0xdc, 0x9b,
60 0x82, 0xde, 0x9b, 0x83, 0xe0, 0x9b, 0x84, 0xe2, 0x9b,
61 0x85, 0xe4, 0x9b, 0x86, 0xe6, 0x9b, 0x87, 0xe8, 0x9b,
62 0x88, 0xea, 0x9b, 0x89, 0xec, 0x9b };
63
64int s921_isdb_cmd(struct s921_isdb_t *dev, u32 cmd, void *data) {
65 switch(cmd) {
66 case ISDB_T_CMD_INIT:
67 s921_isdb_init(dev);
68 break;
69 case ISDB_T_CMD_SET_PARAM:
70 s921_isdb_set_parameters(dev, data);
71 break;
72 case ISDB_T_CMD_TUNE:
73 s921_isdb_tune(dev, data);
74 break;
75 case ISDB_T_CMD_GET_STATUS:
76 s921_isdb_get_status(dev, data);
77 break;
78 default:
79 printk("unhandled command\n");
80 return -EINVAL;
81 }
82 return 0;
83}
84
85static int s921_isdb_init(struct s921_isdb_t *dev) {
86 unsigned int i;
87 int ret;
88 printk("isdb_init\n");
89 for (i = 0; i < sizeof(init_table); i+=2) {
90 ret = dev->i2c_write(dev->priv_dev, init_table[i], init_table[i+1]);
91 if (ret != 0) {
92 printk("i2c write failed\n");
93 return ret;
94 }
95 }
96 return 0;
97}
98
99static int s921_isdb_set_parameters(struct s921_isdb_t *dev, struct s921_isdb_t_transmission_mode_params *params) {
100
101 int ret;
102 /* auto mode is sufficient for now; later on this should be exposed through an extra interface */
103
104 ret = dev->i2c_write(dev->priv_dev, 0xb0, 0xa0);
105 if (ret < 0)
106 return -EINVAL;
107
108 ret = dev->i2c_write(dev->priv_dev, 0xb2, 0x3d);
109 if (ret < 0)
110 return -EINVAL;
111
112 ret = dev->i2c_write(dev->priv_dev, 0xb3, 0x25);
113 if (ret < 0)
114 return -EINVAL;
115
116 ret = dev->i2c_write(dev->priv_dev, 0xb4, 0x8b);
117 if (ret < 0)
118 return -EINVAL;
119
120 ret = dev->i2c_write(dev->priv_dev, 0xb5, 0x4b);
121 if (ret < 0)
122 return -EINVAL;
123
124 ret = dev->i2c_write(dev->priv_dev, 0xb6, 0x3f);
125 if (ret < 0)
126 return -EINVAL;
127
128 ret = dev->i2c_write(dev->priv_dev, 0xb7, 0x3f);
129 if (ret < 0)
130 return -EINVAL;
131
132 return E_OK;
133}
134
135static int s921_isdb_tune(struct s921_isdb_t *dev, struct s921_isdb_t_tune_params *params) {
136
137 int ret;
138 int index;
139
140 index = (params->frequency - 473143000)/6000000;
141
142 if (params->frequency < 473143000 || index > 48) {
143 return -EINVAL;
144 }
145
146 dev->i2c_write(dev->priv_dev, 0x47, 0x60);
147
148 ret = dev->i2c_write(dev->priv_dev, 0x68, 0x00);
149 if (ret < 0)
150 return -EINVAL;
151
152 ret = dev->i2c_write(dev->priv_dev, 0x69, 0x89);
153 if (ret < 0)
154 return -EINVAL;
155
156 ret = dev->i2c_write(dev->priv_dev, 0xf0, 0x48);
157 if (ret < 0)
158 return -EINVAL;
159
160 ret = dev->i2c_write(dev->priv_dev, 0xf1, 0x19);
161 if (ret < 0)
162 return -EINVAL;
163
164 ret = dev->i2c_write(dev->priv_dev, 0xf2, c_table[index*3]);
165 if (ret < 0)
166 return -EINVAL;
167
168 ret = dev->i2c_write(dev->priv_dev, 0xf3, c_table[index*3+1]);
169 if (ret < 0)
170 return -EINVAL;
171
172 ret = dev->i2c_write(dev->priv_dev, 0xf4, c_table[index*3+2]);
173 if (ret < 0)
174 return -EINVAL;
175
176 ret = dev->i2c_write(dev->priv_dev, 0xf5, 0xae);
177 if (ret < 0)
178 return -EINVAL;
179
180 ret = dev->i2c_write(dev->priv_dev, 0xf6, 0xb7);
181 if (ret < 0)
182 return -EINVAL;
183
184 ret = dev->i2c_write(dev->priv_dev, 0xf7, 0xba);
185 if (ret < 0)
186 return -EINVAL;
187
188 ret = dev->i2c_write(dev->priv_dev, 0xf8, 0xd7);
189 if (ret < 0)
190 return -EINVAL;
191
192 ret = dev->i2c_write(dev->priv_dev, 0x68, 0x0a);
193 if (ret < 0)
194 return -EINVAL;
195
196 ret = dev->i2c_write(dev->priv_dev, 0x69, 0x09);
197 if (ret < 0)
198 return -EINVAL;
199
200 dev->i2c_write(dev->priv_dev, 0x01, 0x40);
201 return 0;
202}
203
204static int s921_isdb_get_status(struct s921_isdb_t *dev, void *data) {
205 unsigned int *ret = (unsigned int*)data;
206 u8 ifagc_dt;
207 u8 rfagc_dt;
208
209 mdelay(10);
210 ifagc_dt = dev->i2c_read(dev->priv_dev, 0x81);
211 rfagc_dt = dev->i2c_read(dev->priv_dev, 0x82);
212 /* report lock only once the RF AGC has settled */
213 *ret = (rfagc_dt == 0x40) ? 1 : 0;
214
215 return 0;
216}
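The tuning entry point above maps the requested frequency onto a 6 MHz UHF channel raster starting at 473.143 MHz and uses the resulting index to pick a 3-byte entry from c_table. A standalone sketch of just that mapping (illustrative only, compiles outside the kernel):

#include <stdio.h>

/* mirrors the index computation in s921_isdb_tune(); illustrative only */
static int s921_channel_index(unsigned int frequency_hz)
{
        int index = (frequency_hz - 473143000) / 6000000;

        if (frequency_hz < 473143000 || index > 48)
                return -1;      /* outside the supported range */
        return index;           /* c_table entry starts at index * 3 */
}

int main(void)
{
        printf("%d\n", s921_channel_index(479143000)); /* prints 1 */
        return 0;
}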
diff --git a/drivers/media/dvb/frontends/s921_core.h b/drivers/media/dvb/frontends/s921_core.h
new file mode 100644
index 000000000000..de2f10a44e72
--- /dev/null
+++ b/drivers/media/dvb/frontends/s921_core.h
@@ -0,0 +1,114 @@
1#ifndef _S921_CORE_H
2#define _S921_CORE_H
3//#define u8 unsigned int
4//#define u32 unsigned int
5
6
7
8//#define EINVAL -1
9#define E_OK 0
10
11struct s921_isdb_t {
12 void *priv_dev;
13 int (*i2c_write)(void *dev, u8 reg, u8 val);
14 int (*i2c_read)(void *dev, u8 reg);
15};
16
17#define ISDB_T_CMD_INIT 0
18#define ISDB_T_CMD_SET_PARAM 1
19#define ISDB_T_CMD_TUNE 2
20#define ISDB_T_CMD_GET_STATUS 3
21
22struct s921_isdb_t_tune_params {
23 u32 frequency;
24};
25
26struct s921_isdb_t_status {
27};
28
29struct s921_isdb_t_transmission_mode_params {
30 u8 mode;
31 u8 layer_a_mode;
32#define ISDB_T_LA_MODE_1 0
33#define ISDB_T_LA_MODE_2 1
34#define ISDB_T_LA_MODE_3 2
35 u8 layer_a_carrier_modulation;
36#define ISDB_T_LA_CM_DQPSK 0
37#define ISDB_T_LA_CM_QPSK 1
38#define ISDB_T_LA_CM_16QAM 2
39#define ISDB_T_LA_CM_64QAM 3
40#define ISDB_T_LA_CM_NOLAYER 4
41 u8 layer_a_code_rate;
42#define ISDB_T_LA_CR_1_2 0
43#define ISDB_T_LA_CR_2_3 1
44#define ISDB_T_LA_CR_3_4 2
45#define ISDB_T_LA_CR_5_6 4
46#define ISDB_T_LA_CR_7_8 8
47#define ISDB_T_LA_CR_NOLAYER 16
48 u8 layer_a_time_interleave;
49#define ISDB_T_LA_TI_0 0
50#define ISDB_T_LA_TI_1 1
51#define ISDB_T_LA_TI_2 2
52#define ISDB_T_LA_TI_4 4
53#define ISDB_T_LA_TI_8 8
54#define ISDB_T_LA_TI_16 16
55#define ISDB_T_LA_TI_32 32
56 u8 layer_a_nseg;
57
58 u8 layer_b_mode;
59#define ISDB_T_LB_MODE_1 0
60#define ISDB_T_LB_MODE_2 1
61#define ISDB_T_LB_MODE_3 2
62 u8 layer_b_carrier_modulation;
63#define ISDB_T_LB_CM_DQPSK 0
64#define ISDB_T_LB_CM_QPSK 1
65#define ISDB_T_LB_CM_16QAM 2
66#define ISDB_T_LB_CM_64QAM 3
67#define ISDB_T_LB_CM_NOLAYER 4
68 u8 layer_b_code_rate;
69#define ISDB_T_LB_CR_1_2 0
70#define ISDB_T_LB_CR_2_3 1
71#define ISDB_T_LB_CR_3_4 2
72#define ISDB_T_LB_CR_5_6 4
73#define ISDB_T_LB_CR_7_8 8
74#define ISDB_T_LB_CR_NOLAYER 16
75 u8 layer_b_time_interleave;
76#define ISDB_T_LB_TI_0 0
77#define ISDB_T_LB_TI_1 1
78#define ISDB_T_LB_TI_2 2
79#define ISDB_T_LB_TI_4 4
80#define ISDB_T_LB_TI_8 8
81#define ISDB_T_LB_TI_16 16
82#define ISDB_T_LB_TI_32 32
83 u8 layer_b_nseg;
84
85 u8 layer_c_mode;
86#define ISDB_T_LC_MODE_1 0
87#define ISDB_T_LC_MODE_2 1
88#define ISDB_T_LC_MODE_3 2
89 u8 layer_c_carrier_modulation;
90#define ISDB_T_LC_CM_DQPSK 0
91#define ISDB_T_LC_CM_QPSK 1
92#define ISDB_T_LC_CM_16QAM 2
93#define ISDB_T_LC_CM_64QAM 3
94#define ISDB_T_LC_CM_NOLAYER 4
95 u8 layer_c_code_rate;
96#define ISDB_T_LC_CR_1_2 0
97#define ISDB_T_LC_CR_2_3 1
98#define ISDB_T_LC_CR_3_4 2
99#define ISDB_T_LC_CR_5_6 4
100#define ISDB_T_LC_CR_7_8 8
101#define ISDB_T_LC_CR_NOLAYER 16
102 u8 layer_c_time_interleave;
103#define ISDB_T_LC_TI_0 0
104#define ISDB_T_LC_TI_1 1
105#define ISDB_T_LC_TI_2 2
106#define ISDB_T_LC_TI_4 4
107#define ISDB_T_LC_TI_8 8
108#define ISDB_T_LC_TI_16 16
109#define ISDB_T_LC_TI_32 32
110 u8 layer_c_nseg;
111};
112
113int s921_isdb_cmd(struct s921_isdb_t *dev, u32 cmd, void *data);
114#endif
diff --git a/drivers/media/dvb/frontends/s921_module.c b/drivers/media/dvb/frontends/s921_module.c
new file mode 100644
index 000000000000..3cbb9cb2cf47
--- /dev/null
+++ b/drivers/media/dvb/frontends/s921_module.c
@@ -0,0 +1,190 @@
1/*
2 * Driver for the Sharp S921 ISDB-T demodulator
3 *
4 * Copyright (C) 2008 Markus Rechberger <mrechberger@sundtek.de>
5 *
6 * All rights reserved.
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/delay.h>
13#include "dvb_frontend.h"
14#include "s921_module.h"
15#include "s921_core.h"
16
17static int debug;
18module_param(debug, int, 0644);
19MODULE_PARM_DESC(debug,"s921 debugging (default off)");
20
21#define dprintk(fmt, args...) do { if (debug) \
22 printk("s921 debug: " fmt, ##args); } while (0)
23
24struct s921_state
25{
26 struct dvb_frontend frontend;
27 fe_modulation_t current_modulation;
28 __u32 snr;
29 __u32 current_frequency;
30 __u8 addr;
31 struct s921_isdb_t dev;
32 struct i2c_adapter *i2c;
33};
34
35static int s921_set_parameters(struct dvb_frontend *fe, struct dvb_frontend_parameters *param) {
36 struct s921_state *state = (struct s921_state *)fe->demodulator_priv;
37 struct s921_isdb_t_transmission_mode_params params;
38 struct s921_isdb_t_tune_params tune_params;
39
40 tune_params.frequency = param->frequency;
41 s921_isdb_cmd(&state->dev, ISDB_T_CMD_SET_PARAM, &params);
42 s921_isdb_cmd(&state->dev, ISDB_T_CMD_TUNE, &tune_params);
43 mdelay(100);
44 return 0;
45}
46
47static int s921_init(struct dvb_frontend *fe) {
48 printk("s921 init\n");
49 return 0;
50}
51
52static int s921_sleep(struct dvb_frontend *fe) {
53 printk("s921 sleep\n");
54 return 0;
55}
56
57static int s921_read_status(struct dvb_frontend *fe, fe_status_t *status)
58{
59 struct s921_state *state = (struct s921_state *)fe->demodulator_priv;
60 unsigned int ret;
61 mdelay(5);
62 s921_isdb_cmd(&state->dev, ISDB_T_CMD_GET_STATUS, &ret);
63 *status = 0;
64
65 printk("status: %02x\n", ret);
66 if (ret == 1) {
67 *status |= FE_HAS_CARRIER;
68 *status |= FE_HAS_VITERBI;
69 *status |= FE_HAS_LOCK;
70 *status |= FE_HAS_SYNC;
71 *status |= FE_HAS_SIGNAL;
72 }
73
74 return 0;
75}
76
77static int s921_read_ber(struct dvb_frontend *fe, __u32 *ber)
78{
79 dprintk("read ber\n");
80 return 0;
81}
82
83static int s921_read_snr(struct dvb_frontend *fe, __u16 *snr)
84{
85 dprintk("read snr\n");
86 return 0;
87}
88
89static int s921_read_ucblocks(struct dvb_frontend *fe, __u32 *ucblocks)
90{
91 dprintk("read ucblocks\n");
92 return 0;
93}
94
95static void s921_release(struct dvb_frontend *fe)
96{
97 struct s921_state *state = (struct s921_state *)fe->demodulator_priv;
98 kfree(state);
99}
100
101static struct dvb_frontend_ops demod_s921={
102 .info = {
103 .name = "SHARP S921",
104 .type = FE_OFDM,
105 .frequency_min = 473143000,
106 .frequency_max = 767143000,
107 .frequency_stepsize = 6000000,
108 .frequency_tolerance = 0,
109 .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
110 FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
111 FE_CAN_FEC_AUTO |
112 FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
113 FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
114 FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER |
115 FE_CAN_MUTE_TS
116 },
117 .init = s921_init,
118 .sleep = s921_sleep,
119 .set_frontend = s921_set_parameters,
120 .read_snr = s921_read_snr,
121 .read_ber = s921_read_ber,
122 .read_status = s921_read_status,
123 .read_ucblocks = s921_read_ucblocks,
124 .release = s921_release,
125};
126
127static int s921_write(void *dev, u8 reg, u8 val) {
128 struct s921_state *state = dev;
129 char buf[2]={reg,val};
130 int err;
131 struct i2c_msg i2cmsgs = {
132 .addr = state->addr,
133 .flags = 0,
134 .len = 2,
135 .buf = buf
136 };
137
138 err = i2c_transfer(state->i2c, &i2cmsgs, 1);
139 if (err != 1) {
140 printk("%s: i2c_transfer error %d\n", __func__, err);
141 /* a negative value is a bus error, anything else is a short transfer */
142 return (err < 0) ? err : -EREMOTEIO;
143 }
144
145
146 return 0;
147}
148
149static int s921_read(void *dev, u8 reg) {
150 struct s921_state *state = dev;
151 u8 b1;
152 int ret;
153 struct i2c_msg msg[2] = { { .addr = state->addr,
154 .flags = 0,
155 .buf = &reg, .len = 1 },
156 { .addr = state->addr,
157 .flags = I2C_M_RD,
158 .buf = &b1, .len = 1 } };
159
160 ret = i2c_transfer(state->i2c, msg, 2);
161 if (ret != 2)
162 return (ret < 0) ? ret : -EREMOTEIO;
163 return b1;
164}
165
166struct dvb_frontend* s921_attach(const struct s921_config *config,
167 struct i2c_adapter *i2c)
168{
169
170 struct s921_state *state;
171 state = kzalloc(sizeof(struct s921_state), GFP_KERNEL);
172 if (!state) return NULL; /* kzalloc() already zero-fills the allocation */
173
174 state->addr = config->i2c_address;
175 state->i2c = i2c;
176 state->dev.i2c_write = &s921_write;
177 state->dev.i2c_read = &s921_read;
178 state->dev.priv_dev = state;
179
180 s921_isdb_cmd(&state->dev, ISDB_T_CMD_INIT, NULL);
181
182 memcpy(&state->frontend.ops, &demod_s921, sizeof(struct dvb_frontend_ops));
183 state->frontend.demodulator_priv = state;
184 return &state->frontend;
185}
186
187EXPORT_SYMBOL_GPL(s921_attach);
188MODULE_AUTHOR("Markus Rechberger <mrechberger@empiatech.com>");
189MODULE_DESCRIPTION("Sharp S921 ISDB-T 1Seg demodulator driver");
190MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/s921_module.h b/drivers/media/dvb/frontends/s921_module.h
new file mode 100644
index 000000000000..78660424ba95
--- /dev/null
+++ b/drivers/media/dvb/frontends/s921_module.h
@@ -0,0 +1,49 @@
1/*
2 * Driver for the Sharp S921 ISDB-T demodulator
3 *
4 * Copyright (C) 2008 Markus Rechberger <mrechberger@sundtek.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef S921_MODULE_H
23#define S921_MODULE_H
24
25#include <linux/dvb/frontend.h>
26#include "s921_core.h"
27
28int s921_isdb_init(struct s921_isdb_t *dev);
29int s921_isdb_cmd(struct s921_isdb_t *dev, u32 cmd, void *data);
30
31struct s921_config
32{
33 /* demodulator's I2C address */
34 u8 i2c_address;
35};
36
37#if defined(CONFIG_DVB_S921) || (defined(CONFIG_DVB_S921_MODULE) && defined(MODULE))
38extern struct dvb_frontend* s921_attach(const struct s921_config *config,
39 struct i2c_adapter *i2c);
40#else
41static inline struct dvb_frontend* s921_attach(const struct s921_config *config,
42 struct i2c_adapter *i2c)
43{
44 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
45 return NULL;
46}
47#endif /* CONFIG_DVB_S921 */
48
49#endif /* S921_MODULE_H */
diff --git a/drivers/media/dvb/frontends/si21xx.c b/drivers/media/dvb/frontends/si21xx.c
index 3ddbe69c45ce..0bd16af8a6cd 100644
--- a/drivers/media/dvb/frontends/si21xx.c
+++ b/drivers/media/dvb/frontends/si21xx.c
@@ -8,7 +8,6 @@
8 8* (at your option) any later version.
9 9*
10 10*/
11#include <linux/version.h>
12 11#include <linux/init.h>
13 12#include <linux/kernel.h>
14 13#include <linux/module.h>
diff --git a/drivers/media/dvb/frontends/stb0899_algo.c b/drivers/media/dvb/frontends/stb0899_algo.c
new file mode 100644
index 000000000000..ced9b7ae7d50
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb0899_algo.c
@@ -0,0 +1,1519 @@
1/*
2 STB0899 Multistandard Frontend driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#include "stb0899_drv.h"
23#include "stb0899_priv.h"
24#include "stb0899_reg.h"
25
26inline u32 stb0899_do_div(u64 n, u32 d)
27{
28 /* wrap do_div() for ease of use */
29
30 do_div(n, d);
31 return n;
32}
33
34/*
35 * stb0899_calc_srate
36 * Compute symbol rate
37 */
38static u32 stb0899_calc_srate(u32 master_clk, u8 *sfr)
39{
40 u64 tmp;
41
42 /* srate = (SFR * master_clk) >> 20 */
43
44 /* sfr is of size 20 bit, stored with an offset of 4 bit */
45 tmp = (((u32)sfr[0]) << 16) | (((u32)sfr[1]) << 8) | sfr[2];
46 tmp &= ~0xf;
47 tmp *= master_clk;
48 tmp >>= 24;
49
50 return tmp;
51}
52
53/*
54 * stb0899_get_srate
55 * Get the current symbol rate
56 */
57u32 stb0899_get_srate(struct stb0899_state *state)
58{
59 struct stb0899_internal *internal = &state->internal;
60 u8 sfr[3];
61
62 stb0899_read_regs(state, STB0899_SFRH, sfr, 3);
63
64 return stb0899_calc_srate(internal->master_clk, sfr);
65}
66
67/*
68 * stb0899_set_srate
69 * Set symbol frequency
70 * MasterClock: master clock frequency (hz)
71 * SymbolRate: symbol rate (bauds)
72 * return symbol frequency
73 */
74static u32 stb0899_set_srate(struct stb0899_state *state, u32 master_clk, u32 srate)
75{
76 u32 tmp;
77 u8 sfr[3];
78
79 dprintk(state->verbose, FE_DEBUG, 1, "-->");
80 /*
81 * in order to have the maximum precision, the symbol rate entered into
82 * the chip is computed as the closest value of the "true value".
83 * In this purpose, the symbol rate value is rounded (1 is added on the bit
84 * below the LSB )
85 *
86 * srate = (SFR * master_clk) >> 20
87 * <=>
88 * SFR = srate << 20 / master_clk
89 *
90 * rounded:
91 * SFR = (srate << 21 + master_clk) / (2 * master_clk)
92 *
93 * stored as 20 bit number with an offset of 4 bit:
94 * sfr = SFR << 4;
95 */
96
97 tmp = stb0899_do_div((((u64)srate) << 21) + master_clk, 2 * master_clk);
98 tmp <<= 4;
99
100 sfr[0] = tmp >> 16;
101 sfr[1] = tmp >> 8;
102 sfr[2] = tmp;
103
104 stb0899_write_regs(state, STB0899_SFRH, sfr, 3);
105
106 return srate;
107}
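The rounding described in the comment above is easy to sanity-check with plain integer math; the sketch below reproduces it in user space for one arbitrary pair of values (99 MHz master clock, 27.5 MSymbol/s, both purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* same rounding as stb0899_set_srate(); illustrative only */
static uint32_t sfr_from_srate(uint32_t master_clk, uint32_t srate)
{
        uint64_t tmp = (((uint64_t)srate << 21) + master_clk) / (2ULL * master_clk);

        return (uint32_t)(tmp << 4);    /* 20-bit SFR stored with a 4-bit offset */
}

int main(void)
{
        printf("sfr = 0x%06x\n", sfr_from_srate(99000000, 27500000));
        return 0;
}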
108
109/*
110 * stb0899_calc_derot_time
111 * Compute the amount of time needed by the derotator to lock
112 * SymbolRate: Symbol rate
113 * return: derotator time constant (ms)
114 */
115static long stb0899_calc_derot_time(long srate)
116{
117 if (srate > 0)
118 return (100000 / (srate / 1000));
119 else
120 return 0;
121}
122
123/*
124 * stb0899_carr_width
125 * Compute the width of the carrier
126 * return: width of carrier (kHz or MHz)
127 */
128long stb0899_carr_width(struct stb0899_state *state)
129{
130 struct stb0899_internal *internal = &state->internal;
131
132 return (internal->srate + (internal->srate * internal->rolloff) / 100);
133}
134
135/*
136 * stb0899_first_subrange
137 * Compute the first subrange of the search
138 */
139static void stb0899_first_subrange(struct stb0899_state *state)
140{
141 struct stb0899_internal *internal = &state->internal;
142 struct stb0899_params *params = &state->params;
143 struct stb0899_config *config = state->config;
144
145 int range = 0;
146 u32 bandwidth = 0;
147
148 if (config->tuner_get_bandwidth) {
149 stb0899_i2c_gate_ctrl(&state->frontend, 1);
150 config->tuner_get_bandwidth(&state->frontend, &bandwidth);
151 stb0899_i2c_gate_ctrl(&state->frontend, 0);
152 range = bandwidth - stb0899_carr_width(state) / 2;
153 }
154
155 if (range > 0)
156 internal->sub_range = MIN(internal->srch_range, range);
157 else
158 internal->sub_range = 0;
159
160 internal->freq = params->freq;
161 internal->tuner_offst = 0L;
162 internal->sub_dir = 1;
163}
164
165/*
166 * stb0899_check_tmg
167 * check for timing lock
168 * internal.Ttiming: time to wait for loop lock
169 */
170static enum stb0899_status stb0899_check_tmg(struct stb0899_state *state)
171{
172 struct stb0899_internal *internal = &state->internal;
173 int lock;
174 u8 reg;
175 s8 timing;
176
177 msleep(internal->t_derot);
178
179 stb0899_write_reg(state, STB0899_RTF, 0xf2);
180 reg = stb0899_read_reg(state, STB0899_TLIR);
181 lock = STB0899_GETFIELD(TLIR_TMG_LOCK_IND, reg);
182 timing = stb0899_read_reg(state, STB0899_RTF);
183
184 if (lock >= 42) {
185 if ((lock > 48) && (ABS(timing) >= 110)) {
186 internal->status = ANALOGCARRIER;
187 dprintk(state->verbose, FE_DEBUG, 1, "-->ANALOG Carrier !");
188 } else {
189 internal->status = TIMINGOK;
190 dprintk(state->verbose, FE_DEBUG, 1, "------->TIMING OK !");
191 }
192 } else {
193 internal->status = NOTIMING;
194 dprintk(state->verbose, FE_DEBUG, 1, "-->NO TIMING !");
195 }
196 return internal->status;
197}
198
199/*
200 * stb0899_search_tmg
201 * perform a fs/2 zig-zag to find timing
202 */
203static enum stb0899_status stb0899_search_tmg(struct stb0899_state *state)
204{
205 struct stb0899_internal *internal = &state->internal;
206 struct stb0899_params *params = &state->params;
207
208 short int derot_step, derot_freq = 0, derot_limit, next_loop = 3;
209 int index = 0;
210 u8 cfr[2];
211
212 internal->status = NOTIMING;
213
214 /* timing loop computation & symbol rate optimisation */
215 derot_limit = (internal->sub_range / 2L) / internal->mclk;
216 derot_step = (params->srate / 2L) / internal->mclk;
217
218 while ((stb0899_check_tmg(state) != TIMINGOK) && next_loop) {
219 index++;
220 derot_freq += index * internal->direction * derot_step; /* next derot zig zag position */
221
222 if (ABS(derot_freq) > derot_limit)
223 next_loop--;
224
225 if (next_loop) {
226 STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(state->config->inversion * derot_freq));
227 STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(state->config->inversion * derot_freq));
228 stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
229 }
230 internal->direction = -internal->direction; /* Change zigzag direction */
231 }
232
233 if (internal->status == TIMINGOK) {
234 stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
235 internal->derot_freq = state->config->inversion * MAKEWORD16(cfr[0], cfr[1]);
236 dprintk(state->verbose, FE_DEBUG, 1, "------->TIMING OK ! Derot Freq = %d", internal->derot_freq);
237 }
238
239 return internal->status;
240}
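The same zig-zag pattern drives the timing, carrier and data searches: the step count grows every iteration and the sign flips every pass, so the derotator is probed at +1, -1, +2, -2, ... times the step until the limit is exceeded. A tiny user-space sketch of the offsets this produces (illustrative only):

#include <stdio.h>

int main(void)
{
        int step = 100, freq = 0, dir = 1, index = 0, i;

        for (i = 0; i < 8; i++) {
                index++;
                freq += index * dir * step;     /* next zig-zag position */
                dir = -dir;                     /* change zig-zag direction */
                printf("%d ", freq);            /* 100 -100 200 -200 300 -300 400 -400 */
        }
        printf("\n");
        return 0;
}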
241
242/*
243 * stb0899_check_carrier
244 * Check for carrier found
245 */
246static enum stb0899_status stb0899_check_carrier(struct stb0899_state *state)
247{
248 struct stb0899_internal *internal = &state->internal;
249 u8 reg;
250
251 msleep(internal->t_derot); /* wait for derotator ok */
252
253 reg = stb0899_read_reg(state, STB0899_CFD);
254 STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
255 stb0899_write_reg(state, STB0899_CFD, reg);
256
257 reg = stb0899_read_reg(state, STB0899_DSTATUS);
258 dprintk(state->verbose, FE_DEBUG, 1, "--------------------> STB0899_DSTATUS=[0x%02x]", reg);
259 if (STB0899_GETFIELD(CARRIER_FOUND, reg)) {
260 internal->status = CARRIEROK;
261 dprintk(state->verbose, FE_DEBUG, 1, "-------------> CARRIEROK !");
262 } else {
263 internal->status = NOCARRIER;
264 dprintk(state->verbose, FE_DEBUG, 1, "-------------> NOCARRIER !");
265 }
266
267 return internal->status;
268}
269
270/*
271 * stb0899_search_carrier
272 * Search for a QPSK carrier with the derotator
273 */
274static enum stb0899_status stb0899_search_carrier(struct stb0899_state *state)
275{
276 struct stb0899_internal *internal = &state->internal;
277
278 short int derot_freq = 0, last_derot_freq = 0, derot_limit, next_loop = 3;
279 int index = 0;
280 u8 cfr[2];
281 u8 reg;
282
283 internal->status = NOCARRIER;
284 derot_limit = (internal->sub_range / 2L) / internal->mclk;
285 derot_freq = internal->derot_freq;
286
287 reg = stb0899_read_reg(state, STB0899_CFD);
288 STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
289 stb0899_write_reg(state, STB0899_CFD, reg);
290
291 do {
292 dprintk(state->verbose, FE_DEBUG, 1, "Derot Freq=%d, mclk=%d", derot_freq, internal->mclk);
293 if (stb0899_check_carrier(state) == NOCARRIER) {
294 index++;
295 last_derot_freq = derot_freq;
296 derot_freq += index * internal->direction * internal->derot_step; /* next zig zag derotator position */
297
298 if(ABS(derot_freq) > derot_limit)
299 next_loop--;
300
301 if (next_loop) {
302 reg = stb0899_read_reg(state, STB0899_CFD);
303 STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
304 stb0899_write_reg(state, STB0899_CFD, reg);
305
306 STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(state->config->inversion * derot_freq));
307 STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(state->config->inversion * derot_freq));
308 stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
309 }
310 }
311
312 internal->direction = -internal->direction; /* Change zigzag direction */
313 } while ((internal->status != CARRIEROK) && next_loop);
314
315 if (internal->status == CARRIEROK) {
316 stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
317 internal->derot_freq = state->config->inversion * MAKEWORD16(cfr[0], cfr[1]);
318 dprintk(state->verbose, FE_DEBUG, 1, "----> CARRIER OK !, Derot Freq=%d", internal->derot_freq);
319 } else {
320 internal->derot_freq = last_derot_freq;
321 }
322
323 return internal->status;
324}
325
326/*
327 * stb0899_check_data
328 * Check for data found
329 */
330static enum stb0899_status stb0899_check_data(struct stb0899_state *state)
331{
332 struct stb0899_internal *internal = &state->internal;
333 struct stb0899_params *params = &state->params;
334
335 int lock = 0, index = 0, dataTime = 500, loop;
336 u8 reg;
337
338 internal->status = NODATA;
339
340 /* RESET FEC */
341 reg = stb0899_read_reg(state, STB0899_TSTRES);
342 STB0899_SETFIELD_VAL(FRESACS, reg, 1);
343 stb0899_write_reg(state, STB0899_TSTRES, reg);
344 msleep(1);
345 reg = stb0899_read_reg(state, STB0899_TSTRES);
346 STB0899_SETFIELD_VAL(FRESACS, reg, 0);
347 stb0899_write_reg(state, STB0899_TSTRES, reg);
348
349 if (params->srate <= 2000000)
350 dataTime = 2000;
351 else if (params->srate <= 5000000)
352 dataTime = 1500;
353 else if (params->srate <= 15000000)
354 dataTime = 1000;
355 else
356 dataTime = 500;
357
358 stb0899_write_reg(state, STB0899_DSTATUS2, 0x00); /* force search loop */
359 while (1) {
360 /* WARNING! VIT LOCKED has to be tested before VIT_END_LOOP */
361 reg = stb0899_read_reg(state, STB0899_VSTATUS);
362 lock = STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg);
363 loop = STB0899_GETFIELD(VSTATUS_END_LOOPVIT, reg);
364
365 if (lock || loop || (index > dataTime))
366 break;
367 index++;
368 }
369
370 if (lock) { /* DATA LOCK indicator */
371 internal->status = DATAOK;
372 dprintk(state->verbose, FE_DEBUG, 1, "-----------------> DATA OK !");
373 }
374
375 return internal->status;
376}
377
378/*
379 * stb0899_search_data
380 * Search for valid data with the derotator
381 */
382static enum stb0899_status stb0899_search_data(struct stb0899_state *state)
383{
384 short int derot_freq, derot_step, derot_limit, next_loop = 3;
385 u8 cfr[2];
386 u8 reg;
387 int index = 1;
388
389 struct stb0899_internal *internal = &state->internal;
390 struct stb0899_params *params = &state->params;
391
392 derot_step = (params->srate / 4L) / internal->mclk;
393 derot_limit = (internal->sub_range / 2L) / internal->mclk;
394 derot_freq = internal->derot_freq;
395
396 do {
397 if ((internal->status != CARRIEROK) || (stb0899_check_data(state) != DATAOK)) {
398
399 derot_freq += index * internal->direction * derot_step; /* next zig zag derotator position */
400 if (ABS(derot_freq) > derot_limit)
401 next_loop--;
402
403 if (next_loop) {
404 dprintk(state->verbose, FE_DEBUG, 1, "Derot freq=%d, mclk=%d", derot_freq, internal->mclk);
405 reg = stb0899_read_reg(state, STB0899_CFD);
406 STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
407 stb0899_write_reg(state, STB0899_CFD, reg);
408
409 STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(state->config->inversion * derot_freq));
410 STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(state->config->inversion * derot_freq));
411 stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
412
413 stb0899_check_carrier(state);
414 index++;
415 }
416 }
417 internal->direction = -internal->direction; /* change zig zag direction */
418 } while ((internal->status != DATAOK) && next_loop);
419
420 if (internal->status == DATAOK) {
421 stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
422 internal->derot_freq = state->config->inversion * MAKEWORD16(cfr[0], cfr[1]);
423 dprintk(state->verbose, FE_DEBUG, 1, "------> DATAOK ! Derot Freq=%d", internal->derot_freq);
424 }
425
426 return internal->status;
427}
428
429/*
430 * stb0899_check_range
431 * check if the found frequency is in the correct range
432 */
433static enum stb0899_status stb0899_check_range(struct stb0899_state *state)
434{
435 struct stb0899_internal *internal = &state->internal;
436 struct stb0899_params *params = &state->params;
437
438 int range_offst, tp_freq;
439
440 range_offst = internal->srch_range / 2000;
441 tp_freq = internal->freq + (internal->derot_freq * internal->mclk) / 1000;
442
443 if ((tp_freq >= params->freq - range_offst) && (tp_freq <= params->freq + range_offst)) {
444 internal->status = RANGEOK;
445 dprintk(state->verbose, FE_DEBUG, 1, "----> RANGEOK !");
446 } else {
447 internal->status = OUTOFRANGE;
448 dprintk(state->verbose, FE_DEBUG, 1, "----> OUT OF RANGE !");
449 }
450
451 return internal->status;
452}
453
454/*
455 * NextSubRange
456 * Compute the next subrange of the search
457 */
458static void next_sub_range(struct stb0899_state *state)
459{
460 struct stb0899_internal *internal = &state->internal;
461 struct stb0899_params *params = &state->params;
462
463 long old_sub_range;
464
465 if (internal->sub_dir > 0) {
466 old_sub_range = internal->sub_range;
467 internal->sub_range = MIN((internal->srch_range / 2) -
468 (internal->tuner_offst + internal->sub_range / 2),
469 internal->sub_range);
470
471 if (internal->sub_range < 0)
472 internal->sub_range = 0;
473
474 internal->tuner_offst += (old_sub_range + internal->sub_range) / 2;
475 }
476
477 internal->freq = params->freq + (internal->sub_dir * internal->tuner_offst) / 1000;
478 internal->sub_dir = -internal->sub_dir;
479}
480
481/*
482 * stb0899_dvbs_algo
483 * Search for a signal, timing, carrier and data for a
484 * given frequency in a given range
485 */
486enum stb0899_status stb0899_dvbs_algo(struct stb0899_state *state)
487{
488 struct stb0899_params *params = &state->params;
489 struct stb0899_internal *internal = &state->internal;
490 struct stb0899_config *config = state->config;
491
492 u8 bclc, reg;
493 u8 cfr[2];
494 u8 eq_const[10];
495 s32 clnI = 3;
496 u32 bandwidth = 0;
497
498 /* BETA values rated @ 99MHz */
499 s32 betaTab[5][4] = {
500 /* 5 10 20 30MBps */
501 { 37, 34, 32, 31 }, /* QPSK 1/2 */
502 { 37, 35, 33, 31 }, /* QPSK 2/3 */
503 { 37, 35, 33, 31 }, /* QPSK 3/4 */
504 { 37, 36, 33, 32 }, /* QPSK 5/6 */
505 { 37, 36, 33, 32 } /* QPSK 7/8 */
506 };
507
508 internal->direction = 1;
509
510 stb0899_set_srate(state, internal->master_clk, params->srate);
511 /* Carrier loop optimization versus symbol rate for acquisition*/
512 if (params->srate <= 5000000) {
513 stb0899_write_reg(state, STB0899_ACLC, 0x89);
514 bclc = stb0899_read_reg(state, STB0899_BCLC);
515 STB0899_SETFIELD_VAL(BETA, bclc, 0x1c);
516 stb0899_write_reg(state, STB0899_BCLC, bclc);
517 clnI = 0;
518 } else if (params->srate <= 15000000) {
519 stb0899_write_reg(state, STB0899_ACLC, 0xc9);
520 bclc = stb0899_read_reg(state, STB0899_BCLC);
521 STB0899_SETFIELD_VAL(BETA, bclc, 0x22);
522 stb0899_write_reg(state, STB0899_BCLC, bclc);
523 clnI = 1;
524 } else if(params->srate <= 25000000) {
525 stb0899_write_reg(state, STB0899_ACLC, 0x89);
526 bclc = stb0899_read_reg(state, STB0899_BCLC);
527 STB0899_SETFIELD_VAL(BETA, bclc, 0x27);
528 stb0899_write_reg(state, STB0899_BCLC, bclc);
529 clnI = 2;
530 } else {
531 stb0899_write_reg(state, STB0899_ACLC, 0xc8);
532 bclc = stb0899_read_reg(state, STB0899_BCLC);
533 STB0899_SETFIELD_VAL(BETA, bclc, 0x29);
534 stb0899_write_reg(state, STB0899_BCLC, bclc);
535 clnI = 3;
536 }
537
538 dprintk(state->verbose, FE_DEBUG, 1, "Set the timing loop to acquisition");
539 /* Set the timing loop to acquisition */
540 stb0899_write_reg(state, STB0899_RTC, 0x46);
541 stb0899_write_reg(state, STB0899_CFD, 0xee);
542
543 /* !! WARNING !!
544 * Do not read any status variables during acquisition;
545 * if any are needed, read them before the acquisition starts.
546 * Querying status while acquiring disturbs the loops and
547 * prevents the demodulator from locking.
548 */
549 dprintk(state->verbose, FE_DEBUG, 1, "Derot Percent=%d Srate=%d mclk=%d",
550 internal->derot_percent, params->srate, internal->mclk);
551
552 /* Initial calculations */
553 internal->derot_step = internal->derot_percent * (params->srate / 1000L) / internal->mclk; /* DerotStep/1000 * Fsymbol */
554 internal->t_derot = stb0899_calc_derot_time(params->srate);
555 internal->t_data = 500;
556
557 dprintk(state->verbose, FE_DEBUG, 1, "RESET stream merger");
558 /* RESET Stream merger */
559 reg = stb0899_read_reg(state, STB0899_TSTRES);
560 STB0899_SETFIELD_VAL(FRESRS, reg, 1);
561 stb0899_write_reg(state, STB0899_TSTRES, reg);
562
563 /*
564 * Set KDIVIDER to an intermediate value between
565 * 1/2 and 7/8 for acquisition
566 */
567 reg = stb0899_read_reg(state, STB0899_DEMAPVIT);
568 STB0899_SETFIELD_VAL(DEMAPVIT_KDIVIDER, reg, 60);
569 stb0899_write_reg(state, STB0899_DEMAPVIT, reg);
570
571 stb0899_write_reg(state, STB0899_EQON, 0x01); /* Equalizer OFF while acquiring */
572 stb0899_write_reg(state, STB0899_VITSYNC, 0x19);
573
574 stb0899_first_subrange(state);
575 do {
576 /* Initialisations */
577 cfr[0] = cfr[1] = 0;
578 stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* RESET derotator frequency */
579
580 stb0899_write_reg(state, STB0899_RTF, 0);
581 reg = stb0899_read_reg(state, STB0899_CFD);
582 STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
583 stb0899_write_reg(state, STB0899_CFD, reg);
584
585 internal->derot_freq = 0;
586 internal->status = NOAGC1;
587
588 /* enable tuner I/O */
589 stb0899_i2c_gate_ctrl(&state->frontend, 1);
590
591 /* Move tuner to frequency */
592 dprintk(state->verbose, FE_DEBUG, 1, "Tuner set frequency");
593 if (state->config->tuner_set_frequency)
594 state->config->tuner_set_frequency(&state->frontend, internal->freq);
595
596 if (state->config->tuner_get_frequency)
597 state->config->tuner_get_frequency(&state->frontend, &internal->freq);
598
599 msleep(internal->t_agc1 + internal->t_agc2 + internal->t_derot); /* AGC1, AGC2 and timing loop */
600 dprintk(state->verbose, FE_DEBUG, 1, "current derot freq=%d", internal->derot_freq);
601 internal->status = AGC1OK;
602
603 /* There is signal in the band */
604 if (config->tuner_get_bandwidth)
605 config->tuner_get_bandwidth(&state->frontend, &bandwidth);
606
607 /* disable tuner I/O */
608 stb0899_i2c_gate_ctrl(&state->frontend, 0);
609
610 if (params->srate <= bandwidth / 2)
611 stb0899_search_tmg(state); /* For low rates (SCPC) */
612 else
613 stb0899_check_tmg(state); /* For high rates (MCPC) */
614
615 if (internal->status == TIMINGOK) {
616 dprintk(state->verbose, FE_DEBUG, 1,
617 "TIMING OK ! Derot freq=%d, mclk=%d",
618 internal->derot_freq, internal->mclk);
619
620 if (stb0899_search_carrier(state) == CARRIEROK) { /* Search for carrier */
621 dprintk(state->verbose, FE_DEBUG, 1,
622 "CARRIER OK ! Derot freq=%d, mclk=%d",
623 internal->derot_freq, internal->mclk);
624
625 if (stb0899_search_data(state) == DATAOK) { /* Check for data */
626 dprintk(state->verbose, FE_DEBUG, 1,
627 "DATA OK ! Derot freq=%d, mclk=%d",
628 internal->derot_freq, internal->mclk);
629
630 if (stb0899_check_range(state) == RANGEOK) {
631 dprintk(state->verbose, FE_DEBUG, 1,
632 "RANGE OK ! derot freq=%d, mclk=%d",
633 internal->derot_freq, internal->mclk);
634
635 internal->freq = params->freq + ((internal->derot_freq * internal->mclk) / 1000);
636 reg = stb0899_read_reg(state, STB0899_PLPARM);
637 internal->fecrate = STB0899_GETFIELD(VITCURPUN, reg);
638 dprintk(state->verbose, FE_DEBUG, 1,
639 "freq=%d, internal resultant freq=%d",
640 params->freq, internal->freq);
641
642 dprintk(state->verbose, FE_DEBUG, 1,
643 "internal puncture rate=%d",
644 internal->fecrate);
645 }
646 }
647 }
648 }
649 if (internal->status != RANGEOK)
650 next_sub_range(state);
651
652 } while (internal->sub_range && internal->status != RANGEOK);
653
654 /* Set the timing loop to tracking */
655 stb0899_write_reg(state, STB0899_RTC, 0x33);
656 stb0899_write_reg(state, STB0899_CFD, 0xf7);
657 /* if locked and range ok, set Kdiv */
658 if (internal->status == RANGEOK) {
659 dprintk(state->verbose, FE_DEBUG, 1, "Locked & Range OK !");
660 stb0899_write_reg(state, STB0899_EQON, 0x41); /* Equalizer OFF while acquiring */
661 stb0899_write_reg(state, STB0899_VITSYNC, 0x39); /* SN to b'11 for acquisition */
662
663 /*
664 * Carrier loop optimization versus
665 * symbol Rate/Puncture Rate for Tracking
666 */
667 reg = stb0899_read_reg(state, STB0899_BCLC);
668 switch (internal->fecrate) {
669 case STB0899_FEC_1_2: /* 13 */
670 stb0899_write_reg(state, STB0899_DEMAPVIT, 0x1a);
671 STB0899_SETFIELD_VAL(BETA, reg, betaTab[0][clnI]);
672 stb0899_write_reg(state, STB0899_BCLC, reg);
673 break;
674 case STB0899_FEC_2_3: /* 18 */
675 stb0899_write_reg(state, STB0899_DEMAPVIT, 44);
676 STB0899_SETFIELD_VAL(BETA, reg, betaTab[1][clnI]);
677 stb0899_write_reg(state, STB0899_BCLC, reg);
678 break;
679 case STB0899_FEC_3_4: /* 21 */
680 stb0899_write_reg(state, STB0899_DEMAPVIT, 60);
681 STB0899_SETFIELD_VAL(BETA, reg, betaTab[2][clnI]);
682 stb0899_write_reg(state, STB0899_BCLC, reg);
683 break;
684 case STB0899_FEC_5_6: /* 24 */
685 stb0899_write_reg(state, STB0899_DEMAPVIT, 75);
686 STB0899_SETFIELD_VAL(BETA, reg, betaTab[3][clnI]);
687 stb0899_write_reg(state, STB0899_BCLC, reg);
688 break;
689 case STB0899_FEC_6_7: /* 25 */
690 stb0899_write_reg(state, STB0899_DEMAPVIT, 88);
691 stb0899_write_reg(state, STB0899_ACLC, 0x88);
692 stb0899_write_reg(state, STB0899_BCLC, 0x9a);
693 break;
694 case STB0899_FEC_7_8: /* 26 */
695 stb0899_write_reg(state, STB0899_DEMAPVIT, 94);
696 STB0899_SETFIELD_VAL(BETA, reg, betaTab[4][clnI]);
697 stb0899_write_reg(state, STB0899_BCLC, reg);
698 break;
699 default:
700 dprintk(state->verbose, FE_DEBUG, 1, "Unsupported Puncture Rate");
701 break;
702 }
703 /* release stream merger RESET */
704 reg = stb0899_read_reg(state, STB0899_TSTRES);
705 STB0899_SETFIELD_VAL(FRESRS, reg, 0);
706 stb0899_write_reg(state, STB0899_TSTRES, reg);
707
708 /* disable carrier detector */
709 reg = stb0899_read_reg(state, STB0899_CFD);
710 STB0899_SETFIELD_VAL(CFD_ON, reg, 0);
711 stb0899_write_reg(state, STB0899_CFD, reg);
712
713 stb0899_read_regs(state, STB0899_EQUAI1, eq_const, 10);
714 }
715
716 return internal->status;
717}
718
719/*
720 * stb0899_dvbs2_config_uwp
721 * Configure UWP state machine
722 */
723static void stb0899_dvbs2_config_uwp(struct stb0899_state *state)
724{
725 struct stb0899_internal *internal = &state->internal;
726 struct stb0899_config *config = state->config;
727 u32 uwp1, uwp2, uwp3, reg;
728
729 uwp1 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL1);
730 uwp2 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL2);
731 uwp3 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL3);
732
733 STB0899_SETFIELD_VAL(UWP_ESN0_AVE, uwp1, config->esno_ave);
734 STB0899_SETFIELD_VAL(UWP_ESN0_QUANT, uwp1, config->esno_quant);
735 STB0899_SETFIELD_VAL(UWP_TH_SOF, uwp1, config->uwp_threshold_sof);
736
737 STB0899_SETFIELD_VAL(FE_COARSE_TRK, uwp2, internal->av_frame_coarse);
738 STB0899_SETFIELD_VAL(FE_FINE_TRK, uwp2, internal->av_frame_fine);
739 STB0899_SETFIELD_VAL(UWP_MISS_TH, uwp2, config->miss_threshold);
740
741 STB0899_SETFIELD_VAL(UWP_TH_ACQ, uwp3, config->uwp_threshold_acq);
742 STB0899_SETFIELD_VAL(UWP_TH_TRACK, uwp3, config->uwp_threshold_track);
743
744 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL1, STB0899_OFF0_UWP_CNTRL1, uwp1);
745 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL2, STB0899_OFF0_UWP_CNTRL2, uwp2);
746 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL3, STB0899_OFF0_UWP_CNTRL3, uwp3);
747
748 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, SOF_SRCH_TO);
749 STB0899_SETFIELD_VAL(SOF_SEARCH_TIMEOUT, reg, config->sof_search_timeout);
750 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_SOF_SRCH_TO, STB0899_OFF0_SOF_SRCH_TO, reg);
751}
752
753/*
754 * stb0899_dvbs2_config_csm_auto
755 * Set CSM to AUTO mode
756 */
757static void stb0899_dvbs2_config_csm_auto(struct stb0899_state *state)
758{
759 u32 reg;
760
761 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
762 STB0899_SETFIELD_VAL(CSM_AUTO_PARAM, reg, 1);
763 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, reg);
764}
765
766long Log2Int(int number)
767{
768 int i;
769
770 i = 0;
771 while ((1 << i) <= ABS(number))
772 i++;
773
774 if (number == 0)
775 i = 1;
776
777 return i - 1;
778}
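Log2Int() above is simply floor(log2(|n|)) computed by shifting; for non-zero input it matches what later kernel code would usually spell as ilog2() from <linux/log2.h>. A user-space check of the behaviour (illustrative only):

#include <stdio.h>
#include <stdlib.h>

/* same result as Log2Int() above */
static int floor_log2(int number)
{
        int i = 0;

        while ((1 << i) <= abs(number))
                i++;
        return number ? i - 1 : 0;      /* Log2Int(0) is defined as 0 here */
}

int main(void)
{
        printf("%d %d %d\n", floor_log2(1), floor_log2(5), floor_log2(64));     /* 0 2 6 */
        return 0;
}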
779
780/*
781 * stb0899_dvbs2_calc_srate
782 * compute BTR_NOM_FREQ for the symbol rate
783 */
784static u32 stb0899_dvbs2_calc_srate(struct stb0899_state *state)
785{
786 struct stb0899_internal *internal = &state->internal;
787 struct stb0899_config *config = state->config;
788
789 u32 dec_ratio, dec_rate, decim, remain, intval, btr_nom_freq;
790 u32 master_clk, srate;
791
792 dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
793 dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
794 dec_rate = Log2Int(dec_ratio);
795 decim = 1 << dec_rate;
796 master_clk = internal->master_clk / 1000;
797 srate = internal->srate / 1000;
798
799 if (decim <= 4) {
800 intval = (decim * (1 << (config->btr_nco_bits - 1))) / master_clk;
801 remain = (decim * (1 << (config->btr_nco_bits - 1))) % master_clk;
802 } else {
803 intval = (1 << (config->btr_nco_bits - 1)) / (master_clk / 100) * decim / 100;
804 remain = (decim * (1 << (config->btr_nco_bits - 1))) % master_clk;
805 }
806 btr_nom_freq = (intval * srate) + ((remain * srate) / master_clk);
807
808 return btr_nom_freq;
809}
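The decimation factor that feeds BTR_NOM_FREQ follows directly from the master clock to symbol rate ratio; a quick illustrative calculation with made-up values (99 MHz master clock, 5 MSymbol/s):

#include <stdio.h>

/* decimation selection as in stb0899_dvbs2_calc_srate(); illustrative only */
int main(void)
{
        unsigned int master_clk = 99000000, srate = 5000000;    /* example values */
        unsigned int dec_ratio = (master_clk * 2) / (5 * srate);
        unsigned int dec_rate, decim;

        if (dec_ratio == 0)
                dec_ratio = 1;
        for (dec_rate = 0; (1u << (dec_rate + 1)) <= dec_ratio; dec_rate++)
                ;                       /* floor(log2(dec_ratio)), as Log2Int() computes */
        decim = 1u << dec_rate;

        printf("dec_ratio=%u dec_rate=%u decim=%u\n", dec_ratio, dec_rate, decim);
        /* prints: dec_ratio=7 dec_rate=2 decim=4 */
        return 0;
}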
810
811/*
812 * stb0899_dvbs2_calc_dev
813 * compute the correction to be applied to symbol rate
814 */
815static u32 stb0899_dvbs2_calc_dev(struct stb0899_state *state)
816{
817 struct stb0899_internal *internal = &state->internal;
818 u32 dec_ratio, correction, master_clk, srate;
819
820 dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
821 dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
822
823 master_clk = internal->master_clk / 1000; /* for integer calculation */
824 srate = internal->srate / 1000; /* for integer calculation */
825 correction = (512 * master_clk) / (2 * dec_ratio * srate);
826
827 return correction;
828}
829
830/*
831 * stb0899_dvbs2_set_srate
832 * Set DVBS2 symbol rate
833 */
834static void stb0899_dvbs2_set_srate(struct stb0899_state *state)
835{
836 struct stb0899_internal *internal = &state->internal;
837
838 u32 dec_ratio, dec_rate, win_sel, decim, f_sym, btr_nom_freq;
839 u32 correction, freq_adj, band_lim, decim_cntrl, reg;
840 u8 anti_alias;
841
842 /*set decimation to 1*/
843 dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
844 dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
845 dec_rate = Log2Int(dec_ratio);
846
847 win_sel = 0;
848 if (dec_rate >= 5)
849 win_sel = dec_rate - 4;
850
851 decim = (1 << dec_rate);
852 /* (FSamp/Fsymbol * 100) for integer calculation */
853 f_sym = internal->master_clk / ((decim * internal->srate) / 1000);
854
855 if (f_sym <= 2250) /* don't band limit signal going into btr block*/
856 band_lim = 1;
857 else
858 band_lim = 0; /* band limit signal going into btr block*/
859
860 decim_cntrl = ((win_sel << 3) & 0x18) + ((band_lim << 5) & 0x20) + (dec_rate & 0x7);
861 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DECIM_CNTRL, STB0899_OFF0_DECIM_CNTRL, decim_cntrl);
862
863 if (f_sym <= 3450)
864 anti_alias = 0;
865 else if (f_sym <= 4250)
866 anti_alias = 1;
867 else
868 anti_alias = 2;
869
870 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ANTI_ALIAS_SEL, STB0899_OFF0_ANTI_ALIAS_SEL, anti_alias);
871 btr_nom_freq = stb0899_dvbs2_calc_srate(state);
872 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_NOM_FREQ, STB0899_OFF0_BTR_NOM_FREQ, btr_nom_freq);
873
874 correction = stb0899_dvbs2_calc_dev(state);
875 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_CNTRL);
876 STB0899_SETFIELD_VAL(BTR_FREQ_CORR, reg, correction);
877 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_CNTRL, STB0899_OFF0_BTR_CNTRL, reg);
878
879 /* scale UWP+CSM frequency to sample rate*/
880 freq_adj = internal->srate / (internal->master_clk / 4096);
881 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_FREQ_ADJ_SCALE, STB0899_OFF0_FREQ_ADJ_SCALE, freq_adj);
882}
883
884/*
885 * stb0899_dvbs2_set_btr_loopbw
886 * set bit timing loop bandwidth as a percentage of the symbol rate
887 */
888static void stb0899_dvbs2_set_btr_loopbw(struct stb0899_state *state)
889{
890 struct stb0899_internal *internal = &state->internal;
891 struct stb0899_config *config = state->config;
892
893 u32 sym_peak = 23, zeta = 707, loopbw_percent = 60;
894 s32 dec_ratio, dec_rate, k_btr1_rshft, k_btr1, k_btr0_rshft;
895 s32 k_btr0, k_btr2_rshft, k_direct_shift, k_indirect_shift;
896 u32 decim, K, wn, k_direct, k_indirect;
897 u32 reg;
898
899 dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
900 dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
901 dec_rate = Log2Int(dec_ratio);
902 decim = (1 << dec_rate);
903
904 sym_peak *= 576000;
905 K = (1 << config->btr_nco_bits) / (internal->master_clk / 1000);
906 K *= (internal->srate / 1000000) * decim; /*k=k 10^-8*/
907
908 if (K != 0) {
909 K = sym_peak / K;
910 wn = (4 * zeta * zeta) + 1000000;
911 wn = (2 * (loopbw_percent * 1000) * 40 * zeta) /wn; /*wn =wn 10^-8*/
912
913 k_indirect = (wn * wn) / K;
914 /* k_indirect is in units of 10^-6 */
915 k_direct = (2 * wn * zeta) / K; /*kDirect = kDirect 10^-2*/
916 k_direct *= 100;
917
918 k_direct_shift = Log2Int(k_direct) - Log2Int(10000) - 2;
919 k_btr1_rshft = (-1 * k_direct_shift) + config->btr_gain_shift_offset;
920 k_btr1 = k_direct / (1 << k_direct_shift);
921 k_btr1 /= 10000;
922
923 k_indirect_shift = Log2Int(k_indirect + 15) - 20 /*- 2*/;
924 k_btr0_rshft = (-1 * k_indirect_shift) + config->btr_gain_shift_offset;
925 k_btr0 = k_indirect * (1 << (-k_indirect_shift));
926 k_btr0 /= 1000000;
927
928 k_btr2_rshft = 0;
929 if (k_btr0_rshft > 15) {
930 k_btr2_rshft = k_btr0_rshft - 15;
931 k_btr0_rshft = 15;
932 }
933 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_LOOP_GAIN);
934 STB0899_SETFIELD_VAL(KBTR0_RSHFT, reg, k_btr0_rshft);
935 STB0899_SETFIELD_VAL(KBTR0, reg, k_btr0);
936 STB0899_SETFIELD_VAL(KBTR1_RSHFT, reg, k_btr1_rshft);
937 STB0899_SETFIELD_VAL(KBTR1, reg, k_btr1);
938 STB0899_SETFIELD_VAL(KBTR2_RSHFT, reg, k_btr2_rshft);
939 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_LOOP_GAIN, STB0899_OFF0_BTR_LOOP_GAIN, reg);
940 } else
941 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_LOOP_GAIN, STB0899_OFF0_BTR_LOOP_GAIN, 0xc4c4f);
942}
943
944/*
945 * stb0899_dvbs2_set_carr_freq
946 * set nominal frequency for carrier search
947 */
948static void stb0899_dvbs2_set_carr_freq(struct stb0899_state *state, s32 carr_freq, u32 master_clk)
949{
950 struct stb0899_config *config = state->config;
951 s32 crl_nom_freq;
952 u32 reg;
953
954 crl_nom_freq = (1 << config->crl_nco_bits) / master_clk;
955 crl_nom_freq *= carr_freq;
956 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
957 STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, crl_nom_freq);
958 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
959}
960
961/*
962 * stb0899_dvbs2_init_calc
963 * Initialize DVBS2 UWP, CSM, carrier and timing loops
964 */
965static void stb0899_dvbs2_init_calc(struct stb0899_state *state)
966{
967 struct stb0899_internal *internal = &state->internal;
968 s32 steps, step_size;
969 u32 range, reg;
970
971 /* config uwp and csm */
972 stb0899_dvbs2_config_uwp(state);
973 stb0899_dvbs2_config_csm_auto(state);
974
975 /* initialize BTR */
976 stb0899_dvbs2_set_srate(state);
977 stb0899_dvbs2_set_btr_loopbw(state);
978
979 if (internal->srate / 1000000 >= 15)
980 step_size = (1 << 17) / 5;
981 else if (internal->srate / 1000000 >= 10)
982 step_size = (1 << 17) / 7;
983 else if (internal->srate / 1000000 >= 5)
984 step_size = (1 << 17) / 10;
985 else
986 step_size = (1 << 17) / 4;
987
988 range = internal->srch_range / 1000000;
989 steps = (10 * range * (1 << 17)) / (step_size * (internal->srate / 1000000));
990 steps = (steps + 6) / 10;
991 steps = (steps == 0) ? 1 : steps;
992 if (steps % 2 == 0)
993 stb0899_dvbs2_set_carr_freq(state, internal->center_freq -
994 (internal->step_size * (internal->srate / 20000000)),
995 (internal->master_clk) / 1000000);
996 else
997 stb0899_dvbs2_set_carr_freq(state, internal->center_freq, (internal->master_clk) / 1000000);
998
999 /* Set carrier search params (zigzag, num steps and freq step size) */
1000 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, ACQ_CNTRL2);
1001 STB0899_SETFIELD_VAL(ZIGZAG, reg, 1);
1002 STB0899_SETFIELD_VAL(NUM_STEPS, reg, steps);
1003 STB0899_SETFIELD_VAL(FREQ_STEPSIZE, reg, step_size);
1004 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ACQ_CNTRL2, STB0899_OFF0_ACQ_CNTRL2, reg);
1005}
1006
1007/*
1008 * stb0899_dvbs2_btr_init
1009 * initialize the timing loop
1010 */
1011static void stb0899_dvbs2_btr_init(struct stb0899_state *state)
1012{
1013 u32 reg;
1014
1015 /* set enable BTR loopback */
1016 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_CNTRL);
1017 STB0899_SETFIELD_VAL(INTRP_PHS_SENSE, reg, 1);
1018 STB0899_SETFIELD_VAL(BTR_ERR_ENA, reg, 1);
1019 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_CNTRL, STB0899_OFF0_BTR_CNTRL, reg);
1020
1021 /* fix btr freq accum at 0 */
1022 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_FREQ_INIT, STB0899_OFF0_BTR_FREQ_INIT, 0x10000000);
1023 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_FREQ_INIT, STB0899_OFF0_BTR_FREQ_INIT, 0x00000000);
1024
1025 /* fix btr freq accum at 0 */
1026 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_PHS_INIT, STB0899_OFF0_BTR_PHS_INIT, 0x10000000);
1027 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_PHS_INIT, STB0899_OFF0_BTR_PHS_INIT, 0x00000000);
1028}
1029
1030/*
1031 * stb0899_dvbs2_reacquire
1032 * trigger a DVB-S2 acquisition
1033 */
1034static void stb0899_dvbs2_reacquire(struct stb0899_state *state)
1035{
1036 u32 reg = 0;
1037
1038 /* demod soft reset */
1039 STB0899_SETFIELD_VAL(DVBS2_RESET, reg, 1);
1040 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_RESET_CNTRL, STB0899_OFF0_RESET_CNTRL, reg);
1041
1042 /*Reset Timing Loop */
1043 stb0899_dvbs2_btr_init(state);
1044
1045 /* reset Carrier loop */
1046 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_FREQ_INIT, STB0899_OFF0_CRL_FREQ_INIT, (1 << 30));
1047 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_FREQ_INIT, STB0899_OFF0_CRL_FREQ_INIT, 0);
1048 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_LOOP_GAIN, STB0899_OFF0_CRL_LOOP_GAIN, 0);
1049 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_PHS_INIT, STB0899_OFF0_CRL_PHS_INIT, (1 << 30));
1050 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_PHS_INIT, STB0899_OFF0_CRL_PHS_INIT, 0);
1051
1052 /*release demod soft reset */
1053 reg = 0;
1054 STB0899_SETFIELD_VAL(DVBS2_RESET, reg, 0);
1055 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_RESET_CNTRL, STB0899_OFF0_RESET_CNTRL, reg);
1056
1057 /* start acquisition process */
1058 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ACQUIRE_TRIG, STB0899_OFF0_ACQUIRE_TRIG, 1);
1059 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_LOCK_LOST, STB0899_OFF0_LOCK_LOST, 0);
1060
1061 /* equalizer Init */
1062 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQUALIZER_INIT, STB0899_OFF0_EQUALIZER_INIT, 1);
1063
1064 /* Start equalizer */
1065 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQUALIZER_INIT, STB0899_OFF0_EQUALIZER_INIT, 0);
1066
1067 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
1068 STB0899_SETFIELD_VAL(EQ_SHIFT, reg, 0);
1069 STB0899_SETFIELD_VAL(EQ_DISABLE_UPDATE, reg, 0);
1070 STB0899_SETFIELD_VAL(EQ_DELAY, reg, 0x05);
1071 STB0899_SETFIELD_VAL(EQ_ADAPT_MODE, reg, 0x01);
1072 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
1073
1074 /* RESET Packet delineator */
1075 stb0899_write_reg(state, STB0899_PDELCTRL, 0x4a);
1076}
1077
1078/*
1079 * stb0899_dvbs2_get_dmd_status
1080 * get DVB-S2 Demod LOCK status
1081 */
1082static enum stb0899_status stb0899_dvbs2_get_dmd_status(struct stb0899_state *state, int timeout)
1083{
1084 int time = -10, lock = 0, uwp, csm;
1085 u32 reg;
1086
1087 do {
1088 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_STATUS);
1089 dprintk(state->verbose, FE_DEBUG, 1, "DMD_STATUS=[0x%02x]", reg);
1090 if (STB0899_GETFIELD(IF_AGC_LOCK, reg))
1091 dprintk(state->verbose, FE_DEBUG, 1, "------------->IF AGC LOCKED !");
1092 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_STAT2);
1093 dprintk(state->verbose, FE_DEBUG, 1, "----------->DMD STAT2=[0x%02x]", reg);
1094 uwp = STB0899_GETFIELD(UWP_LOCK, reg);
1095 csm = STB0899_GETFIELD(CSM_LOCK, reg);
1096 if (uwp && csm)
1097 lock = 1;
1098
1099 time += 10;
1100 msleep(10);
1101
1102 } while ((!lock) && (time <= timeout));
1103
1104 if (lock) {
1105 dprintk(state->verbose, FE_DEBUG, 1, "----------------> DVB-S2 LOCK !");
1106 return DVBS2_DEMOD_LOCK;
1107 } else {
1108 return DVBS2_DEMOD_NOLOCK;
1109 }
1110}
1111
1112/*
1113 * stb0899_dvbs2_get_data_lock
1114 * get FEC status
1115 */
1116static int stb0899_dvbs2_get_data_lock(struct stb0899_state *state, int timeout)
1117{
1118 int time = 0, lock = 0;
1119 u8 reg;
1120
1121 while ((!lock) && (time < timeout)) {
1122 reg = stb0899_read_reg(state, STB0899_CFGPDELSTATUS1);
1123 dprintk(state->verbose, FE_DEBUG, 1, "---------> CFGPDELSTATUS=[0x%02x]", reg);
1124 lock = STB0899_GETFIELD(CFGPDELSTATUS_LOCK, reg);
1125 time++;
1126 }
1127
1128 return lock;
1129}
1130
1131/*
1132 * stb0899_dvbs2_get_fec_status
1133 * get DVB-S2 FEC LOCK status
1134 */
1135static enum stb0899_status stb0899_dvbs2_get_fec_status(struct stb0899_state *state, int timeout)
1136{
1137 int time = 0, Locked;
1138
1139 do {
1140 Locked = stb0899_dvbs2_get_data_lock(state, 1);
1141 time++;
1142 msleep(1);
1143
1144 } while ((!Locked) && (time < timeout));
1145
1146 if (Locked) {
1147 dprintk(state->verbose, FE_DEBUG, 1, "---------->DVB-S2 FEC LOCK !");
1148 return DVBS2_FEC_LOCK;
1149 } else {
1150 return DVBS2_FEC_NOLOCK;
1151 }
1152}
1153
1154
1155/*
1156 * stb0899_dvbs2_init_csm
1157 * set parameters for manual mode
1158 */
1159static void stb0899_dvbs2_init_csm(struct stb0899_state *state, int pilots, enum stb0899_modcod modcod)
1160{
1161 struct stb0899_internal *internal = &state->internal;
1162
1163 s32 dvt_tbl = 1, two_pass = 0, agc_gain = 6, agc_shift = 0, loop_shift = 0, phs_diff_thr = 0x80;
1164 s32 gamma_acq, gamma_rho_acq, gamma_trk, gamma_rho_trk, lock_count_thr;
1165 u32 csm1, csm2, csm3, csm4;
1166
1167 if (((internal->master_clk / internal->srate) <= 4) && (modcod <= 11) && (pilots == 1)) {
1168 switch (modcod) {
1169 case STB0899_QPSK_12:
1170 gamma_acq = 25;
1171 gamma_rho_acq = 2700;
1172 gamma_trk = 12;
1173 gamma_rho_trk = 180;
1174 lock_count_thr = 8;
1175 break;
1176 case STB0899_QPSK_35:
1177 gamma_acq = 38;
1178 gamma_rho_acq = 7182;
1179 gamma_trk = 14;
1180 gamma_rho_trk = 308;
1181 lock_count_thr = 8;
1182 break;
1183 case STB0899_QPSK_23:
1184 gamma_acq = 42;
1185 gamma_rho_acq = 9408;
1186 gamma_trk = 17;
1187 gamma_rho_trk = 476;
1188 lock_count_thr = 8;
1189 break;
1190 case STB0899_QPSK_34:
1191 gamma_acq = 53;
1192 gamma_rho_acq = 16642;
1193 gamma_trk = 19;
1194 gamma_rho_trk = 646;
1195 lock_count_thr = 8;
1196 break;
1197 case STB0899_QPSK_45:
1198 gamma_acq = 53;
1199 gamma_rho_acq = 17119;
1200 gamma_trk = 22;
1201 gamma_rho_trk = 880;
1202 lock_count_thr = 8;
1203 break;
1204 case STB0899_QPSK_56:
1205 gamma_acq = 55;
1206 gamma_rho_acq = 19250;
1207 gamma_trk = 23;
1208 gamma_rho_trk = 989;
1209 lock_count_thr = 8;
1210 break;
1211 case STB0899_QPSK_89:
1212 gamma_acq = 60;
1213 gamma_rho_acq = 24240;
1214 gamma_trk = 24;
1215 gamma_rho_trk = 1176;
1216 lock_count_thr = 8;
1217 break;
1218 case STB0899_QPSK_910:
1219 gamma_acq = 66;
1220 gamma_rho_acq = 29634;
1221 gamma_trk = 24;
1222 gamma_rho_trk = 1176;
1223 lock_count_thr = 8;
1224 break;
1225 default:
1226 gamma_acq = 66;
1227 gamma_rho_acq = 29634;
1228 gamma_trk = 24;
1229 gamma_rho_trk = 1176;
1230 lock_count_thr = 8;
1231 break;
1232 }
1233
1234 csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
1235 STB0899_SETFIELD_VAL(CSM_AUTO_PARAM, csm1, 0);
1236 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
1237
1238 csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
1239 csm2 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL2);
1240 csm3 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL3);
1241 csm4 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL4);
1242
1243 STB0899_SETFIELD_VAL(CSM_DVT_TABLE, csm1, dvt_tbl);
1244 STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, two_pass);
1245 STB0899_SETFIELD_VAL(CSM_AGC_GAIN, csm1, agc_gain);
1246 STB0899_SETFIELD_VAL(CSM_AGC_SHIFT, csm1, agc_shift);
1247 STB0899_SETFIELD_VAL(FE_LOOP_SHIFT, csm1, loop_shift);
1248 STB0899_SETFIELD_VAL(CSM_GAMMA_ACQ, csm2, gamma_acq);
1249 STB0899_SETFIELD_VAL(CSM_GAMMA_RHOACQ, csm2, gamma_rho_acq);
1250 STB0899_SETFIELD_VAL(CSM_GAMMA_TRACK, csm3, gamma_trk);
1251 STB0899_SETFIELD_VAL(CSM_GAMMA_RHOTRACK, csm3, gamma_rho_trk);
1252 STB0899_SETFIELD_VAL(CSM_LOCKCOUNT_THRESH, csm4, lock_count_thr);
1253 STB0899_SETFIELD_VAL(CSM_PHASEDIFF_THRESH, csm4, phs_diff_thr);
1254
1255 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
1256 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL2, STB0899_OFF0_CSM_CNTRL2, csm2);
1257 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL3, STB0899_OFF0_CSM_CNTRL3, csm3);
1258 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL4, STB0899_OFF0_CSM_CNTRL4, csm4);
1259 }
1260}
1261
1262/*
1263 * stb0899_dvbs2_get_srate
1264 * get DVB-S2 Symbol Rate
1265 */
1266static u32 stb0899_dvbs2_get_srate(struct stb0899_state *state)
1267{
1268 struct stb0899_internal *internal = &state->internal;
1269 struct stb0899_config *config = state->config;
1270
1271 u32 bTrNomFreq, srate, decimRate, intval1, intval2, reg;
1272 int div1, div2, rem1, rem2;
1273
1274 div1 = config->btr_nco_bits / 2;
1275 div2 = config->btr_nco_bits - div1 - 1;
1276
1277 bTrNomFreq = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_NOM_FREQ);
1278
1279 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DECIM_CNTRL);
1280 decimRate = STB0899_GETFIELD(DECIM_RATE, reg);
1281 decimRate = (1 << decimRate);
1282
1283 intval1 = internal->master_clk / (1 << div1);
1284 intval2 = bTrNomFreq / (1 << div2);
1285
1286 rem1 = internal->master_clk % (1 << div1);
1287 rem2 = bTrNomFreq % (1 << div2);
1288 /* only for integer calculation */
1289 srate = (intval1 * intval2) + ((intval1 * rem2) / (1 << div2)) + ((intval2 * rem1) / (1 << div1));
1290 srate /= decimRate; /* symbrate = (btrnomfreq_register_val * MasterClock) / 2^(27 + decim_rate_field) */
1291
1292 return srate;
1293}
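/*
 * Editorial sketch (not part of the driver): per the comment above, the
 * symbol rate is symbrate = BTR_NOM_FREQ * master_clk / 2^(btr_nco_bits - 1),
 * further divided by 2^decim_rate.  stb0899_dvbs2_get_srate() splits both
 * factors into quotient and remainder so every product stays within 32 bits.
 * A minimal standalone illustration of that overflow-avoiding split (the
 * r1*r2 cross term is dropped, exactly as in the function above):
 */
static u32 stb0899_srate_sketch(u32 master_clk, u32 btr_nom_freq,
				u32 nco_bits, u32 decim_rate)
{
	u32 div1 = nco_bits / 2;			/* 14 for a 28 bit NCO */
	u32 div2 = nco_bits - div1 - 1;			/* 13, so div1 + div2 = 27 */
	u32 q1 = master_clk / (1 << div1), r1 = master_clk % (1 << div1);
	u32 q2 = btr_nom_freq / (1 << div2), r2 = btr_nom_freq % (1 << div2);
	u32 srate = q1 * q2 + (q1 * r2) / (1 << div2) + (q2 * r1) / (1 << div1);

	return srate / (1 << decim_rate);		/* decimation, as above */
}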
1294
1295/*
1296 * stb0899_dvbs2_algo
1297 * Search for signal, timing, carrier and data for a given
1298 * frequency in a given range
1299 */
1300enum stb0899_status stb0899_dvbs2_algo(struct stb0899_state *state)
1301{
1302 struct stb0899_internal *internal = &state->internal;
1303 enum stb0899_modcod modcod;
1304
1305 s32 offsetfreq, searchTime, FecLockTime, pilots, iqSpectrum;
1306 int i = 0;
1307 u32 reg, csm1;
1308
1309 if (internal->srate <= 2000000) {
1310 searchTime = 5000; /* 5000 ms max time to lock UWP and CSM, SYMB <= 2Mbs */
1311 FecLockTime = 350; /* 350 ms max time to lock FEC, SYMB <= 2Mbs */
1312 } else if (internal->srate <= 5000000) {
1313 searchTime = 2500; /* 2500 ms max time to lock UWP and CSM, 2Mbs < SYMB <= 5Mbs */
1314 FecLockTime = 170; /* 170 ms max time to lock FEC, 2Mbs < SYMB <= 5Mbs */
1315 } else if (internal->srate <= 10000000) {
1316 searchTime = 1500; /* 1500 ms max time to lock UWP and CSM, 5Mbs < SYMB <= 10Mbs */
1317 FecLockTime = 80; /* 80 ms max time to lock FEC, 5Mbs < SYMB <= 10Mbs */
1318 } else if (internal->srate <= 15000000) {
1319 searchTime = 500; /* 500 ms max time to lock UWP and CSM, 10Mbs < SYMB <= 15Mbs */
1320 FecLockTime = 50; /* 50 ms max time to lock FEC, 10Mbs < SYMB <= 15Mbs */
1321 } else if (internal->srate <= 20000000) {
1322 searchTime = 300; /* 300 ms max time to lock UWP and CSM, 15Mbs < SYMB <= 20Mbs */
1323 FecLockTime = 30; /* 30 ms max time to lock FEC, 15Mbs < SYMB <= 20Mbs */
1324 } else if (internal->srate <= 25000000) {
1325 searchTime = 250; /* 250 ms max time to lock UWP and CSM, 20Mbs < SYMB <= 25Mbs */
1326 FecLockTime = 25; /* 25 ms max time to lock FEC, 20Mbs < SYMB <= 25Mbs */
1327 } else {
1328 searchTime = 150; /* 150 ms max time to lock UWP and CSM, SYMB > 25Mbs */
1329 FecLockTime = 20; /* 20 ms max time to lock FEC, SYMB > 25Mbs */
1330 }
1331
1332 /* Maintain Stream Merger in reset during acquisition */
1333 reg = stb0899_read_reg(state, STB0899_TSTRES);
1334 STB0899_SETFIELD_VAL(FRESRS, reg, 1);
1335 stb0899_write_reg(state, STB0899_TSTRES, reg);
1336
1337 /* enable tuner I/O */
1338 stb0899_i2c_gate_ctrl(&state->frontend, 1);
1339
1340 /* Move tuner to frequency */
1341 if (state->config->tuner_set_frequency)
1342 state->config->tuner_set_frequency(&state->frontend, internal->freq);
1343 if (state->config->tuner_get_frequency)
1344 state->config->tuner_get_frequency(&state->frontend, &internal->freq);
1345
1346 /* disable tuner I/O */
1347 stb0899_i2c_gate_ctrl(&state->frontend, 0);
1348
1349 /* Set IF AGC to acquisition */
1350 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL);
1351 STB0899_SETFIELD_VAL(IF_LOOP_GAIN, reg, 4);
1352 STB0899_SETFIELD_VAL(IF_AGC_REF, reg, 32);
1353 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL, STB0899_OFF0_IF_AGC_CNTRL, reg);
1354
1355 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL2);
1356 STB0899_SETFIELD_VAL(IF_AGC_DUMP_PER, reg, 0);
1357 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL2, STB0899_OFF0_IF_AGC_CNTRL2, reg);
1358
1359 /* Initialisation */
1360 stb0899_dvbs2_init_calc(state);
1361
1362 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
1363 switch (internal->inversion) {
1364 case IQ_SWAP_OFF:
1365 STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, 0);
1366 break;
1367 case IQ_SWAP_ON:
1368 STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, 1);
1369 break;
1370 case IQ_SWAP_AUTO: /* use last successful search first */
1371 STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, 1);
1372 break;
1373 }
1374 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DMD_CNTRL2, STB0899_OFF0_DMD_CNTRL2, reg);
1375 stb0899_dvbs2_reacquire(state);
1376
1377 /* Wait for demod lock (UWP and CSM) */
1378 internal->status = stb0899_dvbs2_get_dmd_status(state, searchTime);
1379
1380 if (internal->status == DVBS2_DEMOD_LOCK) {
1381 dprintk(state->verbose, FE_DEBUG, 1, "------------> DVB-S2 DEMOD LOCK !");
1382 i = 0;
1383 /* Demod Locked, check FEC status */
1384 internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
1385
1386 /* If false lock (UWP and CSM locked but no FEC), try 3 times max */
1387 while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
1388 /* Read the frequency offset*/
1389 offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
1390
1391 /* Set the Nominal frequency to the found frequency offset for the next reacquire*/
1392 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
1393 STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, offsetfreq);
1394 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
1395 stb0899_dvbs2_reacquire(state);
1396 internal->status = stb0899_dvbs2_get_fec_status(state, searchTime);
1397 i++;
1398 }
1399 }
1400
1401 if (internal->status != DVBS2_FEC_LOCK) {
1402 if (internal->inversion == IQ_SWAP_AUTO) {
1403 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
1404 iqSpectrum = STB0899_GETFIELD(SPECTRUM_INVERT, reg);
1405 /* IQ Spectrum Inversion */
1406 STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, !iqSpectrum);
1407 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DMD_CNTRL2, STB0899_OFF0_DMD_CNTRL2, reg);
1408 /* start acquisition process */
1409 stb0899_dvbs2_reacquire(state);
1410
1411 /* Wait for demod lock (UWP and CSM) */
1412 internal->status = stb0899_dvbs2_get_dmd_status(state, searchTime);
1413 if (internal->status == DVBS2_DEMOD_LOCK) {
1414 i = 0;
1415 /* Demod Locked, check FEC */
1416 internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
1417 /* If false lock (UWP and CSM locked but no FEC), try 3 times max */
1418 while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
1419 /* Read the frequency offset*/
1420 offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
1421
1422 /* Set the Nominal frequency to the found frequency offset for the next reacquire*/
1423 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
1424 STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, offsetfreq);
1425 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
1426
1427 stb0899_dvbs2_reacquire(state);
1428 internal->status = stb0899_dvbs2_get_fec_status(state, searchTime);
1429 i++;
1430 }
1431 }
1432/*
1433 if (pParams->DVBS2State == FE_DVBS2_FEC_LOCKED)
1434 pParams->IQLocked = !iqSpectrum;
1435*/
1436 }
1437 }
1438 if (internal->status == DVBS2_FEC_LOCK) {
1439 dprintk(state->verbose, FE_DEBUG, 1, "----------------> DVB-S2 FEC Lock !");
1440 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_STAT2);
1441 modcod = STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 2;
1442 pilots = STB0899_GETFIELD(UWP_DECODE_MOD, reg) & 0x01;
1443
1444 if ((((10 * internal->master_clk) / (internal->srate / 10)) <= 410) &&
1445 (INRANGE(STB0899_QPSK_23, modcod, STB0899_QPSK_910)) &&
1446 (pilots == 1)) {
1447
1448 stb0899_dvbs2_init_csm(state, pilots, modcod);
1449 /* Wait for UWP, CSM and data LOCK, 20 ms max */
1450 internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
1451
1452 i = 0;
1453 while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
1454 csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
1455 STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, 1);
1456 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
1457 csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
1458 STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, 0);
1459 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
1460
1461 internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
1462 i++;
1463 }
1464 }
1465
1466 if ((((10 * internal->master_clk) / (internal->srate / 10)) <= 410) &&
1467 (INRANGE(STB0899_QPSK_12, modcod, STB0899_QPSK_35)) &&
1468 (pilots == 1)) {
1469
1470 /* Equalizer Disable update */
1471 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
1472 STB0899_SETFIELD_VAL(EQ_DISABLE_UPDATE, reg, 1);
1473 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
1474 }
1475
1476 /* slow down the Equalizer once locked */
1477 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
1478 STB0899_SETFIELD_VAL(EQ_SHIFT, reg, 0x02);
1479 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
1480
1481 /* Store signal parameters */
1482 offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
1483
1484 offsetfreq = offsetfreq / ((1 << 30) / 1000);
1485 offsetfreq *= (internal->master_clk / 1000000);
1486 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
1487 if (STB0899_GETFIELD(SPECTRUM_INVERT, reg))
1488 offsetfreq *= -1;
1489
1490 internal->freq = internal->freq - offsetfreq;
1491 internal->srate = stb0899_dvbs2_get_srate(state);
1492
1493 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_STAT2);
1494 internal->modcod = STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 2;
1495 internal->pilots = STB0899_GETFIELD(UWP_DECODE_MOD, reg) & 0x01;
1496 internal->frame_length = (STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 1) & 0x01;
1497
1498 /* Set IF AGC to tracking */
1499 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL);
1500 STB0899_SETFIELD_VAL(IF_LOOP_GAIN, reg, 3);
1501
1502 /* if QPSK 1/2, QPSK 3/5 or QPSK 2/3, set IF AGC reference to 16, otherwise 32 */
1503 if (INRANGE(STB0899_QPSK_12, internal->modcod, STB0899_QPSK_23))
1504 STB0899_SETFIELD_VAL(IF_AGC_REF, reg, 16);
1505
1506 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL, STB0899_OFF0_IF_AGC_CNTRL, reg);
1507
1508 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL2);
1509 STB0899_SETFIELD_VAL(IF_AGC_DUMP_PER, reg, 7);
1510 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL2, STB0899_OFF0_IF_AGC_CNTRL2, reg);
1511 }
1512
1513 /* Release Stream Merger Reset */
1514 reg = stb0899_read_reg(state, STB0899_TSTRES);
1515 STB0899_SETFIELD_VAL(FRESRS, reg, 0);
1516 stb0899_write_reg(state, STB0899_TSTRES, reg);
1517
1518 return internal->status;
1519}
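/*
 * Editorial sketch (illustrative only, not part of the driver): CRL_FREQ
 * expresses the residual carrier offset as a fraction of the master clock
 * over the 30 bit carrier NCO, i.e. offset_Hz = crl_freq * master_clk / 2^30.
 * The two-step scaling in stb0899_dvbs2_algo() evaluates this in kHz using
 * only 32 bit arithmetic, at the cost of quantising the result in steps of
 * (master_clk / 1000000) kHz:
 */
static s32 stb0899_crl_offset_khz_sketch(s32 crl_freq, u32 master_clk)
{
	s32 tmp = crl_freq / ((1 << 30) / 1000);	/* thousandths of the NCO range */

	return tmp * (s32)(master_clk / 1000000);	/* kHz, for master_clk in Hz */
}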
diff --git a/drivers/media/dvb/frontends/stb0899_cfg.h b/drivers/media/dvb/frontends/stb0899_cfg.h
new file mode 100644
index 000000000000..0867906d3ff3
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb0899_cfg.h
@@ -0,0 +1,287 @@
1/*
2 STB0899 Multistandard Frontend driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#ifndef __STB0899_CFG_H
23#define __STB0899_CFG_H
24
25static const struct stb0899_s2_reg stb0899_s2_init_2[] = {
26
27 { STB0899_OFF0_DMD_STATUS , STB0899_BASE_DMD_STATUS , 0x00000103 }, /* DMDSTATUS */
28 { STB0899_OFF0_CRL_FREQ , STB0899_BASE_CRL_FREQ , 0x3ed1da56 }, /* CRLFREQ */
29 { STB0899_OFF0_BTR_FREQ , STB0899_BASE_BTR_FREQ , 0x00004000 }, /* BTRFREQ */
30 { STB0899_OFF0_IF_AGC_GAIN , STB0899_BASE_IF_AGC_GAIN , 0x00002ade }, /* IFAGCGAIN */
31 { STB0899_OFF0_BB_AGC_GAIN , STB0899_BASE_BB_AGC_GAIN , 0x000001bc }, /* BBAGCGAIN */
32 { STB0899_OFF0_DC_OFFSET , STB0899_BASE_DC_OFFSET , 0x00000200 }, /* DCOFFSET */
33 { STB0899_OFF0_DMD_CNTRL , STB0899_BASE_DMD_CNTRL , 0x0000000f }, /* DMDCNTRL */
34
35 { STB0899_OFF0_IF_AGC_CNTRL , STB0899_BASE_IF_AGC_CNTRL , 0x03fb4a20 }, /* IFAGCCNTRL */
36 { STB0899_OFF0_BB_AGC_CNTRL , STB0899_BASE_BB_AGC_CNTRL , 0x00200c97 }, /* BBAGCCNTRL */
37
38 { STB0899_OFF0_CRL_CNTRL , STB0899_BASE_CRL_CNTRL , 0x00000016 }, /* CRLCNTRL */
39 { STB0899_OFF0_CRL_PHS_INIT , STB0899_BASE_CRL_PHS_INIT , 0x00000000 }, /* CRLPHSINIT */
40 { STB0899_OFF0_CRL_FREQ_INIT , STB0899_BASE_CRL_FREQ_INIT , 0x00000000 }, /* CRLFREQINIT */
41 { STB0899_OFF0_CRL_LOOP_GAIN , STB0899_BASE_CRL_LOOP_GAIN , 0x00000000 }, /* CRLLOOPGAIN */
42 { STB0899_OFF0_CRL_NOM_FREQ , STB0899_BASE_CRL_NOM_FREQ , 0x3ed097b6 }, /* CRLNOMFREQ */
43 { STB0899_OFF0_CRL_SWP_RATE , STB0899_BASE_CRL_SWP_RATE , 0x00000000 }, /* CRLSWPRATE */
44 { STB0899_OFF0_CRL_MAX_SWP , STB0899_BASE_CRL_MAX_SWP , 0x00000000 }, /* CRLMAXSWP */
45 { STB0899_OFF0_CRL_LK_CNTRL , STB0899_BASE_CRL_LK_CNTRL , 0x0f6cdc01 }, /* CRLLKCNTRL */
46 { STB0899_OFF0_DECIM_CNTRL , STB0899_BASE_DECIM_CNTRL , 0x00000000 }, /* DECIMCNTRL */
47 { STB0899_OFF0_BTR_CNTRL , STB0899_BASE_BTR_CNTRL , 0x00003993 }, /* BTRCNTRL */
48 { STB0899_OFF0_BTR_LOOP_GAIN , STB0899_BASE_BTR_LOOP_GAIN , 0x000d3c6f }, /* BTRLOOPGAIN */
49 { STB0899_OFF0_BTR_PHS_INIT , STB0899_BASE_BTR_PHS_INIT , 0x00000000 }, /* BTRPHSINIT */
50 { STB0899_OFF0_BTR_FREQ_INIT , STB0899_BASE_BTR_FREQ_INIT , 0x00000000 }, /* BTRFREQINIT */
51 { STB0899_OFF0_BTR_NOM_FREQ , STB0899_BASE_BTR_NOM_FREQ , 0x0238e38e }, /* BTRNOMFREQ */
52 { STB0899_OFF0_BTR_LK_CNTRL , STB0899_BASE_BTR_LK_CNTRL , 0x00000000 }, /* BTRLKCNTRL */
53 { STB0899_OFF0_DECN_CNTRL , STB0899_BASE_DECN_CNTRL , 0x00000000 }, /* DECNCNTRL */
54 { STB0899_OFF0_TP_CNTRL , STB0899_BASE_TP_CNTRL , 0x00000000 }, /* TPCNTRL */
55 { STB0899_OFF0_TP_BUF_STATUS , STB0899_BASE_TP_BUF_STATUS , 0x00000000 }, /* TPBUFSTATUS */
56 { STB0899_OFF0_DC_ESTIM , STB0899_BASE_DC_ESTIM , 0x00000000 }, /* DCESTIM */
57 { STB0899_OFF0_FLL_CNTRL , STB0899_BASE_FLL_CNTRL , 0x00000000 }, /* FLLCNTRL */
58 { STB0899_OFF0_FLL_FREQ_WD , STB0899_BASE_FLL_FREQ_WD , 0x40070000 }, /* FLLFREQWD */
59 { STB0899_OFF0_ANTI_ALIAS_SEL , STB0899_BASE_ANTI_ALIAS_SEL , 0x00000001 }, /* ANTIALIASSEL */
60 { STB0899_OFF0_RRC_ALPHA , STB0899_BASE_RRC_ALPHA , 0x00000002 }, /* RRCALPHA */
61 { STB0899_OFF0_DC_ADAPT_LSHFT , STB0899_BASE_DC_ADAPT_LSHFT , 0x00000000 }, /* DCADAPTISHFT */
62 { STB0899_OFF0_IMB_OFFSET , STB0899_BASE_IMB_OFFSET , 0x0000fe01 }, /* IMBOFFSET */
63 { STB0899_OFF0_IMB_ESTIMATE , STB0899_BASE_IMB_ESTIMATE , 0x00000000 }, /* IMBESTIMATE */
64 { STB0899_OFF0_IMB_CNTRL , STB0899_BASE_IMB_CNTRL , 0x00000001 }, /* IMBCNTRL */
65 { STB0899_OFF0_IF_AGC_CNTRL2 , STB0899_BASE_IF_AGC_CNTRL2 , 0x00005007 }, /* IFAGCCNTRL2 */
66 { STB0899_OFF0_DMD_CNTRL2 , STB0899_BASE_DMD_CNTRL2 , 0x00000002 }, /* DMDCNTRL2 */
67 { STB0899_OFF0_TP_BUFFER , STB0899_BASE_TP_BUFFER , 0x00000000 }, /* TPBUFFER */
68 { STB0899_OFF0_TP_BUFFER1 , STB0899_BASE_TP_BUFFER1 , 0x00000000 }, /* TPBUFFER1 */
69 { STB0899_OFF0_TP_BUFFER2 , STB0899_BASE_TP_BUFFER2 , 0x00000000 }, /* TPBUFFER2 */
70 { STB0899_OFF0_TP_BUFFER3 , STB0899_BASE_TP_BUFFER3 , 0x00000000 }, /* TPBUFFER3 */
71 { STB0899_OFF0_TP_BUFFER4 , STB0899_BASE_TP_BUFFER4 , 0x00000000 }, /* TPBUFFER4 */
72 { STB0899_OFF0_TP_BUFFER5 , STB0899_BASE_TP_BUFFER5 , 0x00000000 }, /* TPBUFFER5 */
73 { STB0899_OFF0_TP_BUFFER6 , STB0899_BASE_TP_BUFFER6 , 0x00000000 }, /* TPBUFFER6 */
74 { STB0899_OFF0_TP_BUFFER7 , STB0899_BASE_TP_BUFFER7 , 0x00000000 }, /* TPBUFFER7 */
75 { STB0899_OFF0_TP_BUFFER8 , STB0899_BASE_TP_BUFFER8 , 0x00000000 }, /* TPBUFFER8 */
76 { STB0899_OFF0_TP_BUFFER9 , STB0899_BASE_TP_BUFFER9 , 0x00000000 }, /* TPBUFFER9 */
77 { STB0899_OFF0_TP_BUFFER10 , STB0899_BASE_TP_BUFFER10 , 0x00000000 }, /* TPBUFFER10 */
78 { STB0899_OFF0_TP_BUFFER11 , STB0899_BASE_TP_BUFFER11 , 0x00000000 }, /* TPBUFFER11 */
79 { STB0899_OFF0_TP_BUFFER12 , STB0899_BASE_TP_BUFFER12 , 0x00000000 }, /* TPBUFFER12 */
80 { STB0899_OFF0_TP_BUFFER13 , STB0899_BASE_TP_BUFFER13 , 0x00000000 }, /* TPBUFFER13 */
81 { STB0899_OFF0_TP_BUFFER14 , STB0899_BASE_TP_BUFFER14 , 0x00000000 }, /* TPBUFFER14 */
82 { STB0899_OFF0_TP_BUFFER15 , STB0899_BASE_TP_BUFFER15 , 0x00000000 }, /* TPBUFFER15 */
83 { STB0899_OFF0_TP_BUFFER16 , STB0899_BASE_TP_BUFFER16 , 0x0000ff00 }, /* TPBUFFER16 */
84 { STB0899_OFF0_TP_BUFFER17 , STB0899_BASE_TP_BUFFER17 , 0x00000100 }, /* TPBUFFER17 */
85 { STB0899_OFF0_TP_BUFFER18 , STB0899_BASE_TP_BUFFER18 , 0x0000fe01 }, /* TPBUFFER18 */
86 { STB0899_OFF0_TP_BUFFER19 , STB0899_BASE_TP_BUFFER19 , 0x000004fe }, /* TPBUFFER19 */
87 { STB0899_OFF0_TP_BUFFER20 , STB0899_BASE_TP_BUFFER20 , 0x0000cfe7 }, /* TPBUFFER20 */
88 { STB0899_OFF0_TP_BUFFER21 , STB0899_BASE_TP_BUFFER21 , 0x0000bec6 }, /* TPBUFFER21 */
89 { STB0899_OFF0_TP_BUFFER22 , STB0899_BASE_TP_BUFFER22 , 0x0000c2bf }, /* TPBUFFER22 */
90 { STB0899_OFF0_TP_BUFFER23 , STB0899_BASE_TP_BUFFER23 , 0x0000c1c1 }, /* TPBUFFER23 */
91 { STB0899_OFF0_TP_BUFFER24 , STB0899_BASE_TP_BUFFER24 , 0x0000c1c1 }, /* TPBUFFER24 */
92 { STB0899_OFF0_TP_BUFFER25 , STB0899_BASE_TP_BUFFER25 , 0x0000c1c1 }, /* TPBUFFER25 */
93 { STB0899_OFF0_TP_BUFFER26 , STB0899_BASE_TP_BUFFER26 , 0x0000c1c1 }, /* TPBUFFER26 */
94 { STB0899_OFF0_TP_BUFFER27 , STB0899_BASE_TP_BUFFER27 , 0x0000c1c0 }, /* TPBUFFER27 */
95 { STB0899_OFF0_TP_BUFFER28 , STB0899_BASE_TP_BUFFER28 , 0x0000c0c0 }, /* TPBUFFER28 */
96 { STB0899_OFF0_TP_BUFFER29 , STB0899_BASE_TP_BUFFER29 , 0x0000c1c1 }, /* TPBUFFER29 */
97 { STB0899_OFF0_TP_BUFFER30 , STB0899_BASE_TP_BUFFER30 , 0x0000c1c1 }, /* TPBUFFER30 */
98 { STB0899_OFF0_TP_BUFFER31 , STB0899_BASE_TP_BUFFER31 , 0x0000c0c1 }, /* TPBUFFER31 */
99 { STB0899_OFF0_TP_BUFFER32 , STB0899_BASE_TP_BUFFER32 , 0x0000c0c1 }, /* TPBUFFER32 */
100 { STB0899_OFF0_TP_BUFFER33 , STB0899_BASE_TP_BUFFER33 , 0x0000c1c1 }, /* TPBUFFER33 */
101 { STB0899_OFF0_TP_BUFFER34 , STB0899_BASE_TP_BUFFER34 , 0x0000c1c1 }, /* TPBUFFER34 */
102 { STB0899_OFF0_TP_BUFFER35 , STB0899_BASE_TP_BUFFER35 , 0x0000c0c1 }, /* TPBUFFER35 */
103 { STB0899_OFF0_TP_BUFFER36 , STB0899_BASE_TP_BUFFER36 , 0x0000c1c1 }, /* TPBUFFER36 */
104 { STB0899_OFF0_TP_BUFFER37 , STB0899_BASE_TP_BUFFER37 , 0x0000c0c1 }, /* TPBUFFER37 */
105 { STB0899_OFF0_TP_BUFFER38 , STB0899_BASE_TP_BUFFER38 , 0x0000c1c1 }, /* TPBUFFER38 */
106 { STB0899_OFF0_TP_BUFFER39 , STB0899_BASE_TP_BUFFER39 , 0x0000c0c0 }, /* TPBUFFER39 */
107 { STB0899_OFF0_TP_BUFFER40 , STB0899_BASE_TP_BUFFER40 , 0x0000c1c0 }, /* TPBUFFER40 */
108 { STB0899_OFF0_TP_BUFFER41 , STB0899_BASE_TP_BUFFER41 , 0x0000c1c1 }, /* TPBUFFER41 */
109 { STB0899_OFF0_TP_BUFFER42 , STB0899_BASE_TP_BUFFER42 , 0x0000c0c0 }, /* TPBUFFER42 */
110 { STB0899_OFF0_TP_BUFFER43 , STB0899_BASE_TP_BUFFER43 , 0x0000c1c0 }, /* TPBUFFER43 */
111 { STB0899_OFF0_TP_BUFFER44 , STB0899_BASE_TP_BUFFER44 , 0x0000c0c1 }, /* TPBUFFER44 */
112 { STB0899_OFF0_TP_BUFFER45 , STB0899_BASE_TP_BUFFER45 , 0x0000c1be }, /* TPBUFFER45 */
113 { STB0899_OFF0_TP_BUFFER46 , STB0899_BASE_TP_BUFFER46 , 0x0000c1c9 }, /* TPBUFFER46 */
114 { STB0899_OFF0_TP_BUFFER47 , STB0899_BASE_TP_BUFFER47 , 0x0000c0da }, /* TPBUFFER47 */
115 { STB0899_OFF0_TP_BUFFER48 , STB0899_BASE_TP_BUFFER48 , 0x0000c0ba }, /* TPBUFFER48 */
116 { STB0899_OFF0_TP_BUFFER49 , STB0899_BASE_TP_BUFFER49 , 0x0000c1c4 }, /* TPBUFFER49 */
117 { STB0899_OFF0_TP_BUFFER50 , STB0899_BASE_TP_BUFFER50 , 0x0000c1bf }, /* TPBUFFER50 */
118 { STB0899_OFF0_TP_BUFFER51 , STB0899_BASE_TP_BUFFER51 , 0x0000c0c1 }, /* TPBUFFER51 */
119 { STB0899_OFF0_TP_BUFFER52 , STB0899_BASE_TP_BUFFER52 , 0x0000c1c0 }, /* TPBUFFER52 */
120 { STB0899_OFF0_TP_BUFFER53 , STB0899_BASE_TP_BUFFER53 , 0x0000c0c1 }, /* TPBUFFER53 */
121 { STB0899_OFF0_TP_BUFFER54 , STB0899_BASE_TP_BUFFER54 , 0x0000c1c1 }, /* TPBUFFER54 */
122 { STB0899_OFF0_TP_BUFFER55 , STB0899_BASE_TP_BUFFER55 , 0x0000c1c1 }, /* TPBUFFER55 */
123 { STB0899_OFF0_TP_BUFFER56 , STB0899_BASE_TP_BUFFER56 , 0x0000c1c1 }, /* TPBUFFER56 */
124 { STB0899_OFF0_TP_BUFFER57 , STB0899_BASE_TP_BUFFER57 , 0x0000c1c1 }, /* TPBUFFER57 */
125 { STB0899_OFF0_TP_BUFFER58 , STB0899_BASE_TP_BUFFER58 , 0x0000c1c1 }, /* TPBUFFER58 */
126 { STB0899_OFF0_TP_BUFFER59 , STB0899_BASE_TP_BUFFER59 , 0x0000c1c1 }, /* TPBUFFER59 */
127 { STB0899_OFF0_TP_BUFFER60 , STB0899_BASE_TP_BUFFER60 , 0x0000c1c1 }, /* TPBUFFER60 */
128 { STB0899_OFF0_TP_BUFFER61 , STB0899_BASE_TP_BUFFER61 , 0x0000c1c1 }, /* TPBUFFER61 */
129 { STB0899_OFF0_TP_BUFFER62 , STB0899_BASE_TP_BUFFER62 , 0x0000c1c1 }, /* TPBUFFER62 */
130 { STB0899_OFF0_TP_BUFFER63 , STB0899_BASE_TP_BUFFER63 , 0x0000c1c0 }, /* TPBUFFER63 */
131 { STB0899_OFF0_RESET_CNTRL , STB0899_BASE_RESET_CNTRL , 0x00000001 }, /* RESETCNTRL */
132 { STB0899_OFF0_ACM_ENABLE , STB0899_BASE_ACM_ENABLE , 0x00005654 }, /* ACMENABLE */
133 { STB0899_OFF0_DESCR_CNTRL , STB0899_BASE_DESCR_CNTRL , 0x00000000 }, /* DESCRCNTRL */
134 { STB0899_OFF0_CSM_CNTRL1 , STB0899_BASE_CSM_CNTRL1 , 0x00020019 }, /* CSMCNTRL1 */
135 { STB0899_OFF0_CSM_CNTRL2 , STB0899_BASE_CSM_CNTRL2 , 0x004b3237 }, /* CSMCNTRL2 */
136 { STB0899_OFF0_CSM_CNTRL3 , STB0899_BASE_CSM_CNTRL3 , 0x0003dd17 }, /* CSMCNTRL3 */
137 { STB0899_OFF0_CSM_CNTRL4 , STB0899_BASE_CSM_CNTRL4 , 0x00008008 }, /* CSMCNTRL4 */
138 { STB0899_OFF0_UWP_CNTRL1 , STB0899_BASE_UWP_CNTRL1 , 0x002a3106 }, /* UWPCNTRL1 */
139 { STB0899_OFF0_UWP_CNTRL2 , STB0899_BASE_UWP_CNTRL2 , 0x0006140a }, /* UWPCNTRL2 */
140 { STB0899_OFF0_UWP_STAT1 , STB0899_BASE_UWP_STAT1 , 0x00008000 }, /* UWPSTAT1 */
141 { STB0899_OFF0_UWP_STAT2 , STB0899_BASE_UWP_STAT2 , 0x00000000 }, /* UWPSTAT2 */
142 { STB0899_OFF0_DMD_STAT2 , STB0899_BASE_DMD_STAT2 , 0x00000000 }, /* DMDSTAT2 */
143 { STB0899_OFF0_FREQ_ADJ_SCALE , STB0899_BASE_FREQ_ADJ_SCALE , 0x00000471 }, /* FREQADJSCALE */
144 { STB0899_OFF0_UWP_CNTRL3 , STB0899_BASE_UWP_CNTRL3 , 0x017b0465 }, /* UWPCNTRL3 */
145 { STB0899_OFF0_SYM_CLK_SEL , STB0899_BASE_SYM_CLK_SEL , 0x00000002 }, /* SYMCLKSEL */
146 { STB0899_OFF0_SOF_SRCH_TO , STB0899_BASE_SOF_SRCH_TO , 0x00196464 }, /* SOFSRCHTO */
147 { STB0899_OFF0_ACQ_CNTRL1 , STB0899_BASE_ACQ_CNTRL1 , 0x00000603 }, /* ACQCNTRL1 */
148 { STB0899_OFF0_ACQ_CNTRL2 , STB0899_BASE_ACQ_CNTRL2 , 0x02046666 }, /* ACQCNTRL2 */
149 { STB0899_OFF0_ACQ_CNTRL3 , STB0899_BASE_ACQ_CNTRL3 , 0x10046583 }, /* ACQCNTRL3 */
150 { STB0899_OFF0_FE_SETTLE , STB0899_BASE_FE_SETTLE , 0x00010404 }, /* FESETTLE */
151 { STB0899_OFF0_AC_DWELL , STB0899_BASE_AC_DWELL , 0x0002aa8a }, /* ACDWELL */
152 { STB0899_OFF0_ACQUIRE_TRIG , STB0899_BASE_ACQUIRE_TRIG , 0x00000000 }, /* ACQUIRETRIG */
153 { STB0899_OFF0_LOCK_LOST , STB0899_BASE_LOCK_LOST , 0x00000001 }, /* LOCKLOST */
154 { STB0899_OFF0_ACQ_STAT1 , STB0899_BASE_ACQ_STAT1 , 0x00000500 }, /* ACQSTAT1 */
155 { STB0899_OFF0_ACQ_TIMEOUT , STB0899_BASE_ACQ_TIMEOUT , 0x0028a0a0 }, /* ACQTIMEOUT */
156 { STB0899_OFF0_ACQ_TIME , STB0899_BASE_ACQ_TIME , 0x00000000 }, /* ACQTIME */
157 { STB0899_OFF0_FINAL_AGC_CNTRL , STB0899_BASE_FINAL_AGC_CNTRL , 0x00800c17 }, /* FINALAGCCNTRL*/
158 { STB0899_OFF0_FINAL_AGC_GAIN , STB0899_BASE_FINAL_AGC_GAIN , 0x00000000 }, /* FINALAGCGAIN */
159 { STB0899_OFF0_EQUALIZER_INIT , STB0899_BASE_EQUALIZER_INIT , 0x00000000 }, /* EQUALIZERINIT */
160 { STB0899_OFF0_EQ_CNTRL , STB0899_BASE_EQ_CNTRL , 0x00054802 }, /* EQCNTRL */
161 { STB0899_OFF0_EQ_I_INIT_COEFF_0, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF0 */
162 { STB0899_OFF1_EQ_I_INIT_COEFF_1, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF1 */
163 { STB0899_OFF2_EQ_I_INIT_COEFF_2, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF2 */
164 { STB0899_OFF3_EQ_I_INIT_COEFF_3, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF3 */
165 { STB0899_OFF4_EQ_I_INIT_COEFF_4, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF4 */
166 { STB0899_OFF5_EQ_I_INIT_COEFF_5, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000400 }, /* EQIINITCOEFF5 */
167 { STB0899_OFF6_EQ_I_INIT_COEFF_6, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF6 */
168 { STB0899_OFF7_EQ_I_INIT_COEFF_7, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF7 */
169 { STB0899_OFF8_EQ_I_INIT_COEFF_8, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF8 */
170 { STB0899_OFF9_EQ_I_INIT_COEFF_9, STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF9 */
171 { STB0899_OFFa_EQ_I_INIT_COEFF_10,STB0899_BASE_EQ_I_INIT_COEFF_N, 0x00000000 }, /* EQIINITCOEFF10*/
172 { STB0899_OFF0_EQ_Q_INIT_COEFF_0, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF0 */
173 { STB0899_OFF1_EQ_Q_INIT_COEFF_1, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF1 */
174 { STB0899_OFF2_EQ_Q_INIT_COEFF_2, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF2 */
175 { STB0899_OFF3_EQ_Q_INIT_COEFF_3, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF3 */
176 { STB0899_OFF4_EQ_Q_INIT_COEFF_4, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF4 */
177 { STB0899_OFF5_EQ_Q_INIT_COEFF_5, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF5 */
178 { STB0899_OFF6_EQ_Q_INIT_COEFF_6, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF6 */
179 { STB0899_OFF7_EQ_Q_INIT_COEFF_7, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF7 */
180 { STB0899_OFF8_EQ_Q_INIT_COEFF_8, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF8 */
181 { STB0899_OFF9_EQ_Q_INIT_COEFF_9, STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF9 */
182 { STB0899_OFFa_EQ_Q_INIT_COEFF_10,STB0899_BASE_EQ_Q_INIT_COEFF_N, 0x00000000 }, /* EQQINITCOEFF10*/
183 { STB0899_OFF0_EQ_I_OUT_COEFF_0 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT0 */
184 { STB0899_OFF1_EQ_I_OUT_COEFF_1 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT1 */
185 { STB0899_OFF2_EQ_I_OUT_COEFF_2 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT2 */
186 { STB0899_OFF3_EQ_I_OUT_COEFF_3 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT3 */
187 { STB0899_OFF4_EQ_I_OUT_COEFF_4 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT4 */
188 { STB0899_OFF5_EQ_I_OUT_COEFF_5 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT5 */
189 { STB0899_OFF6_EQ_I_OUT_COEFF_6 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT6 */
190 { STB0899_OFF7_EQ_I_OUT_COEFF_7 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT7 */
191 { STB0899_OFF8_EQ_I_OUT_COEFF_8 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT8 */
192 { STB0899_OFF9_EQ_I_OUT_COEFF_9 , STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT9 */
193 { STB0899_OFFa_EQ_I_OUT_COEFF_10,STB0899_BASE_EQ_I_OUT_COEFF_N , 0x00000000 }, /* EQICOEFFSOUT10*/
194 { STB0899_OFF0_EQ_Q_OUT_COEFF_0 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT0 */
195 { STB0899_OFF1_EQ_Q_OUT_COEFF_1 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT1 */
196 { STB0899_OFF2_EQ_Q_OUT_COEFF_2 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT2 */
197 { STB0899_OFF3_EQ_Q_OUT_COEFF_3 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT3 */
198 { STB0899_OFF4_EQ_Q_OUT_COEFF_4 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT4 */
199 { STB0899_OFF5_EQ_Q_OUT_COEFF_5 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT5 */
200 { STB0899_OFF6_EQ_Q_OUT_COEFF_6 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT6 */
201 { STB0899_OFF7_EQ_Q_OUT_COEFF_7 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT7 */
202 { STB0899_OFF8_EQ_Q_OUT_COEFF_8 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT8 */
203 { STB0899_OFF9_EQ_Q_OUT_COEFF_9 , STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT9 */
204 { STB0899_OFFa_EQ_Q_OUT_COEFF_10, STB0899_BASE_EQ_Q_OUT_COEFF_N , 0x00000000 }, /* EQQCOEFFSOUT10*/
205 { 0xffff , 0xffffffff , 0xffffffff },
206};
207static const struct stb0899_s2_reg stb0899_s2_init_4[] = {
208 { STB0899_OFF0_BLOCK_LNGTH , STB0899_BASE_BLOCK_LNGTH , 0x00000008 }, /* BLOCKLNGTH */
209 { STB0899_OFF0_ROW_STR , STB0899_BASE_ROW_STR , 0x000000b4 }, /* ROWSTR */
210 { STB0899_OFF0_BN_END_ADDR , STB0899_BASE_BN_END_ADDR , 0x000004b5 }, /* BNANDADDR */
211 { STB0899_OFF0_CN_END_ADDR , STB0899_BASE_CN_END_ADDR , 0x00000b4b }, /* CNANDADDR */
212 { STB0899_OFF0_INFO_LENGTH , STB0899_BASE_INFO_LENGTH , 0x00000078 }, /* INFOLENGTH */
213 { STB0899_OFF0_BOT_ADDR , STB0899_BASE_BOT_ADDR , 0x000001e0 }, /* BOT_ADDR */
214 { STB0899_OFF0_BCH_BLK_LN , STB0899_BASE_BCH_BLK_LN , 0x0000a8c0 }, /* BCHBLKLN */
215 { STB0899_OFF0_BCH_T , STB0899_BASE_BCH_T , 0x0000000c }, /* BCHT */
216 { STB0899_OFF0_CNFG_MODE , STB0899_BASE_CNFG_MODE , 0x00000001 }, /* CNFGMODE */
217 { STB0899_OFF0_LDPC_STAT , STB0899_BASE_LDPC_STAT , 0x0000000d }, /* LDPCSTAT */
218 { STB0899_OFF0_ITER_SCALE , STB0899_BASE_ITER_SCALE , 0x00000040 }, /* ITERSCALE */
219 { STB0899_OFF0_INPUT_MODE , STB0899_BASE_INPUT_MODE , 0x00000000 }, /* INPUTMODE */
220 { STB0899_OFF0_LDPCDECRST , STB0899_BASE_LDPCDECRST , 0x00000000 }, /* LDPCDECRST */
221 { STB0899_OFF0_CLK_PER_BYTE_RW , STB0899_BASE_CLK_PER_BYTE_RW , 0x00000008 }, /* CLKPERBYTE */
222 { STB0899_OFF0_BCH_ERRORS , STB0899_BASE_BCH_ERRORS , 0x00000000 }, /* BCHERRORS */
223 { STB0899_OFF0_LDPC_ERRORS , STB0899_BASE_LDPC_ERRORS , 0x00000000 }, /* LDPCERRORS */
224 { STB0899_OFF0_BCH_MODE , STB0899_BASE_BCH_MODE , 0x00000000 }, /* BCHMODE */
225 { STB0899_OFF0_ERR_ACC_PER , STB0899_BASE_ERR_ACC_PER , 0x00000008 }, /* ERRACCPER */
226 { STB0899_OFF0_BCH_ERR_ACC , STB0899_BASE_BCH_ERR_ACC , 0x00000000 }, /* BCHERRACC */
227 { STB0899_OFF0_FEC_TP_SEL , STB0899_BASE_FEC_TP_SEL , 0x00000000 }, /* FECTPSEL */
228 { 0xffff , 0xffffffff , 0xffffffff },
229};
230
231static const struct stb0899_s1_reg stb0899_s1_init_5[] = {
232 { STB0899_TSTCK , 0x00 },
233 { STB0899_TSTRES , 0x00 },
234 { STB0899_TSTOUT , 0x00 },
235 { STB0899_TSTIN , 0x00 },
236 { STB0899_TSTSYS , 0x00 },
237 { STB0899_TSTCHIP , 0x00 },
238 { STB0899_TSTFREE , 0x00 },
239 { STB0899_TSTI2C , 0x00 },
240 { STB0899_BITSPEEDM , 0x00 },
241 { STB0899_BITSPEEDL , 0x00 },
242 { STB0899_TBUSBIT , 0x00 },
243 { STB0899_TSTDIS , 0x00 },
244 { STB0899_TSTDISRX , 0x00 },
245 { STB0899_TSTJETON , 0x00 },
246 { STB0899_TSTDCADJ , 0x00 },
247 { STB0899_TSTAGC1 , 0x00 },
248 { STB0899_TSTAGC1N , 0x00 },
249 { STB0899_TSTPOLYPH , 0x00 },
250 { STB0899_TSTR , 0x00 },
251 { STB0899_TSTAGC2 , 0x00 },
252 { STB0899_TSTCTL1 , 0x00 },
253 { STB0899_TSTCTL2 , 0x00 },
254 { STB0899_TSTCTL3 , 0x00 },
255 { STB0899_TSTDEMAP , 0x00 },
256 { STB0899_TSTDEMAP2 , 0x00 },
257 { STB0899_TSTDEMMON , 0x00 },
258 { STB0899_TSTRATE , 0x00 },
259 { STB0899_TSTSELOUT , 0x00 },
260 { STB0899_TSYNC , 0x00 },
261 { STB0899_TSTERR , 0x00 },
262 { STB0899_TSTRAM1 , 0x00 },
263 { STB0899_TSTVSELOUT , 0x00 },
264 { STB0899_TSTFORCEIN , 0x00 },
265 { STB0899_TSTRS1 , 0x00 },
266 { STB0899_TSTRS2 , 0x00 },
267 { STB0899_TSTRS3 , 0x00 },
268 { STB0899_GHOSTREG , 0x81 },
269 { 0xffff , 0xff },
270};
271
272#define STB0899_DVBS2_ESNO_AVE 3
273#define STB0899_DVBS2_ESNO_QUANT 32
274#define STB0899_DVBS2_AVFRAMES_COARSE 10
275#define STB0899_DVBS2_AVFRAMES_FINE 20
276#define STB0899_DVBS2_MISS_THRESHOLD 6
277#define STB0899_DVBS2_UWP_THRESHOLD_ACQ 1125
278#define STB0899_DVBS2_UWP_THRESHOLD_TRACK 758
279#define STB0899_DVBS2_UWP_THRESHOLD_SOF 1350
280#define STB0899_DVBS2_SOF_SEARCH_TIMEOUT 1664100
281
282#define STB0899_DVBS2_BTR_NCO_BITS 28
283#define STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET 15
284#define STB0899_DVBS2_CRL_NCO_BITS 30
285#define STB0899_DVBS2_LDPC_MAX_ITER 70
286
287#endif //__STB0899_CFG_H
diff --git a/drivers/media/dvb/frontends/stb0899_drv.c b/drivers/media/dvb/frontends/stb0899_drv.c
new file mode 100644
index 000000000000..528820170228
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb0899_drv.c
@@ -0,0 +1,1684 @@
1/*
2 STB0899 Multistandard Frontend driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/string.h>
26
27#include <linux/dvb/frontend.h>
28#include "dvb_frontend.h"
29
30#include "stb0899_drv.h"
31#include "stb0899_priv.h"
32#include "stb0899_reg.h"
33
34static unsigned int verbose = 0;//1;
35module_param(verbose, int, 0644);
36
37/* C/N in dB/10, NIRM/NIRL */
38static const struct stb0899_tab stb0899_cn_tab[] = {
39 { 200, 2600 },
40 { 190, 2700 },
41 { 180, 2860 },
42 { 170, 3020 },
43 { 160, 3210 },
44 { 150, 3440 },
45 { 140, 3710 },
46 { 130, 4010 },
47 { 120, 4360 },
48 { 110, 4740 },
49 { 100, 5190 },
50 { 90, 5670 },
51 { 80, 6200 },
52 { 70, 6770 },
53 { 60, 7360 },
54 { 50, 7970 },
55 { 40, 8250 },
56 { 30, 9000 },
57 { 20, 9450 },
58 { 15, 9600 },
59};
60
61/* DVB-S AGCIQ_VALUE vs. signal level in dBm/10.
62 * As measured, connected to a modulator.
63 * -8.0 to -50.0 dBm directly connected,
64 * -52.0 to -74.8 with extra attenuation.
65 * Cut-off to AGCIQ_VALUE = 0x80 below -74.8dBm.
66 * Crude linear extrapolation below -84.8dBm and above -8.0dBm.
67 */
68static const struct stb0899_tab stb0899_dvbsrf_tab[] = {
69 { -950, -128 },
70 { -748, -94 },
71 { -745, -92 },
72 { -735, -90 },
73 { -720, -87 },
74 { -670, -77 },
75 { -640, -70 },
76 { -610, -62 },
77 { -600, -60 },
78 { -590, -56 },
79 { -560, -41 },
80 { -540, -25 },
81 { -530, -17 },
82 { -520, -11 },
83 { -500, 1 },
84 { -490, 6 },
85 { -480, 10 },
86 { -440, 22 },
87 { -420, 27 },
88 { -400, 31 },
89 { -380, 34 },
90 { -340, 40 },
91 { -320, 43 },
92 { -280, 48 },
93 { -250, 52 },
94 { -230, 55 },
95 { -180, 61 },
96 { -140, 66 },
97 { -90, 73 },
98 { -80, 74 },
99 { 500, 127 }
100};
101
102/* DVB-S2 IF_AGC_GAIN vs. signal level in dBm/10.
103 * As measured, connected to a modulator.
104 * -8.0 to -50.1 dBm directly connected,
105 * -53.0 to -76.6 with extra attenuation.
106 * Cut-off to IF_AGC_GAIN = 0x3fff below -76.6dBm.
107 * Crude linear extrapolation below -76.6dBm and above -8.0dBm.
108 */
109static const struct stb0899_tab stb0899_dvbs2rf_tab[] = {
110 { 700, 0 },
111 { -80, 3217 },
112 { -150, 3893 },
113 { -190, 4217 },
114 { -240, 4621 },
115 { -280, 4945 },
116 { -320, 5273 },
117 { -350, 5545 },
118 { -370, 5741 },
119 { -410, 6147 },
120 { -450, 6671 },
121 { -490, 7413 },
122 { -501, 7665 },
123 { -530, 8767 },
124 { -560, 10219 },
125 { -580, 10939 },
126 { -590, 11518 },
127 { -600, 11723 },
128 { -650, 12659 },
129 { -690, 13219 },
130 { -730, 13645 },
131 { -750, 13909 },
132 { -766, 14153 },
133 { -999, 16383 }
134};
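/*
 * Editorial sketch: the two signal-level tables above pair a level in
 * dBm/10 (first column) with the register reading measured at that level
 * (second column, ascending).  How the driver consumes these tables is not
 * shown in this hunk, so a hypothetical local pair type is used here; a
 * plain linear interpolation over such a table might look like this:
 */
struct rf_level_pair {				/* illustrative only */
	s32 level;				/* dBm/10 */
	s32 reading;				/* AGCIQ_VALUE or IF_AGC_GAIN */
};

static s32 rf_level_lookup_sketch(const struct rf_level_pair *tab, int n, s32 reading)
{
	int i;

	if (reading <= tab[0].reading)
		return tab[0].level;		/* clamp below the table */
	for (i = 1; i < n; i++) {
		if (reading <= tab[i].reading) {
			s32 span = tab[i].reading - tab[i - 1].reading;

			if (!span)
				return tab[i].level;
			return tab[i - 1].level +
			       (tab[i].level - tab[i - 1].level) *
			       (reading - tab[i - 1].reading) / span;
		}
	}
	return tab[n - 1].level;		/* clamp above the table */
}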
135
136/* DVB-S2 Es/N0 quant in dB/100 vs read value * 100*/
137struct stb0899_tab stb0899_quant_tab[] = {
138 { 0, 0 },
139 { 0, 100 },
140 { 600, 200 },
141 { 950, 299 },
142 { 1200, 398 },
143 { 1400, 501 },
144 { 1560, 603 },
145 { 1690, 700 },
146 { 1810, 804 },
147 { 1910, 902 },
148 { 2000, 1000 },
149 { 2080, 1096 },
150 { 2160, 1202 },
151 { 2230, 1303 },
152 { 2350, 1496 },
153 { 2410, 1603 },
154 { 2460, 1698 },
155 { 2510, 1799 },
156 { 2600, 1995 },
157 { 2650, 2113 },
158 { 2690, 2213 },
159 { 2720, 2291 },
160 { 2760, 2399 },
161 { 2800, 2512 },
162 { 2860, 2692 },
163 { 2930, 2917 },
164 { 2960, 3020 },
165 { 3010, 3199 },
166 { 3040, 3311 },
167 { 3060, 3388 },
168 { 3120, 3631 },
169 { 3190, 3936 },
170 { 3400, 5012 },
171 { 3610, 6383 },
172 { 3800, 7943 },
173 { 4210, 12735 },
174 { 4500, 17783 },
175 { 4690, 22131 },
176 { 4810, 25410 }
177};
178
179/* DVB-S2 Es/N0 estimate in dB/100 vs read value */
180struct stb0899_tab stb0899_est_tab[] = {
181 { 0, 0 },
182 { 0, 1 },
183 { 301, 2 },
184 { 1204, 16 },
185 { 1806, 64 },
186 { 2408, 256 },
187 { 2709, 512 },
188 { 3010, 1023 },
189 { 3311, 2046 },
190 { 3612, 4093 },
191 { 3823, 6653 },
192 { 3913, 8185 },
193 { 4010, 10233 },
194 { 4107, 12794 },
195 { 4214, 16368 },
196 { 4266, 18450 },
197 { 4311, 20464 },
198 { 4353, 22542 },
199 { 4391, 24604 },
200 { 4425, 26607 },
201 { 4457, 28642 },
202 { 4487, 30690 },
203 { 4515, 32734 },
204 { 4612, 40926 },
205 { 4692, 49204 },
206 { 4816, 65464 },
207 { 4913, 81846 },
208 { 4993, 98401 },
209 { 5060, 114815 },
210 { 5118, 131220 },
211 { 5200, 158489 },
212 { 5300, 199526 },
213 { 5400, 251189 },
214 { 5500, 316228 },
215 { 5600, 398107 },
216 { 5720, 524807 },
217 { 5721, 526017 },
218};
219
220int _stb0899_read_reg(struct stb0899_state *state, unsigned int reg)
221{
222 int ret;
223
224 u8 b0[] = { reg >> 8, reg & 0xff };
225 u8 buf;
226
227 struct i2c_msg msg[] = {
228 {
229 .addr = state->config->demod_address,
230 .flags = 0,
231 .buf = b0,
232 .len = 2
233 },{
234 .addr = state->config->demod_address,
235 .flags = I2C_M_RD,
236 .buf = &buf,
237 .len = 1
238 }
239 };
240
241 ret = i2c_transfer(state->i2c, msg, 2);
242 if (ret != 2) {
243 if (ret != -ERESTARTSYS)
244 dprintk(state->verbose, FE_ERROR, 1,
245 "Read error, Reg=[0x%02x], Status=%d",
246 reg, ret);
247
248 return ret < 0 ? ret : -EREMOTEIO;
249 }
250 if (unlikely(*state->verbose >= FE_DEBUGREG))
251 dprintk(state->verbose, FE_ERROR, 1, "Reg=[0x%02x], data=%02x",
252 reg, buf);
253
254 return (unsigned int)buf;
255}
256
257int stb0899_read_reg(struct stb0899_state *state, unsigned int reg)
258{
259 int result;
260
261 result = _stb0899_read_reg(state, reg);
262 /*
263 * Bug ID 9:
264 * access to 0xf2xx/0xf6xx
265 * must be followed by read from 0xf2ff/0xf6ff.
266 */
267 if ((reg != 0xf2ff) && (reg != 0xf6ff) &&
268 (((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
269 _stb0899_read_reg(state, (reg | 0x00ff));
270
271 return result;
272}
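/*
 * Editorial sketch: the Bug ID 9 workaround above amounts to "any access to
 * a 0xf2xx or 0xf6xx register, other than 0xf2ff/0xf6ff themselves, must be
 * chased with a dummy read of the matching 0xf2ff/0xf6ff register".  The
 * same predicate, factored out purely for illustration:
 */
static int stb0899_needs_chase_read_sketch(unsigned int reg)
{
	if (reg == 0xf2ff || reg == 0xf6ff)
		return 0;

	return ((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600);
}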
273
274u32 _stb0899_read_s2reg(struct stb0899_state *state,
275 u32 stb0899_i2cdev,
276 u32 stb0899_base_addr,
277 u16 stb0899_reg_offset)
278{
279 int status;
280 u32 data;
281 u8 buf[7] = { 0 };
282 u16 tmpaddr;
283
284 u8 buf_0[] = {
285 GETBYTE(stb0899_i2cdev, BYTE1), /* 0xf3 S2 Base Address (MSB) */
286 GETBYTE(stb0899_i2cdev, BYTE0), /* 0xfc S2 Base Address (LSB) */
287 GETBYTE(stb0899_base_addr, BYTE0), /* 0x00 Base Address (LSB) */
288 GETBYTE(stb0899_base_addr, BYTE1), /* 0x04 Base Address (LSB) */
289 GETBYTE(stb0899_base_addr, BYTE2), /* 0x00 Base Address (MSB) */
290 GETBYTE(stb0899_base_addr, BYTE3), /* 0x00 Base Address (MSB) */
291 };
292 u8 buf_1[] = {
293 0x00, /* 0xf3 Reg Offset */
294 0x00, /* 0x44 Reg Offset */
295 };
296
297 struct i2c_msg msg_0 = {
298 .addr = state->config->demod_address,
299 .flags = 0,
300 .buf = buf_0,
301 .len = 6
302 };
303
304 struct i2c_msg msg_1 = {
305 .addr = state->config->demod_address,
306 .flags = 0,
307 .buf = buf_1,
308 .len = 2
309 };
310
311 struct i2c_msg msg_r = {
312 .addr = state->config->demod_address,
313 .flags = I2C_M_RD,
314 .buf = buf,
315 .len = 4
316 };
317
318 tmpaddr = stb0899_reg_offset & 0xff00;
319 if (!(stb0899_reg_offset & 0x8))
320 tmpaddr = stb0899_reg_offset | 0x20;
321
322 buf_1[0] = GETBYTE(tmpaddr, BYTE1);
323 buf_1[1] = GETBYTE(tmpaddr, BYTE0);
324
325 status = i2c_transfer(state->i2c, &msg_0, 1);
326 if (status < 1) {
327 if (status != -ERESTARTSYS)
328 printk(KERN_ERR "%s ERR(1), Device=[0x%04x], Base address=[0x%08x], Offset=[0x%04x], Status=%d\n",
329 __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, status);
330
331 goto err;
332 }
333
334 /* Dummy */
335 status = i2c_transfer(state->i2c, &msg_1, 1);
336 if (status < 1)
337 goto err;
338
339 status = i2c_transfer(state->i2c, &msg_r, 1);
340 if (status < 1)
341 goto err;
342
343 buf_1[0] = GETBYTE(stb0899_reg_offset, BYTE1);
344 buf_1[1] = GETBYTE(stb0899_reg_offset, BYTE0);
345
346 /* Actual */
347 status = i2c_transfer(state->i2c, &msg_1, 1);
348 if (status < 1) {
349 if (status != -ERESTARTSYS)
350 printk(KERN_ERR "%s ERR(2), Device=[0x%04x], Base address=[0x%08x], Offset=[0x%04x], Status=%d\n",
351 __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, status);
352 goto err;
353 }
354
355 status = i2c_transfer(state->i2c, &msg_r, 1);
356 if (status < 1) {
357 if (status != -ERESTARTSYS)
358 printk(KERN_ERR "%s ERR(3), Device=[0x%04x], Base address=[0x%08x], Offset=[0x%04x], Status=%d\n",
359 __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, status);
360 return status < 0 ? status : -EREMOTEIO;
361 }
362
363 data = MAKEWORD32(buf[3], buf[2], buf[1], buf[0]);
364 if (unlikely(*state->verbose >= FE_DEBUGREG))
365 printk(KERN_DEBUG "%s Device=[0x%04x], Base address=[0x%08x], Offset=[0x%04x], Data=[0x%08x]\n",
366 __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, data);
367
368 return data;
369
370err:
371 return status < 0 ? status : -EREMOTEIO;
372}
373
374int stb0899_write_s2reg(struct stb0899_state *state,
375 u32 stb0899_i2cdev,
376 u32 stb0899_base_addr,
377 u16 stb0899_reg_offset,
378 u32 stb0899_data)
379{
380 int status;
381
382 /* Base Address Setup */
383 u8 buf_0[] = {
384 GETBYTE(stb0899_i2cdev, BYTE1), /* 0xf3 S2 Base Address (MSB) */
385 GETBYTE(stb0899_i2cdev, BYTE0), /* 0xfc S2 Base Address (LSB) */
386 GETBYTE(stb0899_base_addr, BYTE0), /* 0x00 Base Address (LSB) */
387 GETBYTE(stb0899_base_addr, BYTE1), /* 0x04 Base Address (LSB) */
388 GETBYTE(stb0899_base_addr, BYTE2), /* 0x00 Base Address (MSB) */
389 GETBYTE(stb0899_base_addr, BYTE3), /* 0x00 Base Address (MSB) */
390 };
391 u8 buf_1[] = {
392 0x00, /* 0xf3 Reg Offset */
393 0x00, /* 0x44 Reg Offset */
394 0x00, /* data */
395 0x00, /* data */
396 0x00, /* data */
397 0x00, /* data */
398 };
399
400 struct i2c_msg msg_0 = {
401 .addr = state->config->demod_address,
402 .flags = 0,
403 .buf = buf_0,
404 .len = 6
405 };
406
407 struct i2c_msg msg_1 = {
408 .addr = state->config->demod_address,
409 .flags = 0,
410 .buf = buf_1,
411 .len = 6
412 };
413
414 buf_1[0] = GETBYTE(stb0899_reg_offset, BYTE1);
415 buf_1[1] = GETBYTE(stb0899_reg_offset, BYTE0);
416 buf_1[2] = GETBYTE(stb0899_data, BYTE0);
417 buf_1[3] = GETBYTE(stb0899_data, BYTE1);
418 buf_1[4] = GETBYTE(stb0899_data, BYTE2);
419 buf_1[5] = GETBYTE(stb0899_data, BYTE3);
420
421 if (unlikely(*state->verbose >= FE_DEBUGREG))
422 printk(KERN_DEBUG "%s Device=[0x%04x], Base Address=[0x%08x], Offset=[0x%04x], Data=[0x%08x]\n",
423 __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, stb0899_data);
424
425 status = i2c_transfer(state->i2c, &msg_0, 1);
426 if (unlikely(status < 1)) {
427 if (status != -ERESTARTSYS)
428 printk(KERN_ERR "%s ERR (1), Device=[0x%04x], Base Address=[0x%08x], Offset=[0x%04x], Data=[0x%08x], status=%d\n",
429 __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, stb0899_data, status);
430 goto err;
431 }
432 status = i2c_transfer(state->i2c, &msg_1, 1);
433 if (unlikely(status < 1)) {
434 if (status != -ERESTARTSYS)
435 printk(KERN_ERR "%s ERR (2), Device=[0x%04x], Base Address=[0x%08x], Offset=[0x%04x], Data=[0x%08x], status=%d\n",
436 __func__, stb0899_i2cdev, stb0899_base_addr, stb0899_reg_offset, stb0899_data, status);
437
438 return status < 0 ? status : -EREMOTEIO;
439 }
440
441 return 0;
442
443err:
444 return status < 0 ? status : -EREMOTEIO;
445}
446
447int stb0899_read_regs(struct stb0899_state *state, unsigned int reg, u8 *buf, u32 count)
448{
449 int status;
450
451 u8 b0[] = { reg >> 8, reg & 0xff };
452
453 struct i2c_msg msg[] = {
454 {
455 .addr = state->config->demod_address,
456 .flags = 0,
457 .buf = b0,
458 .len = 2
459 },{
460 .addr = state->config->demod_address,
461 .flags = I2C_M_RD,
462 .buf = buf,
463 .len = count
464 }
465 };
466
467 status = i2c_transfer(state->i2c, msg, 2);
468 if (status != 2) {
469 if (status != -ERESTARTSYS)
470 printk(KERN_ERR "%s Read error, Reg=[0x%04x], Count=%u, Status=%d\n",
471 __func__, reg, count, status);
472 goto err;
473 }
474 /*
475 * Bug ID 9:
476 * access to 0xf2xx/0xf6xx
477 * must be followed by read from 0xf2ff/0xf6ff.
478 */
479 if ((reg != 0xf2ff) && (reg != 0xf6ff) &&
480 (((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
481 _stb0899_read_reg(state, (reg | 0x00ff));
482
483 if (unlikely(*state->verbose >= FE_DEBUGREG)) {
484 int i;
485
486 printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
487 for (i = 0; i < count; i++) {
488 printk(" %02x", buf[i]);
489 }
490 printk("\n");
491 }
492
493 return 0;
494err:
495 return status < 0 ? status : -EREMOTEIO;
496}
497
498int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, u32 count)
499{
500 int ret;
501 u8 buf[2 + count];
502 struct i2c_msg i2c_msg = {
503 .addr = state->config->demod_address,
504 .flags = 0,
505 .buf = buf,
506 .len = 2 + count
507 };
508
509 buf[0] = reg >> 8;
510 buf[1] = reg & 0xff;
511 memcpy(&buf[2], data, count);
512
513 if (unlikely(*state->verbose >= FE_DEBUGREG)) {
514 int i;
515
516 printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
517 for (i = 0; i < count; i++)
518 printk(" %02x", data[i]);
519 printk("\n");
520 }
521 ret = i2c_transfer(state->i2c, &i2c_msg, 1);
522
523 /*
524 * Bug ID 9:
525 * access to 0xf2xx/0xf6xx
526 * must be followed by read from 0xf2ff/0xf6ff.
527 */
528 if ((((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
529 stb0899_read_reg(state, (reg | 0x00ff));
530
531 if (ret != 1) {
532 if (ret != -ERESTARTSYS)
533 dprintk(state->verbose, FE_ERROR, 1, "Reg=[0x%04x], Data=[0x%02x ...], Count=%u, Status=%d",
534 reg, data[0], count, ret);
535 return ret < 0 ? ret : -EREMOTEIO;
536 }
537
538 return 0;
539}
540
541int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data)
542{
543 return stb0899_write_regs(state, reg, &data, 1);
544}
545
546/*
547 * stb0899_get_mclk
548 * Get STB0899 master clock frequency
549 * ExtClk: external clock frequency (Hz)
550 */
551static u32 stb0899_get_mclk(struct stb0899_state *state)
552{
553 u32 mclk = 0, div = 0;
554
555 div = stb0899_read_reg(state, STB0899_NCOARSE);
556 mclk = (div + 1) * state->config->xtal_freq / 6;
557 dprintk(state->verbose, FE_DEBUG, 1, "div=%d, mclk=%d", div, mclk);
558
559 return mclk;
560}
561
562/*
563 * stb0899_set_mclk
564 * Set STB0899 master Clock frequency
565 * Mclk: demodulator master clock
566 * ExtClk: external clock frequency (Hz)
567 */
568static void stb0899_set_mclk(struct stb0899_state *state, u32 Mclk)
569{
570 struct stb0899_internal *internal = &state->internal;
571 u8 mdiv = 0;
572
573 dprintk(state->verbose, FE_DEBUG, 1, "state->config=%p", state->config);
574 mdiv = ((6 * Mclk) / state->config->xtal_freq) - 1;
575 dprintk(state->verbose, FE_DEBUG, 1, "mdiv=%d", mdiv);
576
577 stb0899_write_reg(state, STB0899_NCOARSE, mdiv);
578 internal->master_clk = stb0899_get_mclk(state);
579
580 dprintk(state->verbose, FE_DEBUG, 1, "MasterCLOCK=%d", internal->master_clk);
581}
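/*
 * Editorial sketch (values are illustrative assumptions, not taken from this
 * hunk): NCOARSE and the master clock are tied together by
 * mclk = (NCOARSE + 1) * xtal_freq / 6, so the set/get pair above round-trips.
 * For example, with an assumed 27 MHz crystal a requested 99 MHz master clock
 * programs NCOARSE = 6 * 99000000 / 27000000 - 1 = 21, which reads back as
 * (21 + 1) * 27000000 / 6 = 99000000 Hz:
 */
static u32 stb0899_mclk_roundtrip_sketch(void)
{
	u32 xtal = 27000000, want = 99000000;		/* assumed board values */
	u8 mdiv = (6 * want) / xtal - 1;		/* 21 */

	return (mdiv + 1) * xtal / 6;			/* 99000000 */
}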
582
583static int stb0899_postproc(struct stb0899_state *state, u8 ctl, int enable)
584{
585 struct stb0899_config *config = state->config;
586 const struct stb0899_postproc *postproc = config->postproc;
587
588 /* post process event */
589 if (postproc) {
590 if (enable) {
591 if (postproc[ctl].level == STB0899_GPIOPULLUP)
592 stb0899_write_reg(state, postproc[ctl].gpio, 0x02);
593 else
594 stb0899_write_reg(state, postproc[ctl].gpio, 0x82);
595 } else {
596 if (postproc[ctl].level == STB0899_GPIOPULLUP)
597 stb0899_write_reg(state, postproc[ctl].gpio, 0x82);
598 else
599 stb0899_write_reg(state, postproc[ctl].gpio, 0x02);
600 }
601 }
602 return 0;
603}
604
605static void stb0899_release(struct dvb_frontend *fe)
606{
607 struct stb0899_state *state = fe->demodulator_priv;
608
609 dprintk(state->verbose, FE_DEBUG, 1, "Release Frontend");
610 /* post process event */
611 stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 0);
612 kfree(state);
613}
614
615/*
616 * stb0899_get_alpha
617 * return: rolloff
618 */
619static int stb0899_get_alpha(struct stb0899_state *state)
620{
621 u8 mode_coeff;
622
623 mode_coeff = stb0899_read_reg(state, STB0899_DEMOD);
624
625 if (STB0899_GETFIELD(MODECOEFF, mode_coeff) == 1)
626 return 20;
627 else
628 return 35;
629}
630
631/*
632 * stb0899_init_calc
633 */
634static void stb0899_init_calc(struct stb0899_state *state)
635{
636 struct stb0899_internal *internal = &state->internal;
637 int master_clk;
638 u8 agc[2];
639 u8 agc1cn;
640 u32 reg;
641
642 /* Read registers (in burst mode) */
643 agc1cn = stb0899_read_reg(state, STB0899_AGC1CN);
644 stb0899_read_regs(state, STB0899_AGC1REF, agc, 2); /* AGC1R and AGC2O */
645
646 /* Initial calculations */
647 master_clk = stb0899_get_mclk(state);
648 internal->t_agc1 = 0;
649 internal->t_agc2 = 0;
650 internal->master_clk = master_clk;
651 internal->mclk = master_clk / 65536L;
652 internal->rolloff = stb0899_get_alpha(state);
653
654 /* DVBS2 Initial calculations */
655 /* Set AGC value to the middle */
656 internal->agc_gain = 8154;
657 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL);
658 STB0899_SETFIELD_VAL(IF_GAIN_INIT, reg, internal->agc_gain);
659 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL, STB0899_OFF0_IF_AGC_CNTRL, reg);
660
661 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, RRC_ALPHA);
662 internal->rrc_alpha = STB0899_GETFIELD(RRC_ALPHA, reg);
663
664 internal->center_freq = 0;
665 internal->av_frame_coarse = 10;
666 internal->av_frame_fine = 20;
667 internal->step_size = 2;
668/*
669 if ((pParams->SpectralInv == FE_IQ_NORMAL) || (pParams->SpectralInv == FE_IQ_AUTO))
670 pParams->IQLocked = 0;
671 else
672 pParams->IQLocked = 1;
673*/
674}
675
676static int stb0899_wait_diseqc_fifo_empty(struct stb0899_state *state, int timeout)
677{
678 u8 reg = 0;
679 unsigned long start = jiffies;
680
681 while (1) {
682 reg = stb0899_read_reg(state, STB0899_DISSTATUS);
683 if (!STB0899_GETFIELD(FIFOFULL, reg))
684 break;
685 if ((jiffies - start) > timeout) {
686 dprintk(state->verbose, FE_ERROR, 1, "timed out !!");
687 return -ETIMEDOUT;
688 }
689 }
690
691 return 0;
692}
693
694static int stb0899_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd)
695{
696 struct stb0899_state *state = fe->demodulator_priv;
697 u8 reg, i;
698
699 if (cmd->msg_len > 8)
700 return -EINVAL;
701
702 /* enable FIFO precharge */
703 reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
704 STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 1);
705 stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
706 for (i = 0; i < cmd->msg_len; i++) {
707 /* wait for FIFO empty */
708 if (stb0899_wait_diseqc_fifo_empty(state, 10) < 0)
709 return -ETIMEDOUT;
710
711 stb0899_write_reg(state, STB0899_DISFIFO, cmd->msg[i]);
712 }
713 reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
714 STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 0);
715 stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
716
717 return 0;
718}
719
720static int stb0899_wait_diseqc_rxidle(struct stb0899_state *state, int timeout)
721{
722 u8 reg = 0;
723 unsigned long start = jiffies;
724
725 while (!STB0899_GETFIELD(RXEND, reg)) {
726 reg = stb0899_read_reg(state, STB0899_DISRX_ST0);
727 if (jiffies - start > timeout) {
728 dprintk(state->verbose, FE_ERROR, 1, "timed out!!");
729 return -ETIMEDOUT;
730 }
731 msleep(10);
732 }
733
734 return 0;
735}
736
737static int stb0899_recv_slave_reply(struct dvb_frontend *fe, struct dvb_diseqc_slave_reply *reply)
738{
739 struct stb0899_state *state = fe->demodulator_priv;
740 u8 reg, length = 0, i;
741 int result;
742
743 if (stb0899_wait_diseqc_rxidle(state, 100) < 0)
744 return -ETIMEDOUT;
745
746 reg = stb0899_read_reg(state, STB0899_DISRX_ST0);
747 if (STB0899_GETFIELD(RXEND, reg)) {
748
749 reg = stb0899_read_reg(state, STB0899_DISRX_ST1);
750 length = STB0899_GETFIELD(FIFOBYTENBR, reg);
751
752 if (length > sizeof (reply->msg)) {
753 result = -EOVERFLOW;
754 goto exit;
755 }
756 reply->msg_len = length;
757
758 /* extract data */
759 for (i = 0; i < length; i++)
760 reply->msg[i] = stb0899_read_reg(state, STB0899_DISFIFO);
761 }
762
763 return 0;
764exit:
765
766 return result;
767}
768
769static int stb0899_wait_diseqc_txidle(struct stb0899_state *state, int timeout)
770{
771 u8 reg = 0;
772 unsigned long start = jiffies;
773
774 while (!STB0899_GETFIELD(TXIDLE, reg)) {
775 reg = stb0899_read_reg(state, STB0899_DISSTATUS);
776 if (jiffies - start > timeout) {
777 dprintk(state->verbose, FE_ERROR, 1, "timed out!!");
778 return -ETIMEDOUT;
779 }
780 msleep(10);
781 }
782 return 0;
783}
784
785static int stb0899_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
786{
787 struct stb0899_state *state = fe->demodulator_priv;
788 u8 reg, old_state;
789
790 /* wait for diseqc idle */
791 if (stb0899_wait_diseqc_txidle(state, 100) < 0)
792 return -ETIMEDOUT;
793
794 reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
795 old_state = reg;
796 /* set to burst mode */
797 STB0899_SETFIELD_VAL(DISEQCMODE, reg, 0x02);
798 STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 0x01);
799 stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
800 switch (burst) {
801 case SEC_MINI_A:
802 /* unmodulated */
803 stb0899_write_reg(state, STB0899_DISFIFO, 0x00);
804 break;
805 case SEC_MINI_B:
806 /* modulated */
807 stb0899_write_reg(state, STB0899_DISFIFO, 0xff);
808 break;
809 }
810 reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
811 STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 0x00);
812 stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
813 /* wait for diseqc idle */
814 if (stb0899_wait_diseqc_txidle(state, 100) < 0)
815 return -ETIMEDOUT;
816
817 /* restore state */
818 stb0899_write_reg(state, STB0899_DISCNTRL1, old_state);
819
820 return 0;
821}
822
823static int stb0899_diseqc_init(struct stb0899_state *state)
824{
825 struct dvb_diseqc_master_cmd tx_data;
826/*
827 struct dvb_diseqc_slave_reply rx_data;
828*/
829 u8 f22_tx, f22_rx, reg;
830
831	u32 mclk, tx_freq = 22000;	/* count = 0, i; */
832 tx_data.msg[0] = 0xe2;
833 tx_data.msg_len = 3;
834 reg = stb0899_read_reg(state, STB0899_DISCNTRL2);
835 STB0899_SETFIELD_VAL(ONECHIP_TRX, reg, 0);
836 stb0899_write_reg(state, STB0899_DISCNTRL2, reg);
837
838 /* disable Tx spy */
839 reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
840 STB0899_SETFIELD_VAL(DISEQCRESET, reg, 1);
841 stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
842
843 reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
844 STB0899_SETFIELD_VAL(DISEQCRESET, reg, 0);
845 stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
846
847 mclk = stb0899_get_mclk(state);
848 f22_tx = mclk / (tx_freq * 32);
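	/*
	 * e.g. an assumed mclk of 99000000 gives
	 * f22_tx = 99000000 / (22000 * 32) = 140
	 */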
849 stb0899_write_reg(state, STB0899_DISF22, f22_tx); /* DiSEqC Tx freq */
850 state->rx_freq = 20000;
851 f22_rx = mclk / (state->rx_freq * 32);
852
853 return 0;
854}
855
856static int stb0899_sleep(struct dvb_frontend *fe)
857{
858 struct stb0899_state *state = fe->demodulator_priv;
859/*
860 u8 reg;
861*/
862 dprintk(state->verbose, FE_DEBUG, 1, "Going to Sleep .. (Really tired .. :-))");
863 /* post process event */
864 stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 0);
865
866 return 0;
867}
868
869static int stb0899_wakeup(struct dvb_frontend *fe)
870{
871 int rc;
872 struct stb0899_state *state = fe->demodulator_priv;
873
874 if ((rc = stb0899_write_reg(state, STB0899_SYNTCTRL, STB0899_SELOSCI)))
875 return rc;
876 /* Activate all clocks; DVB-S2 registers are inaccessible otherwise. */
877 if ((rc = stb0899_write_reg(state, STB0899_STOPCLK1, 0x00)))
878 return rc;
879 if ((rc = stb0899_write_reg(state, STB0899_STOPCLK2, 0x00)))
880 return rc;
881
882 /* post process event */
883 stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 1);
884
885 return 0;
886}
887
888static int stb0899_init(struct dvb_frontend *fe)
889{
890 int i;
891 struct stb0899_state *state = fe->demodulator_priv;
892 struct stb0899_config *config = state->config;
893
894 dprintk(state->verbose, FE_DEBUG, 1, "Initializing STB0899 ... ");
895
896 /* init device */
897 dprintk(state->verbose, FE_DEBUG, 1, "init device");
898 for (i = 0; config->init_dev[i].address != 0xffff; i++)
899 stb0899_write_reg(state, config->init_dev[i].address, config->init_dev[i].data);
900
901 dprintk(state->verbose, FE_DEBUG, 1, "init S2 demod");
902 /* init S2 demod */
903 for (i = 0; config->init_s2_demod[i].offset != 0xffff; i++)
904 stb0899_write_s2reg(state, STB0899_S2DEMOD,
905 config->init_s2_demod[i].base_address,
906 config->init_s2_demod[i].offset,
907 config->init_s2_demod[i].data);
908
909 dprintk(state->verbose, FE_DEBUG, 1, "init S1 demod");
910 /* init S1 demod */
911 for (i = 0; config->init_s1_demod[i].address != 0xffff; i++)
912 stb0899_write_reg(state, config->init_s1_demod[i].address, config->init_s1_demod[i].data);
913
914 dprintk(state->verbose, FE_DEBUG, 1, "init S2 FEC");
915 /* init S2 fec */
916 for (i = 0; config->init_s2_fec[i].offset != 0xffff; i++)
917 stb0899_write_s2reg(state, STB0899_S2FEC,
918 config->init_s2_fec[i].base_address,
919 config->init_s2_fec[i].offset,
920 config->init_s2_fec[i].data);
921
922 dprintk(state->verbose, FE_DEBUG, 1, "init TST");
923 /* init test */
924 for (i = 0; config->init_tst[i].address != 0xffff; i++)
925 stb0899_write_reg(state, config->init_tst[i].address, config->init_tst[i].data);
926
927 stb0899_init_calc(state);
928 stb0899_diseqc_init(state);
929
930 return 0;
931}
932
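/*
 * stb0899_table_lookup
 * binary search for the pair of table entries bracketing @val, followed by
 * linear interpolation between their .real values; values outside the table
 * are clamped to its first/last entry. The table must be ordered by
 * ascending .read. With a hypothetical table { {0, 0x10}, {100, 0x20} },
 * val = 0x18 interpolates to 50.
 */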
933static int stb0899_table_lookup(const struct stb0899_tab *tab, int max, int val)
934{
935 int res = 0;
936 int min = 0, med;
937
938 if (val < tab[min].read)
939 res = tab[min].real;
940 else if (val >= tab[max].read)
941 res = tab[max].real;
942 else {
943 while ((max - min) > 1) {
944 med = (max + min) / 2;
945 if (val >= tab[min].read && val < tab[med].read)
946 max = med;
947 else
948 min = med;
949 }
950 res = ((val - tab[min].read) *
951 (tab[max].real - tab[min].real) /
952 (tab[max].read - tab[min].read)) +
953 tab[min].real;
954 }
955
956 return res;
957}
958
959static int stb0899_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
960{
961 struct stb0899_state *state = fe->demodulator_priv;
962 struct stb0899_internal *internal = &state->internal;
963
964 int val;
965 u32 reg;
966 switch (state->delsys) {
967 case SYS_DVBS:
968 case SYS_DSS:
969 if (internal->lock) {
970 reg = stb0899_read_reg(state, STB0899_VSTATUS);
971 if (STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg)) {
972
973 reg = stb0899_read_reg(state, STB0899_AGCIQIN);
974 val = (s32)(s8)STB0899_GETFIELD(AGCIQVALUE, reg);
975
976 *strength = stb0899_table_lookup(stb0899_dvbsrf_tab, ARRAY_SIZE(stb0899_dvbsrf_tab) - 1, val);
977 *strength += 750;
978 dprintk(state->verbose, FE_DEBUG, 1, "AGCIQVALUE = 0x%02x, C = %d * 0.1 dBm",
979 val & 0xff, *strength);
980 }
981 }
982 break;
983 case SYS_DVBS2:
984 if (internal->lock) {
985			reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_GAIN);
986 val = STB0899_GETFIELD(IF_AGC_GAIN, reg);
987
988 *strength = stb0899_table_lookup(stb0899_dvbs2rf_tab, ARRAY_SIZE(stb0899_dvbs2rf_tab) - 1, val);
989 *strength += 750;
990 dprintk(state->verbose, FE_DEBUG, 1, "IF_AGC_GAIN = 0x%04x, C = %d * 0.1 dBm",
991 val & 0x3fff, *strength);
992 }
993 break;
994 default:
995 dprintk(state->verbose, FE_DEBUG, 1, "Unsupported delivery system");
996 return -EINVAL;
997 }
998
999 return 0;
1000}
1001
1002static int stb0899_read_snr(struct dvb_frontend *fe, u16 *snr)
1003{
1004 struct stb0899_state *state = fe->demodulator_priv;
1005 struct stb0899_internal *internal = &state->internal;
1006
1007 unsigned int val, quant, quantn = -1, est, estn = -1;
1008 u8 buf[2];
1009 u32 reg;
1010
1011 reg = stb0899_read_reg(state, STB0899_VSTATUS);
1012 switch (state->delsys) {
1013 case SYS_DVBS:
1014 case SYS_DSS:
1015 if (internal->lock) {
1016 if (STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg)) {
1017
1018 stb0899_read_regs(state, STB0899_NIRM, buf, 2);
1019 val = MAKEWORD16(buf[0], buf[1]);
1020
1021 *snr = stb0899_table_lookup(stb0899_cn_tab, ARRAY_SIZE(stb0899_cn_tab) - 1, val);
1022 dprintk(state->verbose, FE_DEBUG, 1, "NIR = 0x%02x%02x = %u, C/N = %d * 0.1 dBm\n",
1023 buf[0], buf[1], val, *snr);
1024 }
1025 }
1026 break;
1027 case SYS_DVBS2:
1028 if (internal->lock) {
1029 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL1);
1030 quant = STB0899_GETFIELD(UWP_ESN0_QUANT, reg);
1031 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_STAT2);
1032 est = STB0899_GETFIELD(ESN0_EST, reg);
1033 if (est == 1)
1034 val = 301; /* C/N = 30.1 dB */
1035 else if (est == 2)
1036 val = 270; /* C/N = 27.0 dB */
1037 else {
1038 /* quantn = 100 * log(quant^2) */
1039 quantn = stb0899_table_lookup(stb0899_quant_tab, ARRAY_SIZE(stb0899_quant_tab) - 1, quant * 100);
1040 /* estn = 100 * log(est) */
1041 estn = stb0899_table_lookup(stb0899_est_tab, ARRAY_SIZE(stb0899_est_tab) - 1, est);
1042 /* snr(dBm/10) = -10*(log(est)-log(quant^2)) => snr(dBm/10) = (100*log(quant^2)-100*log(est))/10 */
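				/*
				 * e.g. quantn = 2320 and estn = 1200 (illustrative
				 * values, not register readings) give
				 * val = (2320 - 1200) / 10 = 112, i.e. C/N = 11.2 dB
				 */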
1043 val = (quantn - estn) / 10;
1044 }
1045 *snr = val;
1046 dprintk(state->verbose, FE_DEBUG, 1, "Es/N0 quant = %d (%d) estimate = %u (%d), C/N = %d * 0.1 dBm",
1047 quant, quantn, est, estn, val);
1048 }
1049 break;
1050 default:
1051 dprintk(state->verbose, FE_DEBUG, 1, "Unsupported delivery system");
1052 return -EINVAL;
1053 }
1054
1055 return 0;
1056}
1057
1058static int stb0899_read_status(struct dvb_frontend *fe, enum fe_status *status)
1059{
1060 struct stb0899_state *state = fe->demodulator_priv;
1061 struct stb0899_internal *internal = &state->internal;
1062 u8 reg;
1063 *status = 0;
1064
1065 switch (state->delsys) {
1066 case SYS_DVBS:
1067 case SYS_DSS:
1068 dprintk(state->verbose, FE_DEBUG, 1, "Delivery system DVB-S/DSS");
1069 if (internal->lock) {
1070 reg = stb0899_read_reg(state, STB0899_VSTATUS);
1071 if (STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg)) {
1072 dprintk(state->verbose, FE_DEBUG, 1, "--------> FE_HAS_CARRIER | FE_HAS_LOCK");
1073 *status |= FE_HAS_CARRIER | FE_HAS_LOCK;
1074
1075 reg = stb0899_read_reg(state, STB0899_PLPARM);
1076 if (STB0899_GETFIELD(VITCURPUN, reg)) {
1077 dprintk(state->verbose, FE_DEBUG, 1, "--------> FE_HAS_VITERBI | FE_HAS_SYNC");
1078 *status |= FE_HAS_VITERBI | FE_HAS_SYNC;
1079 /* post process event */
1080 stb0899_postproc(state, STB0899_POSTPROC_GPIO_LOCK, 1);
1081 }
1082 }
1083 }
1084 break;
1085 case SYS_DVBS2:
1086 dprintk(state->verbose, FE_DEBUG, 1, "Delivery system DVB-S2");
1087 if (internal->lock) {
1088 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_STAT2);
1089 if (STB0899_GETFIELD(UWP_LOCK, reg) && STB0899_GETFIELD(CSM_LOCK, reg)) {
1090 *status |= FE_HAS_CARRIER;
1091 dprintk(state->verbose, FE_DEBUG, 1,
1092 "UWP & CSM Lock ! ---> DVB-S2 FE_HAS_CARRIER");
1093
1094 reg = stb0899_read_reg(state, STB0899_CFGPDELSTATUS1);
1095 if (STB0899_GETFIELD(CFGPDELSTATUS_LOCK, reg)) {
1096 *status |= FE_HAS_LOCK;
1097 dprintk(state->verbose, FE_DEBUG, 1,
1098 "Packet Delineator Locked ! -----> DVB-S2 FE_HAS_LOCK");
1099
1100 }
1101 if (STB0899_GETFIELD(CONTINUOUS_STREAM, reg)) {
1102 *status |= FE_HAS_VITERBI;
1103 dprintk(state->verbose, FE_DEBUG, 1,
1104 "Packet Delineator found VITERBI ! -----> DVB-S2 FE_HAS_VITERBI");
1105 }
1106 if (STB0899_GETFIELD(ACCEPTED_STREAM, reg)) {
1107 *status |= FE_HAS_SYNC;
1108 dprintk(state->verbose, FE_DEBUG, 1,
1109 "Packet Delineator found SYNC ! -----> DVB-S2 FE_HAS_SYNC");
1110 /* post process event */
1111 stb0899_postproc(state, STB0899_POSTPROC_GPIO_LOCK, 1);
1112 }
1113 }
1114 }
1115 break;
1116 default:
1117 dprintk(state->verbose, FE_DEBUG, 1, "Unsupported delivery system");
1118 return -EINVAL;
1119 }
1120 return 0;
1121}
1122
1123/*
1124 * stb0899_read_ber
1125 * Viterbi error for DVB-S/DSS
1126 * packet error for DVB-S2
1127 * Bit Error Rate or Packet Error Rate * 10 ^ 7
1128 */
1129static int stb0899_read_ber(struct dvb_frontend *fe, u32 *ber)
1130{
1131 struct stb0899_state *state = fe->demodulator_priv;
1132 struct stb0899_internal *internal = &state->internal;
1133
1134 u8 lsb, msb;
1135 u32 i;
1136
1137 *ber = 0;
1138
1139 switch (state->delsys) {
1140 case SYS_DVBS:
1141 case SYS_DSS:
1142 if (internal->lock) {
1143 /* average 5 BER values */
1144 for (i = 0; i < 5; i++) {
1145 msleep(100);
1146 lsb = stb0899_read_reg(state, STB0899_ECNT1L);
1147 msb = stb0899_read_reg(state, STB0899_ECNT1M);
1148 *ber += MAKEWORD16(msb, lsb);
1149 }
1150 *ber /= 5;
1151 /* Viterbi Check */
1152 if (STB0899_GETFIELD(VSTATUS_PRFVIT, internal->v_status)) {
1153 /* Error Rate */
1154 *ber *= 9766;
1155 /* ber = ber * 10 ^ 7 */
1156 *ber /= (-1 + (1 << (2 * STB0899_GETFIELD(NOE, internal->err_ctrl))));
1157 *ber /= 8;
1158 }
1159 }
1160 break;
1161 case SYS_DVBS2:
1162 if (internal->lock) {
1163 /* Average 5 PER values */
1164 for (i = 0; i < 5; i++) {
1165 msleep(100);
1166 lsb = stb0899_read_reg(state, STB0899_ECNT1L);
1167 msb = stb0899_read_reg(state, STB0899_ECNT1M);
1168 *ber += MAKEWORD16(msb, lsb);
1169 }
1170 /* ber = ber * 10 ^ 7 */
1171 *ber *= 10000000;
1172 *ber /= (-1 + (1 << (4 + 2 * STB0899_GETFIELD(NOE, internal->err_ctrl))));
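			/*
			 * e.g. an accumulated count of 5 with NOE = 3 (both
			 * illustrative) gives 5 * 10000000 / ((1 << 10) - 1) = 48875,
			 * i.e. a PER of about 4.9e-3
			 */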
1173 }
1174 break;
1175 default:
1176 dprintk(state->verbose, FE_DEBUG, 1, "Unsupported delivery system");
1177 return -EINVAL;
1178 }
1179
1180 return 0;
1181}
1182
1183static int stb0899_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
1184{
1185 struct stb0899_state *state = fe->demodulator_priv;
1186
1187 switch (voltage) {
1188 case SEC_VOLTAGE_13:
1189 stb0899_write_reg(state, STB0899_GPIO00CFG, 0x82);
1190 stb0899_write_reg(state, STB0899_GPIO01CFG, 0x02);
1191 stb0899_write_reg(state, STB0899_GPIO02CFG, 0x00);
1192 break;
1193 case SEC_VOLTAGE_18:
1194 stb0899_write_reg(state, STB0899_GPIO00CFG, 0x02);
1195 stb0899_write_reg(state, STB0899_GPIO01CFG, 0x02);
1196 stb0899_write_reg(state, STB0899_GPIO02CFG, 0x82);
1197 break;
1198 case SEC_VOLTAGE_OFF:
1199 stb0899_write_reg(state, STB0899_GPIO00CFG, 0x82);
1200 stb0899_write_reg(state, STB0899_GPIO01CFG, 0x82);
1201 stb0899_write_reg(state, STB0899_GPIO02CFG, 0x82);
1202 break;
1203 default:
1204 return -EINVAL;
1205 }
1206
1207 return 0;
1208}
1209
1210static int stb0899_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
1211{
1212 struct stb0899_state *state = fe->demodulator_priv;
1213 struct stb0899_internal *internal = &state->internal;
1214
1215 u8 div, reg;
1216
1217 /* wait for diseqc idle */
1218 if (stb0899_wait_diseqc_txidle(state, 100) < 0)
1219 return -ETIMEDOUT;
1220
1221 switch (tone) {
1222 case SEC_TONE_ON:
1223 div = (internal->master_clk / 100) / 5632;
1224 div = (div + 5) / 10;
1225 stb0899_write_reg(state, STB0899_DISEQCOCFG, 0x66);
1226 reg = stb0899_read_reg(state, STB0899_ACRPRESC);
1227 STB0899_SETFIELD_VAL(ACRPRESC, reg, 0x03);
1228 stb0899_write_reg(state, STB0899_ACRPRESC, reg);
1229 stb0899_write_reg(state, STB0899_ACRDIV1, div);
1230 break;
1231 case SEC_TONE_OFF:
1232 stb0899_write_reg(state, STB0899_DISEQCOCFG, 0x20);
1233 break;
1234 default:
1235 return -EINVAL;
1236 }
1237 return 0;
1238}
1239
1240int stb0899_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
1241{
1242 int i2c_stat;
1243 struct stb0899_state *state = fe->demodulator_priv;
1244
1245 i2c_stat = stb0899_read_reg(state, STB0899_I2CRPT);
1246 if (i2c_stat < 0)
1247 goto err;
1248
1249 if (enable) {
1250 dprintk(state->verbose, FE_DEBUG, 1, "Enabling I2C Repeater ...");
1251 i2c_stat |= STB0899_I2CTON;
1252 if (stb0899_write_reg(state, STB0899_I2CRPT, i2c_stat) < 0)
1253 goto err;
1254 } else {
1255 dprintk(state->verbose, FE_DEBUG, 1, "Disabling I2C Repeater ...");
1256 i2c_stat &= ~STB0899_I2CTON;
1257 if (stb0899_write_reg(state, STB0899_I2CRPT, i2c_stat) < 0)
1258 goto err;
1259 }
1260 return 0;
1261err:
1262 dprintk(state->verbose, FE_ERROR, 1, "I2C Repeater control failed");
1263 return -EREMOTEIO;
1264}
1265
1266
1267static inline void CONVERT32(u32 x, char *str)
1268{
1269 *str++ = (x >> 24) & 0xff;
1270 *str++ = (x >> 16) & 0xff;
1271 *str++ = (x >> 8) & 0xff;
1272 *str++ = (x >> 0) & 0xff;
1273 *str = '\0';
1274}
1275
1276int stb0899_get_dev_id(struct stb0899_state *state)
1277{
1278 u8 chip_id, release;
1279 u16 id;
1280 u32 demod_ver = 0, fec_ver = 0;
1281 char demod_str[5] = { 0 };
1282 char fec_str[5] = { 0 };
1283
1284 id = stb0899_read_reg(state, STB0899_DEV_ID);
1285 dprintk(state->verbose, FE_DEBUG, 1, "ID reg=[0x%02x]", id);
1286 chip_id = STB0899_GETFIELD(CHIP_ID, id);
1287 release = STB0899_GETFIELD(CHIP_REL, id);
1288
1289 dprintk(state->verbose, FE_ERROR, 1, "Device ID=[%d], Release=[%d]",
1290 chip_id, release);
1291
1292 CONVERT32(STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CORE_ID), (char *)&demod_str);
1293
1294 demod_ver = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_VERSION_ID);
1295 dprintk(state->verbose, FE_ERROR, 1, "Demodulator Core ID=[%s], Version=[%d]", (char *) &demod_str, demod_ver);
1296 CONVERT32(STB0899_READ_S2REG(STB0899_S2FEC, FEC_CORE_ID_REG), (char *)&fec_str);
1297 fec_ver = STB0899_READ_S2REG(STB0899_S2FEC, FEC_VER_ID_REG);
1298	if (chip_id == 0) {
1299		dprintk(state->verbose, FE_ERROR, 1, "couldn't find an STB0899");
1300
1301 return -ENODEV;
1302 }
1303 dprintk(state->verbose, FE_ERROR, 1, "FEC Core ID=[%s], Version=[%d]", (char*) &fec_str, fec_ver);
1304
1305 return 0;
1306}
1307
1308static void stb0899_set_delivery(struct stb0899_state *state)
1309{
1310 u8 reg;
1311 u8 stop_clk[2];
1312
1313 stop_clk[0] = stb0899_read_reg(state, STB0899_STOPCLK1);
1314 stop_clk[1] = stb0899_read_reg(state, STB0899_STOPCLK2);
1315
1316 switch (state->delsys) {
1317 case SYS_DVBS:
1318 dprintk(state->verbose, FE_DEBUG, 1, "Delivery System -- DVB-S");
1319 /* FECM/Viterbi ON */
1320 reg = stb0899_read_reg(state, STB0899_FECM);
1321 STB0899_SETFIELD_VAL(FECM_RSVD0, reg, 0);
1322 STB0899_SETFIELD_VAL(FECM_VITERBI_ON, reg, 1);
1323 stb0899_write_reg(state, STB0899_FECM, reg);
1324
1325 stb0899_write_reg(state, STB0899_RSULC, 0xb1);
1326 stb0899_write_reg(state, STB0899_TSULC, 0x40);
1327 stb0899_write_reg(state, STB0899_RSLLC, 0x42);
1328 stb0899_write_reg(state, STB0899_TSLPL, 0x12);
1329
1330 reg = stb0899_read_reg(state, STB0899_TSTRES);
1331 STB0899_SETFIELD_VAL(FRESLDPC, reg, 1);
1332 stb0899_write_reg(state, STB0899_TSTRES, reg);
1333
1334 STB0899_SETFIELD_VAL(STOP_CHK8PSK, stop_clk[0], 1);
1335 STB0899_SETFIELD_VAL(STOP_CKFEC108, stop_clk[0], 1);
1336 STB0899_SETFIELD_VAL(STOP_CKFEC216, stop_clk[0], 1);
1337
1338 STB0899_SETFIELD_VAL(STOP_CKPKDLIN108, stop_clk[1], 1);
1339 STB0899_SETFIELD_VAL(STOP_CKPKDLIN216, stop_clk[1], 1);
1340
1341 STB0899_SETFIELD_VAL(STOP_CKINTBUF216, stop_clk[0], 1);
1342 STB0899_SETFIELD_VAL(STOP_CKCORE216, stop_clk[0], 0);
1343
1344 STB0899_SETFIELD_VAL(STOP_CKS2DMD108, stop_clk[1], 1);
1345 break;
1346 case SYS_DVBS2:
1347 /* FECM/Viterbi OFF */
1348 reg = stb0899_read_reg(state, STB0899_FECM);
1349 STB0899_SETFIELD_VAL(FECM_RSVD0, reg, 0);
1350 STB0899_SETFIELD_VAL(FECM_VITERBI_ON, reg, 0);
1351 stb0899_write_reg(state, STB0899_FECM, reg);
1352
1353 stb0899_write_reg(state, STB0899_RSULC, 0xb1);
1354 stb0899_write_reg(state, STB0899_TSULC, 0x42);
1355 stb0899_write_reg(state, STB0899_RSLLC, 0x40);
1356 stb0899_write_reg(state, STB0899_TSLPL, 0x02);
1357
1358 reg = stb0899_read_reg(state, STB0899_TSTRES);
1359 STB0899_SETFIELD_VAL(FRESLDPC, reg, 0);
1360 stb0899_write_reg(state, STB0899_TSTRES, reg);
1361
1362 STB0899_SETFIELD_VAL(STOP_CHK8PSK, stop_clk[0], 1);
1363 STB0899_SETFIELD_VAL(STOP_CKFEC108, stop_clk[0], 0);
1364 STB0899_SETFIELD_VAL(STOP_CKFEC216, stop_clk[0], 0);
1365
1366 STB0899_SETFIELD_VAL(STOP_CKPKDLIN108, stop_clk[1], 0);
1367 STB0899_SETFIELD_VAL(STOP_CKPKDLIN216, stop_clk[1], 0);
1368
1369 STB0899_SETFIELD_VAL(STOP_CKINTBUF216, stop_clk[0], 0);
1370 STB0899_SETFIELD_VAL(STOP_CKCORE216, stop_clk[0], 0);
1371
1372 STB0899_SETFIELD_VAL(STOP_CKS2DMD108, stop_clk[1], 0);
1373 break;
1374 case SYS_DSS:
1375 /* FECM/Viterbi ON */
1376 reg = stb0899_read_reg(state, STB0899_FECM);
1377 STB0899_SETFIELD_VAL(FECM_RSVD0, reg, 1);
1378 STB0899_SETFIELD_VAL(FECM_VITERBI_ON, reg, 1);
1379 stb0899_write_reg(state, STB0899_FECM, reg);
1380
1381 stb0899_write_reg(state, STB0899_RSULC, 0xa1);
1382 stb0899_write_reg(state, STB0899_TSULC, 0x61);
1383 stb0899_write_reg(state, STB0899_RSLLC, 0x42);
1384
1385 reg = stb0899_read_reg(state, STB0899_TSTRES);
1386 STB0899_SETFIELD_VAL(FRESLDPC, reg, 1);
1387 stb0899_write_reg(state, STB0899_TSTRES, reg);
1388
1389 STB0899_SETFIELD_VAL(STOP_CHK8PSK, stop_clk[0], 1);
1390 STB0899_SETFIELD_VAL(STOP_CKFEC108, stop_clk[0], 1);
1391 STB0899_SETFIELD_VAL(STOP_CKFEC216, stop_clk[0], 1);
1392
1393 STB0899_SETFIELD_VAL(STOP_CKPKDLIN108, stop_clk[1], 1);
1394 STB0899_SETFIELD_VAL(STOP_CKPKDLIN216, stop_clk[1], 1);
1395
1396 STB0899_SETFIELD_VAL(STOP_CKCORE216, stop_clk[0], 0);
1397
1398 STB0899_SETFIELD_VAL(STOP_CKS2DMD108, stop_clk[1], 1);
1399 break;
1400 default:
1401 dprintk(state->verbose, FE_ERROR, 1, "Unsupported delivery system");
1402 break;
1403 }
1404 STB0899_SETFIELD_VAL(STOP_CKADCI108, stop_clk[0], 0);
1405 stb0899_write_regs(state, STB0899_STOPCLK1, stop_clk, 2);
1406}
1407
1408/*
1409 * stb0899_set_iterations
1410 * set the LDPC iteration scale function
1411 */
1412static void stb0899_set_iterations(struct stb0899_state *state)
1413{
1414 struct stb0899_internal *internal = &state->internal;
1415 struct stb0899_config *config = state->config;
1416
1417 s32 iter_scale;
1418 u32 reg;
1419
1420 iter_scale = 17 * (internal->master_clk / 1000);
1421 iter_scale += 410000;
1422 iter_scale /= (internal->srate / 1000000);
1423 iter_scale /= 1000;
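	/*
	 * e.g. assuming master_clk = 99000000 and srate = 27500000:
	 * (17 * 99000 + 410000) / 27 / 1000 = 77, clamped to
	 * config->ldpc_max_iter below
	 */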
1424
1425 if (iter_scale > config->ldpc_max_iter)
1426 iter_scale = config->ldpc_max_iter;
1427
1428 reg = STB0899_READ_S2REG(STB0899_S2DEMOD, MAX_ITER);
1429 STB0899_SETFIELD_VAL(MAX_ITERATIONS, reg, iter_scale);
1430 stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_MAX_ITER, STB0899_OFF0_MAX_ITER, reg);
1431}
1432
1433static enum dvbfe_search stb0899_search(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
1434{
1435 struct stb0899_state *state = fe->demodulator_priv;
1436 struct stb0899_params *i_params = &state->params;
1437 struct stb0899_internal *internal = &state->internal;
1438 struct stb0899_config *config = state->config;
1439 struct dtv_frontend_properties *props = &fe->dtv_property_cache;
1440
1441 u32 SearchRange, gain;
1442
1443 i_params->freq = p->frequency;
1444 i_params->srate = p->u.qpsk.symbol_rate;
1445 state->delsys = props->delivery_system;
1446 dprintk(state->verbose, FE_DEBUG, 1, "delivery system=%d", state->delsys);
1447
1448 SearchRange = 10000000;
1449 dprintk(state->verbose, FE_DEBUG, 1, "Frequency=%d, Srate=%d", i_params->freq, i_params->srate);
1450	/* checking the Search Range is meaningless for a fixed 3 MHz */
1451 if (INRANGE(i_params->srate, 1000000, 45000000)) {
1452 dprintk(state->verbose, FE_DEBUG, 1, "Parameters IN RANGE");
1453 stb0899_set_delivery(state);
1454
1455 if (state->config->tuner_set_rfsiggain) {
1456 if (internal->srate > 15000000)
1457 gain = 8; /* 15Mb < srate < 45Mb, gain = 8dB */
1458 else if (internal->srate > 5000000)
1459 gain = 12; /* 5Mb < srate < 15Mb, gain = 12dB */
1460 else
1461				gain = 14;	/* 1Mb < srate < 5Mb, gain = 14dB */
1462 state->config->tuner_set_rfsiggain(fe, gain);
1463 }
1464
1465 if (i_params->srate <= 5000000)
1466 stb0899_set_mclk(state, config->lo_clk);
1467 else
1468 stb0899_set_mclk(state, config->hi_clk);
1469
1470 switch (state->delsys) {
1471 case SYS_DVBS:
1472 case SYS_DSS:
1473 dprintk(state->verbose, FE_DEBUG, 1, "DVB-S delivery system");
1474 internal->freq = i_params->freq;
1475 internal->srate = i_params->srate;
1476 /*
1477 * search = user search range +
1478			 *	    500 kHz +
1479 * 2 * Tuner_step_size +
1480			 *	    20% of the symbol rate (srate / 5)
1481 */
1482 internal->srch_range = SearchRange + 1500000 + (i_params->srate / 5);
1483 internal->derot_percent = 30;
1484
1485 /* What to do for tuners having no bandwidth setup ? */
1486 /* enable tuner I/O */
1487 stb0899_i2c_gate_ctrl(&state->frontend, 1);
1488
1489 if (state->config->tuner_set_bandwidth)
1490 state->config->tuner_set_bandwidth(fe, (13 * (stb0899_carr_width(state) + SearchRange)) / 10);
1491 if (state->config->tuner_get_bandwidth)
1492 state->config->tuner_get_bandwidth(fe, &internal->tuner_bw);
1493
1494 /* disable tuner I/O */
1495 stb0899_i2c_gate_ctrl(&state->frontend, 0);
1496
1497 /* Set DVB-S1 AGC */
1498 stb0899_write_reg(state, STB0899_AGCRFCFG, 0x11);
1499
1500 /* Run the search algorithm */
1501 dprintk(state->verbose, FE_DEBUG, 1, "running DVB-S search algo ..");
1502 if (stb0899_dvbs_algo(state) == RANGEOK) {
1503 internal->lock = 1;
1504 dprintk(state->verbose, FE_DEBUG, 1,
1505 "-------------------------------------> DVB-S LOCK !");
1506
1507// stb0899_write_reg(state, STB0899_ERRCTRL1, 0x3d); /* Viterbi Errors */
1508// internal->v_status = stb0899_read_reg(state, STB0899_VSTATUS);
1509// internal->err_ctrl = stb0899_read_reg(state, STB0899_ERRCTRL1);
1510// dprintk(state->verbose, FE_DEBUG, 1, "VSTATUS=0x%02x", internal->v_status);
1511// dprintk(state->verbose, FE_DEBUG, 1, "ERR_CTRL=0x%02x", internal->err_ctrl);
1512
1513 return DVBFE_ALGO_SEARCH_SUCCESS;
1514 } else {
1515 internal->lock = 0;
1516
1517 return DVBFE_ALGO_SEARCH_FAILED;
1518 }
1519 break;
1520 case SYS_DVBS2:
1521 internal->freq = i_params->freq;
1522 internal->srate = i_params->srate;
1523 internal->srch_range = SearchRange;
1524
1525 /* enable tuner I/O */
1526 stb0899_i2c_gate_ctrl(&state->frontend, 1);
1527
1528 if (state->config->tuner_set_bandwidth)
1529 state->config->tuner_set_bandwidth(fe, (stb0899_carr_width(state) + SearchRange));
1530 if (state->config->tuner_get_bandwidth)
1531 state->config->tuner_get_bandwidth(fe, &internal->tuner_bw);
1532
1533 /* disable tuner I/O */
1534 stb0899_i2c_gate_ctrl(&state->frontend, 0);
1535
1536// pParams->SpectralInv = pSearch->IQ_Inversion;
1537
1538 /* Set DVB-S2 AGC */
1539 stb0899_write_reg(state, STB0899_AGCRFCFG, 0x1c);
1540
1541 /* Set IterScale =f(MCLK,SYMB) */
1542 stb0899_set_iterations(state);
1543
1544 /* Run the search algorithm */
1545 dprintk(state->verbose, FE_DEBUG, 1, "running DVB-S2 search algo ..");
1546 if (stb0899_dvbs2_algo(state) == DVBS2_FEC_LOCK) {
1547 internal->lock = 1;
1548 dprintk(state->verbose, FE_DEBUG, 1,
1549 "-------------------------------------> DVB-S2 LOCK !");
1550
1551// stb0899_write_reg(state, STB0899_ERRCTRL1, 0xb6); /* Packet Errors */
1552// internal->v_status = stb0899_read_reg(state, STB0899_VSTATUS);
1553// internal->err_ctrl = stb0899_read_reg(state, STB0899_ERRCTRL1);
1554
1555 return DVBFE_ALGO_SEARCH_SUCCESS;
1556 } else {
1557 internal->lock = 0;
1558
1559 return DVBFE_ALGO_SEARCH_FAILED;
1560 }
1561 break;
1562 default:
1563 dprintk(state->verbose, FE_ERROR, 1, "Unsupported delivery system");
1564 return DVBFE_ALGO_SEARCH_INVALID;
1565 }
1566 }
1567
1568 return DVBFE_ALGO_SEARCH_ERROR;
1569}
1570/*
1571 * stb0899_track
1572 * periodically check the signal level against a specified
1573 * threshold level and perform derotator centering.
1574 * Called once we have a lock from a successful search
1575 * event.
1576 *
1577 * Will be called periodically to maintain the
1578 * lock.
1579 *
1580 * Will be used to get parameters as well as info from
1581 * the decoded baseband header
1582 *
1583 * Once a new lock has been established, the internal state
1584 * frequency (internal->freq) is updated
1585 */
1586static int stb0899_track(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
1587{
1588 return 0;
1589}
1590
1591static int stb0899_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
1592{
1593 struct stb0899_state *state = fe->demodulator_priv;
1594 struct stb0899_internal *internal = &state->internal;
1595
1596 dprintk(state->verbose, FE_DEBUG, 1, "Get params");
1597 p->u.qpsk.symbol_rate = internal->srate;
1598
1599 return 0;
1600}
1601
1602static enum dvbfe_algo stb0899_frontend_algo(struct dvb_frontend *fe)
1603{
1604 return DVBFE_ALGO_CUSTOM;
1605}
1606
1607static struct dvb_frontend_ops stb0899_ops = {
1608
1609 .info = {
1610 .name = "STB0899 Multistandard",
1611 .type = FE_QPSK,
1612 .frequency_min = 950000,
1613 .frequency_max = 2150000,
1614 .frequency_stepsize = 0,
1615 .frequency_tolerance = 0,
1616 .symbol_rate_min = 5000000,
1617 .symbol_rate_max = 45000000,
1618
1619 .caps = FE_CAN_INVERSION_AUTO |
1620 FE_CAN_FEC_AUTO |
1621 FE_CAN_QPSK
1622 },
1623
1624 .release = stb0899_release,
1625 .init = stb0899_init,
1626 .sleep = stb0899_sleep,
1627// .wakeup = stb0899_wakeup,
1628
1629 .i2c_gate_ctrl = stb0899_i2c_gate_ctrl,
1630
1631 .get_frontend_algo = stb0899_frontend_algo,
1632 .search = stb0899_search,
1633 .track = stb0899_track,
1634 .get_frontend = stb0899_get_frontend,
1635
1636
1637 .read_status = stb0899_read_status,
1638 .read_snr = stb0899_read_snr,
1639 .read_signal_strength = stb0899_read_signal_strength,
1640 .read_ber = stb0899_read_ber,
1641
1642 .set_voltage = stb0899_set_voltage,
1643 .set_tone = stb0899_set_tone,
1644
1645 .diseqc_send_master_cmd = stb0899_send_diseqc_msg,
1646 .diseqc_recv_slave_reply = stb0899_recv_slave_reply,
1647 .diseqc_send_burst = stb0899_send_diseqc_burst,
1648};
1649
1650struct dvb_frontend *stb0899_attach(struct stb0899_config *config, struct i2c_adapter *i2c)
1651{
1652 struct stb0899_state *state = NULL;
1653 enum stb0899_inversion inversion;
1654
1655 state = kzalloc(sizeof (struct stb0899_state), GFP_KERNEL);
1656 if (state == NULL)
1657 goto error;
1658
1659 inversion = config->inversion;
1660 state->verbose = &verbose;
1661 state->config = config;
1662 state->i2c = i2c;
1663 state->frontend.ops = stb0899_ops;
1664 state->frontend.demodulator_priv = state;
1665 state->internal.inversion = inversion;
1666
1667 stb0899_wakeup(&state->frontend);
1668 if (stb0899_get_dev_id(state) == -ENODEV) {
1669 printk("%s: Exiting .. !\n", __func__);
1670 goto error;
1671 }
1672
1673 printk("%s: Attaching STB0899 \n", __func__);
1674 return &state->frontend;
1675
1676error:
1677 kfree(state);
1678 return NULL;
1679}
1680EXPORT_SYMBOL(stb0899_attach);
1681MODULE_PARM_DESC(verbose, "Set Verbosity level");
1682MODULE_AUTHOR("Manu Abraham");
1683MODULE_DESCRIPTION("STB0899 Multi-Std frontend");
1684MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/stb0899_drv.h b/drivers/media/dvb/frontends/stb0899_drv.h
new file mode 100644
index 000000000000..98b200ce0c34
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb0899_drv.h
@@ -0,0 +1,162 @@
1/*
2 STB0899 Multistandard Frontend driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#ifndef __STB0899_DRV_H
23#define __STB0899_DRV_H
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27
28#include "dvb_frontend.h"
29
30#define STB0899_TSMODE_SERIAL 1
31#define STB0899_CLKPOL_FALLING 2
32#define STB0899_CLKNULL_PARITY 3
33#define STB0899_SYNC_FORCED 4
34#define STB0899_FECMODE_DSS 5
35
36struct stb0899_s1_reg {
37 u16 address;
38 u8 data;
39};
40
41struct stb0899_s2_reg {
42 u16 offset;
43 u32 base_address;
44 u32 data;
45};
46
47enum stb0899_inversion {
48 IQ_SWAP_OFF = 0,
49 IQ_SWAP_ON,
50 IQ_SWAP_AUTO
51};
52
53#define STB0899_GPIO00 0xf140
54#define STB0899_GPIO01 0xf141
55#define STB0899_GPIO02 0xf142
56#define STB0899_GPIO03 0xf143
57#define STB0899_GPIO04 0xf144
58#define STB0899_GPIO05 0xf145
59#define STB0899_GPIO06 0xf146
60#define STB0899_GPIO07 0xf147
61#define STB0899_GPIO08 0xf148
62#define STB0899_GPIO09 0xf149
63#define STB0899_GPIO10 0xf14a
64#define STB0899_GPIO11 0xf14b
65#define STB0899_GPIO12 0xf14c
66#define STB0899_GPIO13 0xf14d
67#define STB0899_GPIO14 0xf14e
68#define STB0899_GPIO15 0xf14f
69#define STB0899_GPIO16 0xf150
70#define STB0899_GPIO17 0xf151
71#define STB0899_GPIO18 0xf152
72#define STB0899_GPIO19 0xf153
73#define STB0899_GPIO20 0xf154
74
75#define STB0899_GPIOPULLUP 0x01 /* Output device is connected to Vdd */
76#define STB0899_GPIOPULLDN 0x00 /* Output device is connected to Vss */
77
78#define STB0899_POSTPROC_GPIO_POWER 0x00
79#define STB0899_POSTPROC_GPIO_LOCK 0x01
80
81/*
82 * Post process output configuration control
83 * 1. POWER ON/OFF (index 0)
84 * 2. FE_HAS_LOCK/LOCK_LOSS (index 1)
85 *
86 * @gpio = one of the above listed GPIOs
87 * @level = output state: pulled up or low
88 */
89struct stb0899_postproc {
90 u16 gpio;
91 u8 level;
92};
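/*
 * A board configuration might supply something like the following
 * (the GPIO choices here are hypothetical):
 *
 *	static const struct stb0899_postproc my_postproc[] = {
 *		[STB0899_POSTPROC_GPIO_POWER] = { STB0899_GPIO14, STB0899_GPIOPULLUP },
 *		[STB0899_POSTPROC_GPIO_LOCK]  = { STB0899_GPIO11, STB0899_GPIOPULLUP },
 *	};
 *
 * stb0899_postproc() in stb0899_drv.c indexes this array with the
 * STB0899_POSTPROC_GPIO_* constants and drives the listed GPIO.
 */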
93
94struct stb0899_config {
95 const struct stb0899_s1_reg *init_dev;
96 const struct stb0899_s2_reg *init_s2_demod;
97 const struct stb0899_s1_reg *init_s1_demod;
98 const struct stb0899_s2_reg *init_s2_fec;
99 const struct stb0899_s1_reg *init_tst;
100
101 const struct stb0899_postproc *postproc;
102
103 enum stb0899_inversion inversion;
104
105 u32 xtal_freq;
106
107 u8 demod_address;
108 u8 ts_output_mode;
109 u8 block_sync_mode;
110 u8 ts_pfbit_toggle;
111
112 u8 clock_polarity;
113 u8 data_clk_parity;
114 u8 fec_mode;
115 u8 data_output_ctl;
116 u8 data_fifo_mode;
117 u8 out_rate_comp;
118 u8 i2c_repeater;
119// int inversion;
120 int lo_clk;
121 int hi_clk;
122
123 u32 esno_ave;
124 u32 esno_quant;
125 u32 avframes_coarse;
126 u32 avframes_fine;
127 u32 miss_threshold;
128 u32 uwp_threshold_acq;
129 u32 uwp_threshold_track;
130 u32 uwp_threshold_sof;
131 u32 sof_search_timeout;
132
133 u32 btr_nco_bits;
134 u32 btr_gain_shift_offset;
135 u32 crl_nco_bits;
136 u32 ldpc_max_iter;
137
138 int (*tuner_set_frequency)(struct dvb_frontend *fe, u32 frequency);
139 int (*tuner_get_frequency)(struct dvb_frontend *fe, u32 *frequency);
140 int (*tuner_set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth);
141 int (*tuner_get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth);
142 int (*tuner_set_rfsiggain)(struct dvb_frontend *fe, u32 rf_gain);
143};
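/*
 * A minimal sketch of how a card driver might fill this in (register table
 * names, addresses and clock values are placeholders, not a real board
 * configuration, and the DVB-S2 acquisition thresholds are omitted):
 *
 *	static struct stb0899_config my_board_config = {
 *		.init_dev	= my_stb0899_init_dev,
 *		.init_s2_demod	= my_stb0899_init_s2_demod,
 *		.init_s1_demod	= my_stb0899_init_s1_demod,
 *		.init_s2_fec	= my_stb0899_init_s2_fec,
 *		.init_tst	= my_stb0899_init_tst,
 *		.postproc	= NULL,
 *		.demod_address	= 0x68,
 *		.xtal_freq	= 27000000,
 *		.inversion	= IQ_SWAP_OFF,
 *		.lo_clk		= 76500000,
 *		.hi_clk		= 99000000,
 *		.ldpc_max_iter	= 70,
 *		.tuner_set_bandwidth = my_tuner_set_bandwidth,
 *		.tuner_get_bandwidth = my_tuner_get_bandwidth,
 *	};
 *
 * and then attach with stb0899_attach(&my_board_config, i2c).
 */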
144
145#if defined(CONFIG_DVB_STB0899) || (defined(CONFIG_DVB_STB0899_MODULE) && defined(MODULE))
146
147extern struct dvb_frontend *stb0899_attach(struct stb0899_config *config,
148 struct i2c_adapter *i2c);
149
150#else
151
152static inline struct dvb_frontend *stb0899_attach(struct stb0899_config *config,
153 struct i2c_adapter *i2c)
154{
155 printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
156 return NULL;
157}
158
159#endif //CONFIG_DVB_STB0899
160
161
162#endif
diff --git a/drivers/media/dvb/frontends/stb0899_priv.h b/drivers/media/dvb/frontends/stb0899_priv.h
new file mode 100644
index 000000000000..24619e3689db
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb0899_priv.h
@@ -0,0 +1,267 @@
1/*
2 STB0899 Multistandard Frontend driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#ifndef __STB0899_PRIV_H
23#define __STB0899_PRIV_H
24
25#include "dvb_frontend.h"
26#include "stb0899_drv.h"
27
28#define FE_ERROR 0
29#define FE_NOTICE 1
30#define FE_INFO 2
31#define FE_DEBUG 3
32#define FE_DEBUGREG 4
33
34#define dprintk(x, y, z, format, arg...) do { \
35 if (z) { \
36 if ((*x > FE_ERROR) && (*x > y)) \
37 printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
38 else if ((*x > FE_NOTICE) && (*x > y)) \
39 printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
40 else if ((*x > FE_INFO) && (*x > y)) \
41 printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
42 else if ((*x > FE_DEBUG) && (*x > y)) \
43 printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
44 } else { \
45 if (*x > y) \
46 printk(format, ##arg); \
47 } \
48} while(0)
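/*
 * Typical use, as seen throughout stb0899_drv.c:
 *
 *	dprintk(state->verbose, FE_DEBUG, 1, "Initializing STB0899 ... ");
 *
 * x is a pointer to the cached verbosity level, y the level of this message
 * and z selects the printk variant that prefixes a KERN_* level and
 * __func__; the message is only emitted when *x exceeds y.
 */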
49
50#define INRANGE(val, x, y) (((x <= val) && (val <= y)) || \
51 ((y <= val) && (val <= x)) ? 1 : 0)
52
53#define BYTE0 0
54#define BYTE1 8
55#define BYTE2 16
56#define BYTE3 24
57
58#define GETBYTE(x, y) (((x) >> (y)) & 0xff)
59#define MAKEWORD32(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
60#define MAKEWORD16(a, b) (((a) << 8) | (b))
61
62#define MIN(x, y) ((x) <= (y) ? (x) : (y))
63#define MAX(x, y) ((x) >= (y) ? (x) : (y))
64#define ABS(x) ((x) >= 0 ? (x) : -(x))
65
66#define LSB(x) ((x & 0xff))
67#define MSB(y) ((y >> 8) & 0xff)
68
69
70#define STB0899_GETFIELD(bitf, val) ((val >> STB0899_OFFST_##bitf) & ((1 << STB0899_WIDTH_##bitf) - 1))
71
72
73#define STB0899_SETFIELD(mask, val, width, offset) (mask & (~(((1 << width) - 1) << \
74 offset))) | ((val & \
75 ((1 << width) - 1)) << offset)
76
77#define STB0899_SETFIELD_VAL(bitf, mask, val) (mask = (mask & (~(((1 << STB0899_WIDTH_##bitf) - 1) <<\
78 STB0899_OFFST_##bitf))) | \
79 (val << STB0899_OFFST_##bitf))
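/*
 * The usual read-modify-write pattern built on these helpers, taken from
 * stb0899_send_diseqc_msg():
 *
 *	reg = stb0899_read_reg(state, STB0899_DISCNTRL1);
 *	STB0899_SETFIELD_VAL(DISPRECHARGE, reg, 1);
 *	stb0899_write_reg(state, STB0899_DISCNTRL1, reg);
 *
 * The bitfield name is token-pasted into its STB0899_OFFST_* and
 * STB0899_WIDTH_* definitions from stb0899_reg.h.
 */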
80
81
82enum stb0899_status {
83 NOAGC1 = 0,
84 AGC1OK,
85 NOTIMING,
86 ANALOGCARRIER,
87 TIMINGOK,
88 NOAGC2,
89 AGC2OK,
90 NOCARRIER,
91 CARRIEROK,
92 NODATA,
93 FALSELOCK,
94 DATAOK,
95 OUTOFRANGE,
96 RANGEOK,
97 DVBS2_DEMOD_LOCK,
98 DVBS2_DEMOD_NOLOCK,
99 DVBS2_FEC_LOCK,
100 DVBS2_FEC_NOLOCK
101};
102
103enum stb0899_modcod {
104 STB0899_DUMMY_PLF,
105 STB0899_QPSK_14,
106 STB0899_QPSK_13,
107 STB0899_QPSK_25,
108 STB0899_QPSK_12,
109 STB0899_QPSK_35,
110 STB0899_QPSK_23,
111 STB0899_QPSK_34,
112 STB0899_QPSK_45,
113 STB0899_QPSK_56,
114 STB0899_QPSK_89,
115 STB0899_QPSK_910,
116 STB0899_8PSK_35,
117 STB0899_8PSK_23,
118 STB0899_8PSK_34,
119 STB0899_8PSK_56,
120 STB0899_8PSK_89,
121 STB0899_8PSK_910,
122 STB0899_16APSK_23,
123 STB0899_16APSK_34,
124 STB0899_16APSK_45,
125 STB0899_16APSK_56,
126 STB0899_16APSK_89,
127 STB0899_16APSK_910,
128 STB0899_32APSK_34,
129 STB0899_32APSK_45,
130 STB0899_32APSK_56,
131 STB0899_32APSK_89,
132 STB0899_32APSK_910
133};
134
135enum stb0899_frame {
136 STB0899_LONG_FRAME,
137 STB0899_SHORT_FRAME
138};
139
140enum stb0899_alpha {
141 RRC_20,
142 RRC_25,
143 RRC_35
144};
145
146struct stb0899_tab {
147 s32 real;
148 s32 read;
149};
150
151enum stb0899_fec {
152 STB0899_FEC_1_2 = 13,
153 STB0899_FEC_2_3 = 18,
154 STB0899_FEC_3_4 = 21,
155 STB0899_FEC_5_6 = 24,
156 STB0899_FEC_6_7 = 25,
157 STB0899_FEC_7_8 = 26
158};
159
160struct stb0899_params {
161 u32 freq; /* Frequency */
162 u32 srate; /* Symbol rate */
163 enum fe_code_rate fecrate;
164};
165
166struct stb0899_internal {
167 u32 master_clk;
168 u32 freq; /* Demod internal Frequency */
169 u32 srate; /* Demod internal Symbol rate */
170 enum stb0899_fec fecrate; /* Demod internal FEC rate */
171 u32 srch_range; /* Demod internal Search Range */
172 u32 sub_range; /* Demod current sub range (Hz) */
173 u32 tuner_step; /* Tuner step (Hz) */
174 u32 tuner_offst; /* Relative offset to carrier (Hz) */
175 u32 tuner_bw; /* Current bandwidth of the tuner (Hz) */
176
177 s32 mclk; /* Masterclock Divider factor (binary) */
178 s32 rolloff; /* Current RollOff of the filter (x100) */
179
180 s16 derot_freq; /* Current derotator frequency (Hz) */
181 s16 derot_percent;
182
183 s16 direction; /* Current derotator search direction */
184 s16 derot_step; /* Derotator step (binary value) */
185 s16 t_derot; /* Derotator time constant (ms) */
186 s16 t_data; /* Data recovery time constant (ms) */
187 s16 sub_dir; /* Direction of the next sub range */
188
189 s16 t_agc1; /* Agc1 time constant (ms) */
190 s16 t_agc2; /* Agc2 time constant (ms) */
191
192 u32 lock; /* Demod internal lock state */
193 enum stb0899_status status; /* Demod internal status */
194
195 /* DVB-S2 */
196 s32 agc_gain; /* RF AGC Gain */
197 s32 center_freq; /* Nominal carrier frequency */
198 s32 av_frame_coarse; /* Coarse carrier freq search frames */
199 s32 av_frame_fine; /* Fine carrier freq search frames */
200
201 s16 step_size; /* Carrier frequency search step size */
202
203 enum stb0899_alpha rrc_alpha;
204 enum stb0899_inversion inversion;
205 enum stb0899_modcod modcod;
206 u8 pilots; /* Pilots found */
207
208 enum stb0899_frame frame_length;
209 u8 v_status; /* VSTATUS */
210 u8 err_ctrl; /* ERRCTRLn */
211};
212
213struct stb0899_state {
214 struct i2c_adapter *i2c;
215 struct stb0899_config *config;
216 struct dvb_frontend frontend;
217
218 u32 *verbose; /* Cached module verbosity level */
219
220 struct stb0899_internal internal; /* Device internal parameters */
221
222 /* cached params from API */
223 enum fe_delivery_system delsys;
224 struct stb0899_params params;
225
226 u32 rx_freq; /* DiSEqC 2.0 receiver freq */
227 struct mutex search_lock;
228};
229/* stb0899.c */
230extern int stb0899_read_reg(struct stb0899_state *state,
231 unsigned int reg);
232
233extern u32 _stb0899_read_s2reg(struct stb0899_state *state,
234 u32 stb0899_i2cdev,
235 u32 stb0899_base_addr,
236 u16 stb0899_reg_offset);
237
238extern int stb0899_read_regs(struct stb0899_state *state,
239 unsigned int reg, u8 *buf,
240 u32 count);
241
242extern int stb0899_write_regs(struct stb0899_state *state,
243 unsigned int reg, u8 *data,
244 u32 count);
245
246extern int stb0899_write_reg(struct stb0899_state *state,
247 unsigned int reg,
248 u8 data);
249
250extern int stb0899_write_s2reg(struct stb0899_state *state,
251 u32 stb0899_i2cdev,
252 u32 stb0899_base_addr,
253 u16 stb0899_reg_offset,
254 u32 stb0899_data);
255
256extern int stb0899_i2c_gate_ctrl(struct dvb_frontend *fe, int enable);
257
258
259#define STB0899_READ_S2REG(DEVICE, REG) (_stb0899_read_s2reg(state, DEVICE, STB0899_BASE_##REG, STB0899_OFF0_##REG))
260//#define STB0899_WRITE_S2REG(DEVICE, REG, DATA) (_stb0899_write_s2reg(state, DEVICE, STB0899_BASE_##REG, STB0899_OFF0_##REG, DATA))
261
262/* stb0899_algo.c */
263extern enum stb0899_status stb0899_dvbs_algo(struct stb0899_state *state);
264extern enum stb0899_status stb0899_dvbs2_algo(struct stb0899_state *state);
265extern long stb0899_carr_width(struct stb0899_state *state);
266
267#endif //__STB0899_PRIV_H
diff --git a/drivers/media/dvb/frontends/stb0899_reg.h b/drivers/media/dvb/frontends/stb0899_reg.h
new file mode 100644
index 000000000000..ba1ed56304a0
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb0899_reg.h
@@ -0,0 +1,2027 @@
1/*
2 STB0899 Multistandard Frontend driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#ifndef __STB0899_REG_H
23#define __STB0899_REG_H
24
25/* S1 */
26#define STB0899_DEV_ID 0xf000
27#define STB0899_CHIP_ID (0x0f << 4)
28#define STB0899_OFFST_CHIP_ID 4
29#define STB0899_WIDTH_CHIP_ID 4
30#define STB0899_CHIP_REL (0x0f << 0)
31#define STB0899_OFFST_CHIP_REL 0
32#define STB0899_WIDTH_CHIP_REL 4
33
34#define STB0899_DEMOD 0xf40e
35#define STB0899_MODECOEFF (0x01 << 0)
36#define STB0899_OFFST_MODECOEFF 0
37#define STB0899_WIDTH_MODECOEFF 1
38
39#define STB0899_RCOMPC 0xf410
40#define STB0899_AGC1CN 0xf412
41#define STB0899_AGC1REF 0xf413
42#define STB0899_RTC 0xf417
43#define STB0899_TMGCFG 0xf418
44#define STB0899_AGC2REF 0xf419
45#define STB0899_TLSR 0xf41a
46
47#define STB0899_CFD 0xf41b
48#define STB0899_CFD_ON (0x01 << 7)
49#define STB0899_OFFST_CFD_ON 7
50#define STB0899_WIDTH_CFD_ON 1
51
52#define STB0899_ACLC 0xf41c
53
54#define STB0899_BCLC 0xf41d
55#define STB0899_OFFST_ALGO 6
56#define STB0899_WIDTH_ALGO_QPSK2 2
57#define STB0899_ALGO_QPSK2 (2 << 6)
58#define STB0899_ALGO_QPSK1 (1 << 6)
59#define STB0899_ALGO_BPSK (0 << 6)
60#define STB0899_OFFST_BETA 0
61#define STB0899_WIDTH_BETA 6
62
63#define STB0899_EQON 0xf41e
64#define STB0899_LDT 0xf41f
65#define STB0899_LDT2 0xf420
66#define STB0899_EQUALREF 0xf425
67#define STB0899_TMGRAMP 0xf426
68#define STB0899_TMGTHD 0xf427
69#define STB0899_IDCCOMP 0xf428
70#define STB0899_QDCCOMP 0xf429
71#define STB0899_POWERI 0xf42a
72#define STB0899_POWERQ 0xf42b
73#define STB0899_RCOMP 0xf42c
74
75#define STB0899_AGCIQIN 0xf42e
76#define STB0899_AGCIQVALUE (0xff << 0)
77#define STB0899_OFFST_AGCIQVALUE 0
78#define STB0899_WIDTH_AGCIQVALUE 8
79
80#define STB0899_AGC2I1 0xf436
81#define STB0899_AGC2I2 0xf437
82
83#define STB0899_TLIR 0xf438
84#define STB0899_TLIR_TMG_LOCK_IND (0xff << 0)
85#define STB0899_OFFST_TLIR_TMG_LOCK_IND 0
86#define STB0899_WIDTH_TLIR_TMG_LOCK_IND 8
87
88#define STB0899_RTF 0xf439
89#define STB0899_RTF_TIMING_LOOP_FREQ (0xff << 0)
90#define STB0899_OFFST_RTF_TIMING_LOOP_FREQ 0
91#define STB0899_WIDTH_RTF_TIMING_LOOP_FREQ 8
92
93#define STB0899_DSTATUS 0xf43a
94#define STB0899_CARRIER_FOUND (0x01 << 7)
95#define STB0899_OFFST_CARRIER_FOUND 7
96#define STB0899_WIDTH_CARRIER_FOUND 1
97#define STB0899_TMG_LOCK (0x01 << 6)
98#define STB0899_OFFST_TMG_LOCK 6
99#define STB0899_WIDTH_TMG_LOCK 1
100#define STB0899_DEMOD_LOCK (0x01 << 5)
101#define STB0899_OFFST_DEMOD_LOCK 5
102#define STB0899_WIDTH_DEMOD_LOCK 1
103#define STB0899_TMG_AUTO (0x01 << 4)
104#define STB0899_OFFST_TMG_AUTO 4
105#define STB0899_WIDTH_TMG_AUTO 1
106#define STB0899_END_MAIN (0x01 << 3)
107#define STB0899_OFFST_END_MAIN 3
108#define STB0899_WIDTH_END_MAIN 1
109
110#define STB0899_LDI 0xf43b
111#define STB0899_OFFST_LDI 0
112#define STB0899_WIDTH_LDI 8
113
114#define STB0899_CFRM 0xf43e
115#define STB0899_OFFST_CFRM 0
116#define STB0899_WIDTH_CFRM 8
117
118#define STB0899_CFRL 0xf43f
119#define STB0899_OFFST_CFRL 0
120#define STB0899_WIDTH_CFRL 8
121
122#define STB0899_NIRM 0xf440
123#define STB0899_OFFST_NIRM 0
124#define STB0899_WIDTH_NIRM 8
125
126#define STB0899_NIRL 0xf441
127#define STB0899_OFFST_NIRL 0
128#define STB0899_WIDTH_NIRL 8
129
130#define STB0899_ISYMB 0xf444
131#define STB0899_QSYMB 0xf445
132
133#define STB0899_SFRH 0xf446
134#define STB0899_OFFST_SFRH 0
135#define STB0899_WIDTH_SFRH 8
136
137#define STB0899_SFRM 0xf447
138#define STB0899_OFFST_SFRM 0
139#define STB0899_WIDTH_SFRM 8
140
141#define STB0899_SFRL 0xf448
142#define STB0899_OFFST_SFRL 4
143#define STB0899_WIDTH_SFRL 4
144
145#define STB0899_SFRUPH 0xf44c
146#define STB0899_SFRUPM 0xf44d
147#define STB0899_SFRUPL 0xf44e
148
149#define STB0899_EQUAI1 0xf4e0
150#define STB0899_EQUAQ1 0xf4e1
151#define STB0899_EQUAI2 0xf4e2
152#define STB0899_EQUAQ2 0xf4e3
153#define STB0899_EQUAI3 0xf4e4
154#define STB0899_EQUAQ3 0xf4e5
155#define STB0899_EQUAI4 0xf4e6
156#define STB0899_EQUAQ4 0xf4e7
157#define STB0899_EQUAI5 0xf4e8
158#define STB0899_EQUAQ5 0xf4e9
159
160#define STB0899_DSTATUS2 0xf50c
161#define STB0899_DS2_TMG_AUTOSRCH (0x01 << 7)
162#define STB0899_OFFST_DS2_TMG_AUTOSRCH	7
163#define STB0899_WIDTH_DS2_TMG_AUTOSRCH 1
164#define STB0899_DS2_END_MAINLOOP (0x01 << 6)
165#define STB0899_OFFST_DS2_END_MAINLOOP 6
166#define STB0899_WIDTH_DS2_END_MAINLOOP 1
167#define STB0899_DS2_CFSYNC (0x01 << 5)
168#define STB0899_OFFST_DS2_CFSYNC 5
169#define STB0899_WIDTH_DS2_CFSYNC 1
170#define STB0899_DS2_TMGLOCK (0x01 << 4)
171#define STB0899_OFFST_DS2_TMGLOCK 4
172#define STB0899_WIDTH_DS2_TMGLOCK 1
173#define STB0899_DS2_DEMODWAIT (0x01 << 3)
174#define STB0899_OFFST_DS2_DEMODWAIT 3
175#define STB0899_WIDTH_DS2_DEMODWAIT 1
176#define STB0899_DS2_FECON (0x01 << 1)
177#define STB0899_OFFST_DS2_FECON 1
178#define STB0899_WIDTH_DS2_FECON 1
179
180/* S1 FEC */
181#define STB0899_VSTATUS 0xf50d
182#define STB0899_VSTATUS_VITERBI_ON (0x01 << 7)
183#define STB0899_OFFST_VSTATUS_VITERBI_ON 7
184#define STB0899_WIDTH_VSTATUS_VITERBI_ON 1
185#define STB0899_VSTATUS_END_LOOPVIT (0x01 << 6)
186#define STB0899_OFFST_VSTATUS_END_LOOPVIT 6
187#define STB0899_WIDTH_VSTATUS_END_LOOPVIT 1
188#define STB0899_VSTATUS_PRFVIT (0x01 << 4)
189#define STB0899_OFFST_VSTATUS_PRFVIT 4
190#define STB0899_WIDTH_VSTATUS_PRFVIT 1
191#define STB0899_VSTATUS_LOCKEDVIT (0x01 << 3)
192#define STB0899_OFFST_VSTATUS_LOCKEDVIT 3
193#define STB0899_WIDTH_VSTATUS_LOCKEDVIT 1
194
195#define STB0899_VERROR 0xf50f
196
197#define STB0899_IQSWAP 0xf523
198#define STB0899_SYM (0x01 << 3)
199#define STB0899_OFFST_SYM 3
200#define STB0899_WIDTH_SYM 1
201
202#define STB0899_FECAUTO1 0xf530
203#define STB0899_DSSSRCH (0x01 << 3)
204#define STB0899_OFFST_DSSSRCH 3
205#define STB0899_WIDTH_DSSSRCH 1
206#define STB0899_SYMSRCH (0x01 << 2)
207#define STB0899_OFFST_SYMSRCH 2
208#define STB0899_WIDTH_SYMSRCH 1
209#define STB0899_QPSKSRCH (0x01 << 1)
210#define STB0899_OFFST_QPSKSRCH 1
211#define STB0899_WIDTH_QPSKSRCH 1
212#define STB0899_BPSKSRCH (0x01 << 0)
213#define STB0899_OFFST_BPSKSRCH 0
214#define STB0899_WIDTH_BPSKSRCH 1
215
216#define STB0899_FECM 0xf533
217#define STB0899_FECM_NOT_DVB (0x01 << 7)
218#define STB0899_OFFST_FECM_NOT_DVB 7
219#define STB0899_WIDTH_FECM_NOT_DVB 1
220#define STB0899_FECM_RSVD1 (0x07 << 4)
221#define STB0899_OFFST_FECM_RSVD1 4
222#define STB0899_WIDTH_FECM_RSVD1 3
223#define STB0899_FECM_VITERBI_ON (0x01 << 3)
224#define STB0899_OFFST_FECM_VITERBI_ON 3
225#define STB0899_WIDTH_FECM_VITERBI_ON 1
226#define STB0899_FECM_RSVD0 (0x01 << 2)
227#define STB0899_OFFST_FECM_RSVD0 2
228#define STB0899_WIDTH_FECM_RSVD0 1
229#define STB0899_FECM_SYNCDIS (0x01 << 1)
230#define STB0899_OFFST_FECM_SYNCDIS 1
231#define STB0899_WIDTH_FECM_SYNCDIS 1
232#define STB0899_FECM_SYMI (0x01 << 0)
233#define STB0899_OFFST_FECM_SYMI 0
234#define STB0899_WIDTH_FECM_SYMI 1
235
236#define STB0899_VTH12 0xf534
237#define STB0899_VTH23 0xf535
238#define STB0899_VTH34 0xf536
239#define STB0899_VTH56 0xf537
240#define STB0899_VTH67 0xf538
241#define STB0899_VTH78 0xf539
242
243#define STB0899_PRVIT 0xf53c
244#define STB0899_PR_7_8 (0x01 << 5)
245#define STB0899_OFFST_PR_7_8 5
246#define STB0899_WIDTH_PR_7_8 1
247#define STB0899_PR_6_7 (0x01 << 4)
248#define STB0899_OFFST_PR_6_7 4
249#define STB0899_WIDTH_PR_6_7 1
250#define STB0899_PR_5_6 (0x01 << 3)
251#define STB0899_OFFST_PR_5_6 3
252#define STB0899_WIDTH_PR_5_6 1
253#define STB0899_PR_3_4 (0x01 << 2)
254#define STB0899_OFFST_PR_3_4 2
255#define STB0899_WIDTH_PR_3_4 1
256#define STB0899_PR_2_3 (0x01 << 1)
257#define STB0899_OFFST_PR_2_3 1
258#define STB0899_WIDTH_PR_2_3 1
259#define STB0899_PR_1_2 (0x01 << 0)
260#define STB0899_OFFST_PR_1_2 0
261#define STB0899_WIDTH_PR_1_2 1
262
263#define STB0899_VITSYNC 0xf53d
264#define STB0899_AM (0x01 << 7)
265#define STB0899_OFFST_AM 7
266#define STB0899_WIDTH_AM 1
267#define STB0899_FREEZE (0x01 << 6)
268#define STB0899_OFFST_FREEZE 6
269#define STB0899_WIDTH_FREEZE 1
270#define STB0899_SN_65536 (0x03 << 4)
271#define STB0899_OFFST_SN_65536 4
272#define STB0899_WIDTH_SN_65536 2
273#define STB0899_SN_16384 (0x01 << 5)
274#define STB0899_OFFST_SN_16384 5
275#define STB0899_WIDTH_SN_16384 1
276#define STB0899_SN_4096 (0x01 << 4)
277#define STB0899_OFFST_SN_4096 4
278#define STB0899_WIDTH_SN_4096 1
279#define STB0899_SN_1024 (0x00 << 4)
280#define STB0899_OFFST_SN_1024 4
281#define STB0899_WIDTH_SN_1024 0
282#define STB0899_TO_128 (0x03 << 2)
283#define STB0899_OFFST_TO_128 2
284#define STB0899_WIDTH_TO_128 2
285#define STB0899_TO_64 (0x01 << 3)
286#define STB0899_OFFST_TO_64 3
287#define STB0899_WIDTH_TO_64 1
288#define STB0899_TO_32 (0x01 << 2)
289#define STB0899_OFFST_TO_32 2
290#define STB0899_WIDTH_TO_32 1
291#define STB0899_TO_16 (0x00 << 2)
292#define STB0899_OFFST_TO_16 2
293#define STB0899_WIDTH_TO_16 0
294#define STB0899_HYST_128 (0x03 << 1)
295#define STB0899_OFFST_HYST_128 1
296#define STB0899_WIDTH_HYST_128 2
297#define STB0899_HYST_64 (0x01 << 1)
298#define STB0899_OFFST_HYST_64 1
299#define STB0899_WIDTH_HYST_64 1
300#define STB0899_HYST_32 (0x01 << 0)
301#define STB0899_OFFST_HYST_32 0
302#define STB0899_WIDTH_HYST_32 1
303#define STB0899_HYST_16 (0x00 << 0)
304#define STB0899_OFFST_HYST_16 0
305#define STB0899_WIDTH_HYST_16 0
306
307#define STB0899_RSULC 0xf548
308#define STB0899_ULDIL_ON (0x01 << 7)
309#define STB0899_OFFST_ULDIL_ON 7
310#define STB0899_WIDTH_ULDIL_ON 1
311#define STB0899_ULAUTO_ON (0x01 << 6)
312#define STB0899_OFFST_ULAUTO_ON 6
313#define STB0899_WIDTH_ULAUTO_ON 1
314#define STB0899_ULRS_ON (0x01 << 5)
315#define STB0899_OFFST_ULRS_ON 5
316#define STB0899_WIDTH_ULRS_ON 1
317#define STB0899_ULDESCRAM_ON (0x01 << 4)
318#define STB0899_OFFST_ULDESCRAM_ON 4
319#define STB0899_WIDTH_ULDESCRAM_ON 1
320#define STB0899_UL_DISABLE (0x01 << 2)
321#define STB0899_OFFST_UL_DISABLE 2
322#define STB0899_WIDTH_UL_DISABLE 1
323#define STB0899_NOFTHRESHOLD (0x01 << 0)
324#define STB0899_OFFST_NOFTHRESHOLD 0
325#define STB0899_WIDTH_NOFTHRESHOLD 1
326
327#define STB0899_RSLLC 0xf54a
328#define STB0899_DEMAPVIT 0xf583
329#define STB0899_DEMAPVIT_RSVD (0x01 << 7)
330#define STB0899_OFFST_DEMAPVIT_RSVD 7
331#define STB0899_WIDTH_DEMAPVIT_RSVD 1
332#define STB0899_DEMAPVIT_KDIVIDER (0x7f << 0)
333#define STB0899_OFFST_DEMAPVIT_KDIVIDER 0
334#define STB0899_WIDTH_DEMAPVIT_KDIVIDER 7
335
336#define STB0899_PLPARM 0xf58c
337#define STB0899_VITMAPPING (0x07 << 5)
338#define STB0899_OFFST_VITMAPPING 5
339#define STB0899_WIDTH_VITMAPPING 3
340#define STB0899_VITMAPPING_BPSK (0x01 << 5)
341#define STB0899_OFFST_VITMAPPING_BPSK 5
342#define STB0899_WIDTH_VITMAPPING_BPSK 1
343#define STB0899_VITMAPPING_QPSK (0x00 << 5)
344#define STB0899_OFFST_VITMAPPING_QPSK 5
345#define STB0899_WIDTH_VITMAPPING_QPSK 0
346#define STB0899_VITCURPUN (0x1f << 0)
347#define STB0899_OFFST_VITCURPUN 0
348#define STB0899_WIDTH_VITCURPUN 5
349#define STB0899_VITCURPUN_1_2 (0x0d << 0)
350#define STB0899_VITCURPUN_2_3 (0x12 << 0)
351#define STB0899_VITCURPUN_3_4 (0x15 << 0)
352#define STB0899_VITCURPUN_5_6 (0x18 << 0)
353#define STB0899_VITCURPUN_6_7 (0x19 << 0)
354#define STB0899_VITCURPUN_7_8 (0x1a << 0)
355
356/* S2 DEMOD */
357#define STB0899_OFF0_DMD_STATUS 0xf300
358#define STB0899_BASE_DMD_STATUS 0x00000000
359#define STB0899_IF_AGC_LOCK (0x01 << 8)
360#define STB0899_OFFST_IF_AGC_LOCK 0
361#define STB0899_WIDTH_IF_AGC_LOCK 1
362
363#define STB0899_OFF0_CRL_FREQ 0xf304
364#define STB0899_BASE_CRL_FREQ 0x00000000
365#define STB0899_CARR_FREQ (0x3fffffff << 0)
366#define STB0899_OFFST_CARR_FREQ 0
367#define STB0899_WIDTH_CARR_FREQ 30
368
369#define STB0899_OFF0_BTR_FREQ 0xf308
370#define STB0899_BASE_BTR_FREQ 0x00000000
371#define STB0899_BTR_FREQ (0xfffffff << 0)
372#define STB0899_OFFST_BTR_FREQ 0
373#define STB0899_WIDTH_BTR_FREQ 28
374
375#define STB0899_OFF0_IF_AGC_GAIN 0xf30c
376#define STB0899_BASE_IF_AGC_GAIN 0x00000000
377#define STB0899_IF_AGC_GAIN		(0x3fff << 0)
378#define STB0899_OFFST_IF_AGC_GAIN 0
379#define STB0899_WIDTH_IF_AGC_GAIN 14
380
381#define STB0899_OFF0_BB_AGC_GAIN 0xf310
382#define STB0899_BASE_BB_AGC_GAIN 0x00000000
383#define STB0899_BB_AGC_GAIN		(0x3fff << 0)
384#define STB0899_OFFST_BB_AGC_GAIN 0
385#define STB0899_WIDTH_BB_AGC_GAIN 14
386
387#define STB0899_OFF0_DC_OFFSET 0xf314
388#define STB0899_BASE_DC_OFFSET 0x00000000
389#define STB0899_I (0xff << 8)
390#define STB0899_OFFST_I 8
391#define STB0899_WIDTH_I 8
392#define STB0899_Q (0xff << 0)
393#define STB0899_OFFST_Q 0
394#define STB0899_WIDTH_Q 8
395
396#define STB0899_OFF0_DMD_CNTRL 0xf31c
397#define STB0899_BASE_DMD_CNTRL 0x00000000
398#define STB0899_ADC0_PINS1IN (0x01 << 6)
399#define STB0899_OFFST_ADC0_PINS1IN 6
400#define STB0899_WIDTH_ADC0_PINS1IN 1
401#define STB0899_IN2COMP1_OFFBIN0 (0x01 << 3)
402#define STB0899_OFFST_IN2COMP1_OFFBIN0 3
403#define STB0899_WIDTH_IN2COMP1_OFFBIN0 1
404#define STB0899_DC_COMP (0x01 << 2)
405#define STB0899_OFFST_DC_COMP 2
406#define STB0899_WIDTH_DC_COMP 1
407#define STB0899_MODMODE (0x03 << 0)
408#define STB0899_OFFST_MODMODE 0
409#define STB0899_WIDTH_MODMODE 2
410
411#define STB0899_OFF0_IF_AGC_CNTRL 0xf320
412#define STB0899_BASE_IF_AGC_CNTRL 0x00000000
413#define STB0899_IF_GAIN_INIT (0x3fff << 13)
414#define STB0899_OFFST_IF_GAIN_INIT 13
415#define STB0899_WIDTH_IF_GAIN_INIT 14
416#define STB0899_IF_GAIN_SENSE (0x01 << 12)
417#define STB0899_OFFST_IF_GAIN_SENSE 12
418#define STB0899_WIDTH_IF_GAIN_SENSE 1
419#define STB0899_IF_LOOP_GAIN (0x0f << 8)
420#define STB0899_OFFST_IF_LOOP_GAIN 8
421#define STB0899_WIDTH_IF_LOOP_GAIN 4
422#define STB0899_IF_LD_GAIN_INIT (0x01 << 7)
423#define STB0899_OFFST_IF_LD_GAIN_INIT 7
424#define STB0899_WIDTH_IF_LD_GAIN_INIT 1
425#define STB0899_IF_AGC_REF (0x7f << 0)
426#define STB0899_OFFST_IF_AGC_REF 0
427#define STB0899_WIDTH_IF_AGC_REF 7
428
429#define STB0899_OFF0_BB_AGC_CNTRL 0xf324
430#define STB0899_BASE_BB_AGC_CNTRL 0x00000000
431#define STB0899_BB_GAIN_INIT (0x3fff << 12)
432#define STB0899_OFFST_BB_GAIN_INIT 12
433#define STB0899_WIDTH_BB_GAIN_INIT 14
434#define STB0899_BB_LOOP_GAIN (0x0f << 8)
435#define STB0899_OFFST_BB_LOOP_GAIN 8
436#define STB0899_WIDTH_BB_LOOP_GAIN 4
437#define STB0899_BB_LD_GAIN_INIT (0x01 << 7)
438#define STB0899_OFFST_BB_LD_GAIN_INIT 7
439#define STB0899_WIDTH_BB_LD_GAIN_INIT 1
440#define STB0899_BB_AGC_REF (0x7f << 0)
441#define STB0899_OFFST_BB_AGC_REF 0
442#define STB0899_WIDTH_BB_AGC_REF 7
443
444#define STB0899_OFF0_CRL_CNTRL 0xf328
445#define STB0899_BASE_CRL_CNTRL 0x00000000
446#define STB0899_CRL_LOCK_CLEAR (0x01 << 5)
447#define STB0899_OFFST_CRL_LOCK_CLEAR 5
448#define STB0899_WIDTH_CRL_LOCK_CLEAR 1
449#define STB0899_CRL_SWPR_CLEAR (0x01 << 4)
450#define STB0899_OFFST_CRL_SWPR_CLEAR 4
451#define STB0899_WIDTH_CRL_SWPR_CLEAR 1
452#define STB0899_CRL_SWP_ENA (0x01 << 3)
453#define STB0899_OFFST_CRL_SWP_ENA 3
454#define STB0899_WIDTH_CRL_SWP_ENA 1
455#define STB0899_CRL_DET_SEL (0x01 << 2)
456#define STB0899_OFFST_CRL_DET_SEL 2
457#define STB0899_WIDTH_CRL_DET_SEL 1
458#define STB0899_CRL_SENSE (0x01 << 1)
459#define STB0899_OFFST_CRL_SENSE 1
460#define STB0899_WIDTH_CRL_SENSE 1
461#define STB0899_CRL_PHSERR_CLEAR (0x01 << 0)
462#define STB0899_OFFST_CRL_PHSERR_CLEAR 0
463#define STB0899_WIDTH_CRL_PHSERR_CLEAR 1
464
465#define STB0899_OFF0_CRL_PHS_INIT 0xf32c
466#define STB0899_BASE_CRL_PHS_INIT 0x00000000
467#define STB0899_CRL_PHS_INIT_31 (0x1 << 30)
468#define STB0899_OFFST_CRL_PHS_INIT_31 30
469#define STB0899_WIDTH_CRL_PHS_INIT_31 1
470#define STB0899_CRL_LD_INIT_PHASE (0x1 << 24)
471#define STB0899_OFFST_CRL_LD_INIT_PHASE 24
472#define STB0899_WIDTH_CRL_LD_INIT_PHASE 1
473#define STB0899_CRL_INIT_PHASE (0xffffff << 0)
474#define STB0899_OFFST_CRL_INIT_PHASE 0
475#define STB0899_WIDTH_CRL_INIT_PHASE 24
476
477#define STB0899_OFF0_CRL_FREQ_INIT 0xf330
478#define STB0899_BASE_CRL_FREQ_INIT 0x00000000
479#define STB0899_CRL_FREQ_INIT_31 (0x1 << 30)
480#define STB0899_OFFST_CRL_FREQ_INIT_31 30
481#define STB0899_WIDTH_CRL_FREQ_INIT_31 1
482#define STB0899_CRL_LD_FREQ_INIT (0x1 << 24)
483#define STB0899_OFFST_CRL_LD_FREQ_INIT 24
484#define STB0899_WIDTH_CRL_LD_FREQ_INIT 1
485#define STB0899_CRL_FREQ_INIT (0xffffff << 0)
486#define STB0899_OFFST_CRL_FREQ_INIT 0
487#define STB0899_WIDTH_CRL_FREQ_INIT 24
488
489#define STB0899_OFF0_CRL_LOOP_GAIN 0xf334
490#define STB0899_BASE_CRL_LOOP_GAIN 0x00000000
491#define STB0899_KCRL2_RSHFT (0xf << 16)
492#define STB0899_OFFST_KCRL2_RSHFT 16
493#define STB0899_WIDTH_KCRL2_RSHFT 4
494#define STB0899_KCRL1 (0xf << 12)
495#define STB0899_OFFST_KCRL1 12
496#define STB0899_WIDTH_KCRL1 4
497#define STB0899_KCRL1_RSHFT (0xf << 8)
498#define STB0899_OFFST_KCRL1_RSHFT 8
499#define STB0899_WIDTH_KCRL1_RSHFT 4
500#define STB0899_KCRL0 (0xf << 4)
501#define STB0899_OFFST_KCRL0 4
502#define STB0899_WIDTH_KCRL0 4
503#define STB0899_KCRL0_RSHFT (0xf << 0)
504#define STB0899_OFFST_KCRL0_RSHFT 0
505#define STB0899_WIDTH_KCRL0_RSHFT 4
506
507#define STB0899_OFF0_CRL_NOM_FREQ 0xf338
508#define STB0899_BASE_CRL_NOM_FREQ 0x00000000
509#define STB0899_CRL_NOM_FREQ (0x3fffffff << 0)
510#define STB0899_OFFST_CRL_NOM_FREQ 0
511#define STB0899_WIDTH_CRL_NOM_FREQ 30
512
513#define STB0899_OFF0_CRL_SWP_RATE 0xf33c
514#define STB0899_BASE_CRL_SWP_RATE 0x00000000
515#define STB0899_CRL_SWP_RATE (0x3fffffff << 0)
516#define STB0899_OFFST_CRL_SWP_RATE 0
517#define STB0899_WIDTH_CRL_SWP_RATE 30
518
519#define STB0899_OFF0_CRL_MAX_SWP 0xf340
520#define STB0899_BASE_CRL_MAX_SWP 0x00000000
521#define STB0899_CRL_MAX_SWP (0x3fffffff << 0)
522#define STB0899_OFFST_CRL_MAX_SWP 0
523#define STB0899_WIDTH_CRL_MAX_SWP 30
524
525#define STB0899_OFF0_CRL_LK_CNTRL 0xf344
526#define STB0899_BASE_CRL_LK_CNTRL 0x00000000
527
528#define STB0899_OFF0_DECIM_CNTRL 0xf348
529#define STB0899_BASE_DECIM_CNTRL 0x00000000
530#define STB0899_BAND_LIMIT_B (0x01 << 5)
531#define STB0899_OFFST_BAND_LIMIT_B 5
532#define STB0899_WIDTH_BAND_LIMIT_B 1
533#define STB0899_WIN_SEL (0x03 << 3)
534#define STB0899_OFFST_WIN_SEL 3
535#define STB0899_WIDTH_WIN_SEL 2
536#define STB0899_DECIM_RATE (0x07 << 0)
537#define STB0899_OFFST_DECIM_RATE 0
538#define STB0899_WIDTH_DECIM_RATE 3
539
540#define STB0899_OFF0_BTR_CNTRL 0xf34c
541#define STB0899_BASE_BTR_CNTRL 0x00000000
542#define STB0899_BTR_FREQ_CORR (0x7ff << 4)
543#define STB0899_OFFST_BTR_FREQ_CORR 4
544#define STB0899_WIDTH_BTR_FREQ_CORR 11
545#define STB0899_BTR_CLR_LOCK (0x01 << 3)
546#define STB0899_OFFST_BTR_CLR_LOCK 3
547#define STB0899_WIDTH_BTR_CLR_LOCK 1
548#define STB0899_BTR_SENSE (0x01 << 2)
549#define STB0899_OFFST_BTR_SENSE 2
550#define STB0899_WIDTH_BTR_SENSE 1
551#define STB0899_BTR_ERR_ENA (0x01 << 1)
552#define STB0899_OFFST_BTR_ERR_ENA 1
553#define STB0899_WIDTH_BTR_ERR_ENA 1
554#define STB0899_INTRP_PHS_SENSE (0x01 << 0)
555#define STB0899_OFFST_INTRP_PHS_SENSE 0
556#define STB0899_WIDTH_INTRP_PHS_SENSE 1
557
558#define STB0899_OFF0_BTR_LOOP_GAIN 0xf350
559#define STB0899_BASE_BTR_LOOP_GAIN 0x00000000
560#define STB0899_KBTR2_RSHFT (0x0f << 16)
561#define STB0899_OFFST_KBTR2_RSHFT 16
562#define STB0899_WIDTH_KBTR2_RSHFT 4
563#define STB0899_KBTR1 (0x0f << 12)
564#define STB0899_OFFST_KBTR1 12
565#define STB0899_WIDTH_KBTR1 4
566#define STB0899_KBTR1_RSHFT (0x0f << 8)
567#define STB0899_OFFST_KBTR1_RSHFT 8
568#define STB0899_WIDTH_KBTR1_RSHFT 4
569#define STB0899_KBTR0 (0x0f << 4)
570#define STB0899_OFFST_KBTR0 4
571#define STB0899_WIDTH_KBTR0 4
572#define STB0899_KBTR0_RSHFT (0x0f << 0)
573#define STB0899_OFFST_KBTR0_RSHFT 0
574#define STB0899_WIDTH_KBTR0_RSHFT 4
575
576#define STB0899_OFF0_BTR_PHS_INIT 0xf354
577#define STB0899_BASE_BTR_PHS_INIT 0x00000000
578#define STB0899_BTR_LD_PHASE_INIT (0x01 << 28)
579#define STB0899_OFFST_BTR_LD_PHASE_INIT 28
580#define STB0899_WIDTH_BTR_LD_PHASE_INIT 1
581#define STB0899_BTR_INIT_PHASE (0xfffffff << 0)
582#define STB0899_OFFST_BTR_INIT_PHASE 0
583#define STB0899_WIDTH_BTR_INIT_PHASE 28
584
585#define STB0899_OFF0_BTR_FREQ_INIT 0xf358
586#define STB0899_BASE_BTR_FREQ_INIT 0x00000000
587#define STB0899_BTR_LD_FREQ_INIT (1 << 28)
588#define STB0899_OFFST_BTR_LD_FREQ_INIT 28
589#define STB0899_WIDTH_BTR_LD_FREQ_INIT 1
590#define STB0899_BTR_FREQ_INIT (0xfffffff << 0)
591#define STB0899_OFFST_BTR_FREQ_INIT 0
592#define STB0899_WIDTH_BTR_FREQ_INIT 28
593
594#define STB0899_OFF0_BTR_NOM_FREQ 0xf35c
595#define STB0899_BASE_BTR_NOM_FREQ 0x00000000
596#define STB0899_BTR_NOM_FREQ (0xfffffff << 0)
597#define STB0899_OFFST_BTR_NOM_FREQ 0
598#define STB0899_WIDTH_BTR_NOM_FREQ 28
599
600#define STB0899_OFF0_BTR_LK_CNTRL 0xf360
601#define STB0899_BASE_BTR_LK_CNTRL 0x00000000
602#define STB0899_BTR_MIN_ENERGY (0x0f << 24)
603#define STB0899_OFFST_BTR_MIN_ENERGY 24
604#define STB0899_WIDTH_BTR_MIN_ENERGY 4
605#define STB0899_BTR_LOCK_TH_LO (0xff << 16)
606#define STB0899_OFFST_BTR_LOCK_TH_LO 16
607#define STB0899_WIDTH_BTR_LOCK_TH_LO 8
608#define STB0899_BTR_LOCK_TH_HI (0xff << 8)
609#define STB0899_OFFST_BTR_LOCK_TH_HI 8
610#define STB0899_WIDTH_BTR_LOCK_TH_HI 8
611#define STB0899_BTR_LOCK_GAIN (0x03 << 6)
612#define STB0899_OFFST_BTR_LOCK_GAIN 6
613#define STB0899_WIDTH_BTR_LOCK_GAIN 2
614#define STB0899_BTR_LOCK_LEAK (0x3f << 0)
615#define STB0899_OFFST_BTR_LOCK_LEAK 0
616#define STB0899_WIDTH_BTR_LOCK_LEAK 6
617
618#define STB0899_OFF0_DECN_CNTRL 0xf364
619#define STB0899_BASE_DECN_CNTRL 0x00000000
620
621#define STB0899_OFF0_TP_CNTRL 0xf368
622#define STB0899_BASE_TP_CNTRL 0x00000000
623
624#define STB0899_OFF0_TP_BUF_STATUS 0xf36c
625#define STB0899_BASE_TP_BUF_STATUS 0x00000000
626#define STB0899_TP_BUFFER_FULL (1 << 0)
627
628#define STB0899_OFF0_DC_ESTIM 0xf37c
629#define STB0899_BASE_DC_ESTIM 0x0000
630#define STB0899_I_DC_ESTIMATE (0xff << 8)
631#define STB0899_OFFST_I_DC_ESTIMATE 8
632#define STB0899_WIDTH_I_DC_ESTIMATE 8
633#define STB0899_Q_DC_ESTIMATE (0xff << 0)
634#define STB0899_OFFST_Q_DC_ESTIMATE 0
635#define STB0899_WIDTH_Q_DC_ESTIMATE 8
636
637#define STB0899_OFF0_FLL_CNTRL 0xf310
638#define STB0899_BASE_FLL_CNTRL 0x00000020
639#define STB0899_CRL_FLL_ACC (0x01 << 4)
640#define STB0899_OFFST_CRL_FLL_ACC 4
641#define STB0899_WIDTH_CRL_FLL_ACC 1
642#define STB0899_FLL_AVG_PERIOD (0x0f << 0)
643#define STB0899_OFFST_FLL_AVG_PERIOD 0
644#define STB0899_WIDTH_FLL_AVG_PERIOD 4
645
646#define STB0899_OFF0_FLL_FREQ_WD 0xf314
647#define STB0899_BASE_FLL_FREQ_WD 0x00000020
648#define STB0899_FLL_FREQ_WD (0xffffffff << 0)
649#define STB0899_OFFST_FLL_FREQ_WD 0
650#define STB0899_WIDTH_FLL_FREQ_WD 32
651
652#define STB0899_OFF0_ANTI_ALIAS_SEL 0xf358
653#define STB0899_BASE_ANTI_ALIAS_SEL 0x00000020
654#define STB0899_ANTI_ALIAS_SELB (0x03 << 0)
655#define STB0899_OFFST_ANTI_ALIAS_SELB 0
656#define STB0899_WIDTH_ANTI_ALIAS_SELB 2
657
658#define STB0899_OFF0_RRC_ALPHA 0xf35c
659#define STB0899_BASE_RRC_ALPHA 0x00000020
660#define STB0899_RRC_ALPHA (0x03 << 0)
661#define STB0899_OFFST_RRC_ALPHA 0
662#define STB0899_WIDTH_RRC_ALPHA 2
663
664#define STB0899_OFF0_DC_ADAPT_LSHFT 0xf360
665#define STB0899_BASE_DC_ADAPT_LSHFT 0x00000020
666#define STB0899_DC_ADAPT_LSHFT (0x07 << 0)
667#define STB0899_OFFST_DC_ADAPT_LSHFT 0
668#define STB0899_WIDTH_DC_ADAPT_LSHFT 3
669
670#define STB0899_OFF0_IMB_OFFSET 0xf364
671#define STB0899_BASE_IMB_OFFSET 0x00000020
672#define STB0899_PHS_IMB_COMP (0xff << 8)
673#define STB0899_OFFST_PHS_IMB_COMP 8
674#define STB0899_WIDTH_PHS_IMB_COMP 8
675#define STB0899_AMPL_IMB_COMP (0xff << 0)
676#define STB0899_OFFST_AMPL_IMB_COMP 0
677#define STB0899_WIDTH_AMPL_IMB_COMP 8
678
679#define STB0899_OFF0_IMB_ESTIMATE 0xf368
680#define STB0899_BASE_IMB_ESTIMATE 0x00000020
681#define STB0899_PHS_IMB_ESTIMATE (0xff << 8)
682#define STB0899_OFFST_PHS_IMB_ESTIMATE 8
683#define STB0899_WIDTH_PHS_IMB_ESTIMATE 8
684#define STB0899_AMPL_IMB_ESTIMATE (0xff << 0)
685#define STB0899_OFFST_AMPL_IMB_ESTIMATE 0
686#define STB0899_WIDTH_AMPL_IMB_ESTIMATE 8
687
688#define STB0899_OFF0_IMB_CNTRL 0xf36c
689#define STB0899_BASE_IMB_CNTRL 0x00000020
690#define STB0899_PHS_ADAPT_LSHFT (0x07 << 4)
691#define STB0899_OFFST_PHS_ADAPT_LSHFT 4
692#define STB0899_WIDTH_PHS_ADAPT_LSHFT 3
693#define STB0899_AMPL_ADAPT_LSHFT (0x07 << 1)
694#define STB0899_OFFST_AMPL_ADAPT_LSHFT 1
695#define STB0899_WIDTH_AMPL_ADAPT_LSHFT 3
696#define STB0899_IMB_COMP (0x01 << 0)
697#define STB0899_OFFST_IMB_COMP 0
698#define STB0899_WIDTH_IMB_COMP 1
699
700#define STB0899_OFF0_IF_AGC_CNTRL2 0xf374
701#define STB0899_BASE_IF_AGC_CNTRL2 0x00000020
702#define STB0899_IF_AGC_LOCK_TH (0xff << 11)
703#define STB0899_OFFST_IF_AGC_LOCK_TH 11
704#define STB0899_WIDTH_IF_AGC_LOCK_TH 8
705#define STB0899_IF_AGC_SD_DIV (0xff << 3)
706#define STB0899_OFFST_IF_AGC_SD_DIV 3
707#define STB0899_WIDTH_IF_AGC_SD_DIV 8
708#define STB0899_IF_AGC_DUMP_PER (0x07 << 0)
709#define STB0899_OFFST_IF_AGC_DUMP_PER 0
710#define STB0899_WIDTH_IF_AGC_DUMP_PER 3
711
712#define STB0899_OFF0_DMD_CNTRL2 0xf378
713#define STB0899_BASE_DMD_CNTRL2 0x00000020
714#define STB0899_SPECTRUM_INVERT (0x01 << 2)
715#define STB0899_OFFST_SPECTRUM_INVERT 2
716#define STB0899_WIDTH_SPECTRUM_INVERT 1
717#define STB0899_AGC_MODE (0x01 << 1)
718#define STB0899_OFFST_AGC_MODE 1
719#define STB0899_WIDTH_AGC_MODE 1
720#define STB0899_CRL_FREQ_ADJ (0x01 << 0)
721#define STB0899_OFFST_CRL_FREQ_ADJ 0
722#define STB0899_WIDTH_CRL_FREQ_ADJ 1
723
724#define STB0899_OFF0_TP_BUFFER 0xf300
725#define STB0899_BASE_TP_BUFFER 0x00000040
726#define STB0899_TP_BUFFER_IN (0xffff << 0)
727#define STB0899_OFFST_TP_BUFFER_IN 0
728#define STB0899_WIDTH_TP_BUFFER_IN 16
729
730#define STB0899_OFF0_TP_BUFFER1 0xf304
731#define STB0899_BASE_TP_BUFFER1 0x00000040
732#define STB0899_OFF0_TP_BUFFER2 0xf308
733#define STB0899_BASE_TP_BUFFER2 0x00000040
734#define STB0899_OFF0_TP_BUFFER3 0xf30c
735#define STB0899_BASE_TP_BUFFER3 0x00000040
736#define STB0899_OFF0_TP_BUFFER4 0xf310
737#define STB0899_BASE_TP_BUFFER4 0x00000040
738#define STB0899_OFF0_TP_BUFFER5 0xf314
739#define STB0899_BASE_TP_BUFFER5 0x00000040
740#define STB0899_OFF0_TP_BUFFER6 0xf318
741#define STB0899_BASE_TP_BUFFER6 0x00000040
742#define STB0899_OFF0_TP_BUFFER7 0xf31c
743#define STB0899_BASE_TP_BUFFER7 0x00000040
744#define STB0899_OFF0_TP_BUFFER8 0xf320
745#define STB0899_BASE_TP_BUFFER8 0x00000040
746#define STB0899_OFF0_TP_BUFFER9 0xf324
747#define STB0899_BASE_TP_BUFFER9 0x00000040
748#define STB0899_OFF0_TP_BUFFER10 0xf328
749#define STB0899_BASE_TP_BUFFER10 0x00000040
750#define STB0899_OFF0_TP_BUFFER11 0xf32c
751#define STB0899_BASE_TP_BUFFER11 0x00000040
752#define STB0899_OFF0_TP_BUFFER12 0xf330
753#define STB0899_BASE_TP_BUFFER12 0x00000040
754#define STB0899_OFF0_TP_BUFFER13 0xf334
755#define STB0899_BASE_TP_BUFFER13 0x00000040
756#define STB0899_OFF0_TP_BUFFER14 0xf338
757#define STB0899_BASE_TP_BUFFER14 0x00000040
758#define STB0899_OFF0_TP_BUFFER15 0xf33c
759#define STB0899_BASE_TP_BUFFER15 0x00000040
760#define STB0899_OFF0_TP_BUFFER16 0xf340
761#define STB0899_BASE_TP_BUFFER16 0x00000040
762#define STB0899_OFF0_TP_BUFFER17 0xf344
763#define STB0899_BASE_TP_BUFFER17 0x00000040
764#define STB0899_OFF0_TP_BUFFER18 0xf348
765#define STB0899_BASE_TP_BUFFER18 0x00000040
766#define STB0899_OFF0_TP_BUFFER19 0xf34c
767#define STB0899_BASE_TP_BUFFER19 0x00000040
768#define STB0899_OFF0_TP_BUFFER20 0xf350
769#define STB0899_BASE_TP_BUFFER20 0x00000040
770#define STB0899_OFF0_TP_BUFFER21 0xf354
771#define STB0899_BASE_TP_BUFFER21 0x00000040
772#define STB0899_OFF0_TP_BUFFER22 0xf358
773#define STB0899_BASE_TP_BUFFER22 0x00000040
774#define STB0899_OFF0_TP_BUFFER23 0xf35c
775#define STB0899_BASE_TP_BUFFER23 0x00000040
776#define STB0899_OFF0_TP_BUFFER24 0xf360
777#define STB0899_BASE_TP_BUFFER24 0x00000040
778#define STB0899_OFF0_TP_BUFFER25 0xf364
779#define STB0899_BASE_TP_BUFFER25 0x00000040
780#define STB0899_OFF0_TP_BUFFER26 0xf368
781#define STB0899_BASE_TP_BUFFER26 0x00000040
782#define STB0899_OFF0_TP_BUFFER27 0xf36c
783#define STB0899_BASE_TP_BUFFER27 0x00000040
784#define STB0899_OFF0_TP_BUFFER28 0xf370
785#define STB0899_BASE_TP_BUFFER28 0x00000040
786#define STB0899_OFF0_TP_BUFFER29 0xf374
787#define STB0899_BASE_TP_BUFFER29 0x00000040
788#define STB0899_OFF0_TP_BUFFER30 0xf378
789#define STB0899_BASE_TP_BUFFER30 0x00000040
790#define STB0899_OFF0_TP_BUFFER31 0xf37c
791#define STB0899_BASE_TP_BUFFER31 0x00000040
792#define STB0899_OFF0_TP_BUFFER32 0xf300
793#define STB0899_BASE_TP_BUFFER32 0x00000060
794#define STB0899_OFF0_TP_BUFFER33 0xf304
795#define STB0899_BASE_TP_BUFFER33 0x00000060
796#define STB0899_OFF0_TP_BUFFER34 0xf308
797#define STB0899_BASE_TP_BUFFER34 0x00000060
798#define STB0899_OFF0_TP_BUFFER35 0xf30c
799#define STB0899_BASE_TP_BUFFER35 0x00000060
800#define STB0899_OFF0_TP_BUFFER36 0xf310
801#define STB0899_BASE_TP_BUFFER36 0x00000060
802#define STB0899_OFF0_TP_BUFFER37 0xf314
803#define STB0899_BASE_TP_BUFFER37 0x00000060
804#define STB0899_OFF0_TP_BUFFER38 0xf318
805#define STB0899_BASE_TP_BUFFER38 0x00000060
806#define STB0899_OFF0_TP_BUFFER39 0xf31c
807#define STB0899_BASE_TP_BUFFER39 0x00000060
808#define STB0899_OFF0_TP_BUFFER40 0xf320
809#define STB0899_BASE_TP_BUFFER40 0x00000060
810#define STB0899_OFF0_TP_BUFFER41 0xf324
811#define STB0899_BASE_TP_BUFFER41 0x00000060
812#define STB0899_OFF0_TP_BUFFER42 0xf328
813#define STB0899_BASE_TP_BUFFER42 0x00000060
814#define STB0899_OFF0_TP_BUFFER43 0xf32c
815#define STB0899_BASE_TP_BUFFER43 0x00000060
816#define STB0899_OFF0_TP_BUFFER44 0xf330
817#define STB0899_BASE_TP_BUFFER44 0x00000060
818#define STB0899_OFF0_TP_BUFFER45 0xf334
819#define STB0899_BASE_TP_BUFFER45 0x00000060
820#define STB0899_OFF0_TP_BUFFER46 0xf338
821#define STB0899_BASE_TP_BUFFER46 0x00000060
822#define STB0899_OFF0_TP_BUFFER47 0xf33c
823#define STB0899_BASE_TP_BUFFER47 0x00000060
824#define STB0899_OFF0_TP_BUFFER48 0xf340
825#define STB0899_BASE_TP_BUFFER48 0x00000060
826#define STB0899_OFF0_TP_BUFFER49 0xf344
827#define STB0899_BASE_TP_BUFFER49 0x00000060
828#define STB0899_OFF0_TP_BUFFER50 0xf348
829#define STB0899_BASE_TP_BUFFER50 0x00000060
830#define STB0899_OFF0_TP_BUFFER51 0xf34c
831#define STB0899_BASE_TP_BUFFER51 0x00000060
832#define STB0899_OFF0_TP_BUFFER52 0xf350
833#define STB0899_BASE_TP_BUFFER52 0x00000060
834#define STB0899_OFF0_TP_BUFFER53 0xf354
835#define STB0899_BASE_TP_BUFFER53 0x00000060
836#define STB0899_OFF0_TP_BUFFER54 0xf358
837#define STB0899_BASE_TP_BUFFER54 0x00000060
838#define STB0899_OFF0_TP_BUFFER55 0xf35c
839#define STB0899_BASE_TP_BUFFER55 0x00000060
840#define STB0899_OFF0_TP_BUFFER56 0xf360
841#define STB0899_BASE_TP_BUFFER56 0x00000060
842#define STB0899_OFF0_TP_BUFFER57 0xf364
843#define STB0899_BASE_TP_BUFFER57 0x00000060
844#define STB0899_OFF0_TP_BUFFER58 0xf368
845#define STB0899_BASE_TP_BUFFER58 0x00000060
846#define STB0899_OFF0_TP_BUFFER59 0xf36c
847#define STB0899_BASE_TP_BUFFER59 0x00000060
848#define STB0899_OFF0_TP_BUFFER60 0xf370
849#define STB0899_BASE_TP_BUFFER60 0x00000060
850#define STB0899_OFF0_TP_BUFFER61 0xf374
851#define STB0899_BASE_TP_BUFFER61 0x00000060
852#define STB0899_OFF0_TP_BUFFER62 0xf378
853#define STB0899_BASE_TP_BUFFER62 0x00000060
854#define STB0899_OFF0_TP_BUFFER63 0xf37c
855#define STB0899_BASE_TP_BUFFER63 0x00000060
856
857#define STB0899_OFF0_RESET_CNTRL 0xf300
858#define STB0899_BASE_RESET_CNTRL 0x00000400
859#define STB0899_DVBS2_RESET (0x01 << 0)
860#define STB0899_OFFST_DVBS2_RESET 0
861#define STB0899_WIDTH_DVBS2_RESET 1
862
863#define STB0899_OFF0_ACM_ENABLE 0xf304
864#define STB0899_BASE_ACM_ENABLE 0x00000400
865#define STB0899_ACM_ENABLE 1
866
867#define STB0899_OFF0_DESCR_CNTRL 0xf30c
868#define STB0899_BASE_DESCR_CNTRL 0x00000400
869#define STB0899_OFFST_DESCR_CNTRL 0
870#define STB0899_WIDTH_DESCR_CNTRL 16
871
872#define STB0899_OFF0_UWP_CNTRL1 0xf320
873#define STB0899_BASE_UWP_CNTRL1 0x00000400
874#define STB0899_UWP_TH_SOF (0x7fff << 11)
875#define STB0899_OFFST_UWP_TH_SOF 11
876#define STB0899_WIDTH_UWP_TH_SOF 15
877#define STB0899_UWP_ESN0_QUANT (0xff << 3)
878#define STB0899_OFFST_UWP_ESN0_QUANT 3
879#define STB0899_WIDTH_UWP_ESN0_QUANT 8
880#define STB0899_UWP_ESN0_AVE (0x03 << 1)
881#define STB0899_OFFST_UWP_ESN0_AVE 1
882#define STB0899_WIDTH_UWP_ESN0_AVE 2
883#define STB0899_UWP_START (0x01 << 0)
884#define STB0899_OFFST_UWP_START 0
885#define STB0899_WIDTH_UWP_START 1
886
887#define STB0899_OFF0_UWP_CNTRL2 0xf324
888#define STB0899_BASE_UWP_CNTRL2 0x00000400
889#define STB0899_UWP_MISS_TH (0xff << 16)
890#define STB0899_OFFST_UWP_MISS_TH 16
891#define STB0899_WIDTH_UWP_MISS_TH 8
892#define STB0899_FE_FINE_TRK (0xff << 8)
893#define STB0899_OFFST_FE_FINE_TRK 8
894#define STB0899_WIDTH_FE_FINE_TRK 8
895#define STB0899_FE_COARSE_TRK (0xff << 0)
896#define STB0899_OFFST_FE_COARSE_TRK 0
897#define STB0899_WIDTH_FE_COARSE_TRK 8
898
899#define STB0899_OFF0_UWP_STAT1 0xf328
900#define STB0899_BASE_UWP_STAT1 0x00000400
901#define STB0899_UWP_STATE (0x03ff << 15)
902#define STB0899_OFFST_UWP_STATE 15
903#define STB0899_WIDTH_UWP_STATE 10
904#define STB0899_UW_MAX_PEAK (0x7fff << 0)
905#define STB0899_OFFST_UW_MAX_PEAK 0
906#define STB0899_WIDTH_UW_MAX_PEAK 15
907
908#define STB0899_OFF0_UWP_STAT2 0xf32c
909#define STB0899_BASE_UWP_STAT2 0x00000400
910#define STB0899_ESN0_EST (0x07ffff << 7)
911#define STB0899_OFFST_ESN0_EST 7
912#define STB0899_WIDTH_ESN0_EST 19
913#define STB0899_UWP_DECODE_MOD (0x7f << 0)
914#define STB0899_OFFST_UWP_DECODE_MOD 0
915#define STB0899_WIDTH_UWP_DECODE_MOD 7
916
917#define STB0899_OFF0_DMD_CORE_ID 0xf334
918#define STB0899_BASE_DMD_CORE_ID 0x00000400
919#define STB0899_CORE_ID (0xffffffff << 0)
920#define STB0899_OFFST_CORE_ID 0
921#define STB0899_WIDTH_CORE_ID 32
922
923#define STB0899_OFF0_DMD_VERSION_ID 0xf33c
924#define STB0899_BASE_DMD_VERSION_ID 0x00000400
925#define STB0899_VERSION_ID (0xff << 0)
926#define STB0899_OFFST_VERSION_ID 0
927#define STB0899_WIDTH_VERSION_ID 8
928
929#define STB0899_OFF0_DMD_STAT2 0xf340
930#define STB0899_BASE_DMD_STAT2 0x00000400
931#define STB0899_CSM_LOCK (0x01 << 1)
932#define STB0899_OFFST_CSM_LOCK 1
933#define STB0899_WIDTH_CSM_LOCK 1
934#define STB0899_UWP_LOCK (0x01 << 0)
935#define STB0899_OFFST_UWP_LOCK 0
936#define STB0899_WIDTH_UWP_LOCK 1
937
938#define STB0899_OFF0_FREQ_ADJ_SCALE 0xf344
939#define STB0899_BASE_FREQ_ADJ_SCALE 0x00000400
940#define STB0899_FREQ_ADJ_SCALE (0x0fff << 0)
941#define STB0899_OFFST_FREQ_ADJ_SCALE 0
942#define STB0899_WIDTH_FREQ_ADJ_SCALE 12
943
944#define STB0899_OFF0_UWP_CNTRL3 0xf34c
945#define STB0899_BASE_UWP_CNTRL3 0x00000400
946#define STB0899_UWP_TH_TRACK (0x7fff << 15)
947#define STB0899_OFFST_UWP_TH_TRACK 15
948#define STB0899_WIDTH_UWP_TH_TRACK 15
949#define STB0899_UWP_TH_ACQ (0x7fff << 0)
950#define STB0899_OFFST_UWP_TH_ACQ 0
951#define STB0899_WIDTH_UWP_TH_ACQ 15
952
953#define STB0899_OFF0_SYM_CLK_SEL 0xf350
954#define STB0899_BASE_SYM_CLK_SEL 0x00000400
955#define STB0899_SYM_CLK_SEL (0x03 << 0)
956#define STB0899_OFFST_SYM_CLK_SEL 0
957#define STB0899_WIDTH_SYM_CLK_SEL 2
958
959#define STB0899_OFF0_SOF_SRCH_TO 0xf354
960#define STB0899_BASE_SOF_SRCH_TO 0x00000400
961#define STB0899_SOF_SEARCH_TIMEOUT (0x3fffff << 0)
962#define STB0899_OFFST_SOF_SEARCH_TIMEOUT 0
963#define STB0899_WIDTH_SOF_SEARCH_TIMEOUT 22
964
965#define STB0899_OFF0_ACQ_CNTRL1 0xf358
966#define STB0899_BASE_ACQ_CNTRL1 0x00000400
967#define STB0899_FE_FINE_ACQ (0xff << 8)
968#define STB0899_OFFST_FE_FINE_ACQ 8
969#define STB0899_WIDTH_FE_FINE_ACQ 8
970#define STB0899_FE_COARSE_ACQ (0xff << 0)
971#define STB0899_OFFST_FE_COARSE_ACQ 0
972#define STB0899_WIDTH_FE_COARSE_ACQ 8
973
974#define STB0899_OFF0_ACQ_CNTRL2 0xf35c
975#define STB0899_BASE_ACQ_CNTRL2 0x00000400
976#define STB0899_ZIGZAG (0x01 << 25)
977#define STB0899_OFFST_ZIGZAG 25
978#define STB0899_WIDTH_ZIGZAG 1
979#define STB0899_NUM_STEPS (0xff << 17)
980#define STB0899_OFFST_NUM_STEPS 17
981#define STB0899_WIDTH_NUM_STEPS 8
982#define STB0899_FREQ_STEPSIZE (0x1ffff << 0)
983#define STB0899_OFFST_FREQ_STEPSIZE 0
984#define STB0899_WIDTH_FREQ_STEPSIZE 17
985
986#define STB0899_OFF0_ACQ_CNTRL3 0xf360
987#define STB0899_BASE_ACQ_CNTRL3 0x00000400
988#define STB0899_THRESHOLD_SCL (0x3f << 23)
989#define STB0899_OFFST_THRESHOLD_SCL 23
990#define STB0899_WIDTH_THRESHOLD_SCL 6
991#define STB0899_UWP_TH_SRCH (0x7fff << 8)
992#define STB0899_OFFST_UWP_TH_SRCH 8
993#define STB0899_WIDTH_UWP_TH_SRCH 15
994#define STB0899_AUTO_REACQUIRE (0x01 << 7)
995#define STB0899_OFFST_AUTO_REACQUIRE 7
996#define STB0899_WIDTH_AUTO_REACQUIRE 1
997#define STB0899_TRACK_LOCK_SEL (0x01 << 6)
998#define STB0899_OFFST_TRACK_LOCK_SEL 6
999#define STB0899_WIDTH_TRACK_LOCK_SEL 1
1000#define STB0899_ACQ_SEARCH_MODE (0x03 << 4)
1001#define STB0899_OFFST_ACQ_SEARCH_MODE 4
1002#define STB0899_WIDTH_ACQ_SEARCH_MODE 2
1003#define STB0899_CONFIRM_FRAMES (0x0f << 0)
1004#define STB0899_OFFST_CONFIRM_FRAMES 0
1005#define STB0899_WIDTH_CONFIRM_FRAMES 4
1006
1007#define STB0899_OFF0_FE_SETTLE 0xf364
1008#define STB0899_BASE_FE_SETTLE 0x00000400
1009#define STB0899_SETTLING_TIME (0x3fffff << 0)
1010#define STB0899_OFFST_SETTLING_TIME 0
1011#define STB0899_WIDTH_SETTLING_TIME 22
1012
1013#define STB0899_OFF0_AC_DWELL 0xf368
1014#define STB0899_BASE_AC_DWELL 0x00000400
1015#define STB0899_DWELL_TIME (0x3fffff << 0)
1016#define STB0899_OFFST_DWELL_TIME 0
1017#define STB0899_WIDTH_DWELL_TIME 22
1018
1019#define STB0899_OFF0_ACQUIRE_TRIG 0xf36c
1020#define STB0899_BASE_ACQUIRE_TRIG 0x00000400
1021#define STB0899_ACQUIRE (0x01 << 0)
1022#define STB0899_OFFST_ACQUIRE 0
1023#define STB0899_WIDTH_ACQUIRE 1
1024
1025#define STB0899_OFF0_LOCK_LOST 0xf370
1026#define STB0899_BASE_LOCK_LOST 0x00000400
1027#define STB0899_LOCK_LOST (0x01 << 0)
1028#define STB0899_OFFST_LOCK_LOST 0
1029#define STB0899_WIDTH_LOCK_LOST 1
1030
1031#define STB0899_OFF0_ACQ_STAT1 0xf374
1032#define STB0899_BASE_ACQ_STAT1 0x00000400
1033#define STB0899_STEP_FREQ (0x1fffff << 11)
1034#define STB0899_OFFST_STEP_FREQ 11
1035#define STB0899_WIDTH_STEP_FREQ 21
1036#define STB0899_ACQ_STATE (0x07 << 8)
1037#define STB0899_OFFST_ACQ_STATE 8
1038#define STB0899_WIDTH_ACQ_STATE 3
1039#define STB0899_UW_DETECT_COUNT (0xff << 0)
1040#define STB0899_OFFST_UW_DETECT_COUNT 0
1041#define STB0899_WIDTH_UW_DETECT_COUNT 8
1042
1043#define STB0899_OFF0_ACQ_TIMEOUT 0xf378
1044#define STB0899_BASE_ACQ_TIMEOUT 0x00000400
1045#define STB0899_ACQ_TIMEOUT (0x3fffff << 0)
1046#define STB0899_OFFST_ACQ_TIMEOUT 0
1047#define STB0899_WIDTH_ACQ_TIMEOUT 22
1048
1049#define STB0899_OFF0_ACQ_TIME 0xf37c
1050#define STB0899_BASE_ACQ_TIME 0x00000400
1051#define STB0899_ACQ_TIME_SYM (0xffffff << 0)
1052#define STB0899_OFFST_ACQ_TIME_SYM 0
1053#define STB0899_WIDTH_ACQ_TIME_SYM 24
1054
1055#define STB0899_OFF0_FINAL_AGC_CNTRL 0xf308
1056#define STB0899_BASE_FINAL_AGC_CNTRL 0x00000440
1057#define STB0899_FINAL_GAIN_INIT (0x3fff << 12)
1058#define STB0899_OFFST_FINAL_GAIN_INIT 12
1059#define STB0899_WIDTH_FINAL_GAIN_INIT 14
1060#define STB0899_FINAL_LOOP_GAIN (0x0f << 8)
1061#define STB0899_OFFST_FINAL_LOOP_GAIN 8
1062#define STB0899_WIDTH_FINAL_LOOP_GAIN 4
1063#define STB0899_FINAL_LD_GAIN_INIT (0x01 << 7)
1064#define STB0899_OFFST_FINAL_LD_GAIN_INIT 7
1065#define STB0899_WIDTH_FINAL_LD_GAIN_INIT 1
1066#define STB0899_FINAL_AGC_REF (0x7f << 0)
1067#define STB0899_OFFST_FINAL_AGC_REF 0
1068#define STB0899_WIDTH_FINAL_AGC_REF 7
1069
1070#define STB0899_OFF0_FINAL_AGC_GAIN 0xf30c
1071#define STB0899_BASE_FINAL_AGC_GAIN 0x00000440
1072#define STB0899_FINAL_AGC_GAIN (0x3fff << 0)
1073#define STB0899_OFFST_FINAL_AGC_GAIN 0
1074#define STB0899_WIDTH_FINAL_AGC_GAIN 14
1075
1076#define STB0899_OFF0_EQUALIZER_INIT 0xf310
1077#define STB0899_BASE_EQUALIZER_INIT 0x00000440
1078#define STB0899_EQ_SRST (0x01 << 1)
1079#define STB0899_OFFST_EQ_SRST 1
1080#define STB0899_WIDTH_EQ_SRST 1
1081#define STB0899_EQ_INIT (0x01 << 0)
1082#define STB0899_OFFST_EQ_INIT 0
1083#define STB0899_WIDTH_EQ_INIT 1
1084
1085#define STB0899_OFF0_EQ_CNTRL 0xf314
1086#define STB0899_BASE_EQ_CNTRL 0x00000440
1087#define STB0899_EQ_ADAPT_MODE (0x01 << 18)
1088#define STB0899_OFFST_EQ_ADAPT_MODE 18
1089#define STB0899_WIDTH_EQ_ADAPT_MODE 1
1090#define STB0899_EQ_DELAY (0x0f << 14)
1091#define STB0899_OFFST_EQ_DELAY 14
1092#define STB0899_WIDTH_EQ_DELAY 4
1093#define STB0899_EQ_QUANT_LEVEL (0xff << 6)
1094#define STB0899_OFFST_EQ_QUANT_LEVEL 6
1095#define STB0899_WIDTH_EQ_QUANT_LEVEL 8
1096#define STB0899_EQ_DISABLE_UPDATE (0x01 << 5)
1097#define STB0899_OFFST_EQ_DISABLE_UPDATE 5
1098#define STB0899_WIDTH_EQ_DISABLE_UPDATE 1
1099#define STB0899_EQ_BYPASS (0x01 << 4)
1100#define STB0899_OFFST_EQ_BYPASS 4
1101#define STB0899_WIDTH_EQ_BYPASS 1
1102#define STB0899_EQ_SHIFT (0x0f << 0)
1103#define STB0899_OFFST_EQ_SHIFT 0
1104#define STB0899_WIDTH_EQ_SHIFT 4
1105
1106#define STB0899_OFF0_EQ_I_INIT_COEFF_0 0xf320
1107#define STB0899_OFF1_EQ_I_INIT_COEFF_1 0xf324
1108#define STB0899_OFF2_EQ_I_INIT_COEFF_2 0xf328
1109#define STB0899_OFF3_EQ_I_INIT_COEFF_3 0xf32c
1110#define STB0899_OFF4_EQ_I_INIT_COEFF_4 0xf330
1111#define STB0899_OFF5_EQ_I_INIT_COEFF_5 0xf334
1112#define STB0899_OFF6_EQ_I_INIT_COEFF_6 0xf338
1113#define STB0899_OFF7_EQ_I_INIT_COEFF_7 0xf33c
1114#define STB0899_OFF8_EQ_I_INIT_COEFF_8 0xf340
1115#define STB0899_OFF9_EQ_I_INIT_COEFF_9 0xf344
1116#define STB0899_OFFa_EQ_I_INIT_COEFF_10 0xf348
1117#define STB0899_BASE_EQ_I_INIT_COEFF_N 0x00000440
1118#define STB0899_EQ_I_INIT_COEFF_N (0x0fff << 0)
1119#define STB0899_OFFST_EQ_I_INIT_COEFF_N 0
1120#define STB0899_WIDTH_EQ_I_INIT_COEFF_N 12
1121
1122#define STB0899_OFF0_EQ_Q_INIT_COEFF_0 0xf350
1123#define STB0899_OFF1_EQ_Q_INIT_COEFF_1 0xf354
1124#define STB0899_OFF2_EQ_Q_INIT_COEFF_2 0xf358
1125#define STB0899_OFF3_EQ_Q_INIT_COEFF_3 0xf35c
1126#define STB0899_OFF4_EQ_Q_INIT_COEFF_4 0xf360
1127#define STB0899_OFF5_EQ_Q_INIT_COEFF_5 0xf364
1128#define STB0899_OFF6_EQ_Q_INIT_COEFF_6 0xf368
1129#define STB0899_OFF7_EQ_Q_INIT_COEFF_7 0xf36c
1130#define STB0899_OFF8_EQ_Q_INIT_COEFF_8 0xf370
1131#define STB0899_OFF9_EQ_Q_INIT_COEFF_9 0xf374
1132#define STB0899_OFFa_EQ_Q_INIT_COEFF_10 0xf378
1133#define STB0899_BASE_EQ_Q_INIT_COEFF_N 0x00000440
1134#define STB0899_EQ_Q_INIT_COEFF_N (0x0fff << 0)
1135#define STB0899_OFFST_EQ_Q_INIT_COEFF_N 0
1136#define STB0899_WIDTH_EQ_Q_INIT_COEFF_N 12
1137
1138#define STB0899_OFF0_EQ_I_OUT_COEFF_0 0xf300
1139#define STB0899_OFF1_EQ_I_OUT_COEFF_1 0xf304
1140#define STB0899_OFF2_EQ_I_OUT_COEFF_2 0xf308
1141#define STB0899_OFF3_EQ_I_OUT_COEFF_3 0xf30c
1142#define STB0899_OFF4_EQ_I_OUT_COEFF_4 0xf310
1143#define STB0899_OFF5_EQ_I_OUT_COEFF_5 0xf314
1144#define STB0899_OFF6_EQ_I_OUT_COEFF_6 0xf318
1145#define STB0899_OFF7_EQ_I_OUT_COEFF_7 0xf31c
1146#define STB0899_OFF8_EQ_I_OUT_COEFF_8 0xf320
1147#define STB0899_OFF9_EQ_I_OUT_COEFF_9 0xf324
1148#define STB0899_OFFa_EQ_I_OUT_COEFF_10 0xf328
1149#define STB0899_BASE_EQ_I_OUT_COEFF_N 0x00000460
1150#define STB0899_EQ_I_OUT_COEFF_N (0x0fff << 0)
1151#define STB0899_OFFST_EQ_I_OUT_COEFF_N 0
1152#define STB0899_WIDTH_EQ_I_OUT_COEFF_N 12
1153
1154#define STB0899_OFF0_EQ_Q_OUT_COEFF_0 0xf330
1155#define STB0899_OFF1_EQ_Q_OUT_COEFF_1 0xf334
1156#define STB0899_OFF2_EQ_Q_OUT_COEFF_2 0xf338
1157#define STB0899_OFF3_EQ_Q_OUT_COEFF_3 0xf33c
1158#define STB0899_OFF4_EQ_Q_OUT_COEFF_4 0xf340
1159#define STB0899_OFF5_EQ_Q_OUT_COEFF_5 0xf344
1160#define STB0899_OFF6_EQ_Q_OUT_COEFF_6 0xf348
1161#define STB0899_OFF7_EQ_Q_OUT_COEFF_7 0xf34c
1162#define STB0899_OFF8_EQ_Q_OUT_COEFF_8 0xf350
1163#define STB0899_OFF9_EQ_Q_OUT_COEFF_9 0xf354
1164#define STB0899_OFFa_EQ_Q_OUT_COEFF_10 0xf358
1165#define STB0899_BASE_EQ_Q_OUT_COEFF_N 0x00000460
1166#define STB0899_EQ_Q_OUT_COEFF_N (0x0fff << 0)
1167#define STB0899_OFFST_EQ_Q_OUT_COEFF_N 0
1168#define STB0899_WIDTH_EQ_Q_OUT_COEFF_N 12
1169
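/*
 * Editor's note -- hypothetical illustration, not driver code: wide S2
 * demodulator fields such as CARR_FREQ and BTR_FREQ are narrower than
 * 32 bits; where such a field is interpreted as a signed (two's-complement)
 * offset, a generic sign-extension helper might be written as:
 *
 *	static inline s32 stb0899_ex_sign_extend(u32 field, unsigned int width)
 *	{
 *		u32 sign = 1U << (width - 1);
 *
 *		return (s32)((field ^ sign) - sign);
 *	}
 *
 *	// e.g. stb0899_ex_sign_extend(raw & STB0899_CARR_FREQ,
 *	//			       STB0899_WIDTH_CARR_FREQ)
 */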
1170/* S2 FEC */
1171#define STB0899_OFF0_BLOCK_LNGTH 0xfa04
1172#define STB0899_BASE_BLOCK_LNGTH 0x00000000
1173#define STB0899_BLOCK_LENGTH (0xff << 0)
1174#define STB0899_OFFST_BLOCK_LENGTH 0
1175#define STB0899_WIDTH_BLOCK_LENGTH 8
1176
1177#define STB0899_OFF0_ROW_STR 0xfa08
1178#define STB0899_BASE_ROW_STR 0x00000000
1179#define STB0899_ROW_STRIDE (0xff << 0)
1180#define STB0899_OFFST_ROW_STRIDE 0
1181#define STB0899_WIDTH_ROW_STRIDE 8
1182
1183#define STB0899_OFF0_MAX_ITER 0xfa0c
1184#define STB0899_BASE_MAX_ITER 0x00000000
1185#define STB0899_MAX_ITERATIONS (0xff << 0)
1186#define STB0899_OFFST_MAX_ITERATIONS 0
1187#define STB0899_WIDTH_MAX_ITERATIONS 8
1188
1189#define STB0899_OFF0_BN_END_ADDR 0xfa10
1190#define STB0899_BASE_BN_END_ADDR 0x00000000
1191#define STB0899_BN_END_ADDR (0x0fff << 0)
1192#define STB0899_OFFST_BN_END_ADDR 0
1193#define STB0899_WIDTH_BN_END_ADDR 12
1194
1195#define STB0899_OFF0_CN_END_ADDR 0xfa14
1196#define STB0899_BASE_CN_END_ADDR 0x00000000
1197#define STB0899_CN_END_ADDR (0x0fff << 0)
1198#define STB0899_OFFST_CN_END_ADDR 0
1199#define STB0899_WIDTH_CN_END_ADDR 12
1200
1201#define STB0899_OFF0_INFO_LENGTH 0xfa1c
1202#define STB0899_BASE_INFO_LENGTH 0x00000000
1203#define STB0899_INFO_LENGTH (0xff << 0)
1204#define STB0899_OFFST_INFO_LENGTH 0
1205#define STB0899_WIDTH_INFO_LENGTH 8
1206
1207#define STB0899_OFF0_BOT_ADDR 0xfa20
1208#define STB0899_BASE_BOT_ADDR 0x00000000
1209#define STB0899_BOTTOM_BASE_ADDR (0x03ff << 0)
1210#define STB0899_OFFST_BOTTOM_BASE_ADDR 0
1211#define STB0899_WIDTH_BOTTOM_BASE_ADDR 10
1212
1213#define STB0899_OFF0_BCH_BLK_LN 0xfa24
1214#define STB0899_BASE_BCH_BLK_LN 0x00000000
1215#define STB0899_BCH_BLOCK_LENGTH (0xffff << 0)
1216#define STB0899_OFFST_BCH_BLOCK_LENGTH 0
1217#define STB0899_WIDTH_BCH_BLOCK_LENGTH 16
1218
1219#define STB0899_OFF0_BCH_T 0xfa28
1220#define STB0899_BASE_BCH_T 0x00000000
1221#define STB0899_BCH_T (0x0f << 0)
1222#define STB0899_OFFST_BCH_T 0
1223#define STB0899_WIDTH_BCH_T 4
1224
1225#define STB0899_OFF0_CNFG_MODE 0xfa00
1226#define STB0899_BASE_CNFG_MODE 0x00000800
1227#define STB0899_MODCOD (0x1f << 2)
1228#define STB0899_OFFST_MODCOD 2
1229#define STB0899_WIDTH_MODCOD 5
1230#define STB0899_MODCOD_SEL (0x01 << 1)
1231#define STB0899_OFFST_MODCOD_SEL 1
1232#define STB0899_WIDTH_MODCOD_SEL 1
1233#define STB0899_CONFIG_MODE (0x01 << 0)
1234#define STB0899_OFFST_CONFIG_MODE 0
1235#define STB0899_WIDTH_CONFIG_MODE 1
1236
1237#define STB0899_OFF0_LDPC_STAT 0xfa04
1238#define STB0899_BASE_LDPC_STAT 0x00000800
1239#define STB0899_ITERATION (0xff << 3)
1240#define STB0899_OFFST_ITERATION 3
1241#define STB0899_WIDTH_ITERATION 8
1242#define STB0899_LDPC_DEC_STATE (0x07 << 0)
1243#define STB0899_OFFST_LDPC_DEC_STATE 0
1244#define STB0899_WIDTH_LDPC_DEC_STATE 3
1245
1246#define STB0899_OFF0_ITER_SCALE 0xfa08
1247#define STB0899_BASE_ITER_SCALE 0x00000800
1248#define STB0899_ITERATION_SCALE (0xff << 0)
1249#define STB0899_OFFST_ITERATION_SCALE 0
1250#define STB0899_WIDTH_ITERATION_SCALE 8
1251
1252#define STB0899_OFF0_INPUT_MODE 0xfa0c
1253#define STB0899_BASE_INPUT_MODE 0x00000800
1254#define STB0899_SD_BLOCK1_STREAM0 (0x01 << 0)
1255#define STB0899_OFFST_SD_BLOCK1_STREAM0 0
1256#define STB0899_WIDTH_SD_BLOCK1_STREAM0 1
1257
1258#define STB0899_OFF0_LDPCDECRST 0xfa10
1259#define STB0899_BASE_LDPCDECRST 0x00000800
1260#define STB0899_LDPC_DEC_RST (0x01 << 0)
1261#define STB0899_OFFST_LDPC_DEC_RST 0
1262#define STB0899_WIDTH_LDPC_DEC_RST 1
1263
1264#define STB0899_OFF0_CLK_PER_BYTE_RW 0xfa14
1265#define STB0899_BASE_CLK_PER_BYTE_RW 0x00000800
1266#define STB0899_CLKS_PER_BYTE (0x0f << 0)
1267#define STB0899_OFFST_CLKS_PER_BYTE 0
1268#define STB0899_WIDTH_CLKS_PER_BYTE 5
1269
1270#define STB0899_OFF0_BCH_ERRORS 0xfa18
1271#define STB0899_BASE_BCH_ERRORS 0x00000800
1272#define STB0899_BCH_ERRORS (0x0f << 0)
1273#define STB0899_OFFST_BCH_ERRORS 0
1274#define STB0899_WIDTH_BCH_ERRORS 4
1275
1276#define STB0899_OFF0_LDPC_ERRORS 0xfa1c
1277#define STB0899_BASE_LDPC_ERRORS 0x00000800
1278#define STB0899_LDPC_ERRORS (0xffff << 0)
1279#define STB0899_OFFST_LDPC_ERRORS 0
1280#define STB0899_WIDTH_LDPC_ERRORS 16
1281
1282#define STB0899_OFF0_BCH_MODE 0xfa20
1283#define STB0899_BASE_BCH_MODE 0x00000800
1284#define STB0899_BCH_CORRECT_N (0x01 << 1)
1285#define STB0899_OFFST_BCH_CORRECT_N 1
1286#define STB0899_WIDTH_BCH_CORRECT_N 1
1287#define STB0899_FULL_BYPASS (0x01 << 0)
1288#define STB0899_OFFST_FULL_BYPASS 0
1289#define STB0899_WIDTH_FULL_BYPASS 1
1290
1291#define STB0899_OFF0_ERR_ACC_PER 0xfa24
1292#define STB0899_BASE_ERR_ACC_PER 0x00000800
1293#define STB0899_BCH_ERR_ACC_PERIOD (0x0f << 0)
1294#define STB0899_OFFST_BCH_ERR_ACC_PERIOD 0
1295#define STB0899_WIDTH_BCH_ERR_ACC_PERIOD 4
1296
1297#define STB0899_OFF0_BCH_ERR_ACC 0xfa28
1298#define STB0899_BASE_BCH_ERR_ACC 0x00000800
1299#define STB0899_BCH_ERR_ACCUM (0xff << 0)
1300#define STB0899_OFFST_BCH_ERR_ACCUM 0
1301#define STB0899_WIDTH_BCH_ERR_ACCUM 8
1302
1303#define STB0899_OFF0_FEC_CORE_ID_REG 0xfa2c
1304#define STB0899_BASE_FEC_CORE_ID_REG 0x00000800
1305#define STB0899_FEC_CORE_ID (0xffffffff << 0)
1306#define STB0899_OFFST_FEC_CORE_ID 0
1307#define STB0899_WIDTH_FEC_CORE_ID 32
1308
1309#define STB0899_OFF0_FEC_VER_ID_REG 0xfa34
1310#define STB0899_BASE_FEC_VER_ID_REG 0x00000800
1311#define STB0899_FEC_VER_ID (0xff << 0)
1312#define STB0899_OFFST_FEC_VER_ID 0
1313#define STB0899_WIDTH_FEC_VER_ID 8
1314
1315#define STB0899_OFF0_FEC_TP_SEL 0xfa38
1316#define STB0899_BASE_FEC_TP_SEL 0x00000800
1317
1318#define STB0899_OFF0_CSM_CNTRL1 0xf310
1319#define STB0899_BASE_CSM_CNTRL1 0x00000400
1320#define STB0899_CSM_FORCE_FREQLOCK (0x01 << 19)
1321#define STB0899_OFFST_CSM_FORCE_FREQLOCK 19
1322#define STB0899_WIDTH_CSM_FORCE_FREQLOCK 1
1323#define STB0899_CSM_FREQ_LOCKSTATE (0x01 << 18)
1324#define STB0899_OFFST_CSM_FREQ_LOCKSTATE 18
1325#define STB0899_WIDTH_CSM_FREQ_LOCKSTATE 1
1326#define STB0899_CSM_AUTO_PARAM (0x01 << 17)
1327#define STB0899_OFFST_CSM_AUTO_PARAM 17
1328#define STB0899_WIDTH_CSM_AUTO_PARAM 1
1329#define STB0899_FE_LOOP_SHIFT (0x07 << 14)
1330#define STB0899_OFFST_FE_LOOP_SHIFT 14
1331#define STB0899_WIDTH_FE_LOOP_SHIFT 3
1332#define STB0899_CSM_AGC_SHIFT (0x07 << 11)
1333#define STB0899_OFFST_CSM_AGC_SHIFT 11
1334#define STB0899_WIDTH_CSM_AGC_SHIFT 3
1335#define STB0899_CSM_AGC_GAIN (0x1ff << 2)
1336#define STB0899_OFFST_CSM_AGC_GAIN 2
1337#define STB0899_WIDTH_CSM_AGC_GAIN 9
1338#define STB0899_CSM_TWO_PASS (0x01 << 1)
1339#define STB0899_OFFST_CSM_TWO_PASS 1
1340#define STB0899_WIDTH_CSM_TWO_PASS 1
1341#define STB0899_CSM_DVT_TABLE (0x01 << 0)
1342#define STB0899_OFFST_CSM_DVT_TABLE 0
1343#define STB0899_WIDTH_CSM_DVT_TABLE 1
1344
1345#define STB0899_OFF0_CSM_CNTRL2 0xf314
1346#define STB0899_BASE_CSM_CNTRL2 0x00000400
1347#define STB0899_CSM_GAMMA_RHO_ACQ (0x1ff << 9)
1348#define STB0899_OFFST_CSM_GAMMA_RHOACQ 9
1349#define STB0899_WIDTH_CSM_GAMMA_RHOACQ 9
1350#define STB0899_CSM_GAMMA_ACQ (0x1ff << 0)
1351#define STB0899_OFFST_CSM_GAMMA_ACQ 0
1352#define STB0899_WIDTH_CSM_GAMMA_ACQ 9
1353
1354#define STB0899_OFF0_CSM_CNTRL3 0xf318
1355#define STB0899_BASE_CSM_CNTRL3 0x00000400
1356#define STB0899_CSM_GAMMA_RHO_TRACK (0x1ff << 9)
1357#define STB0899_OFFST_CSM_GAMMA_RHOTRACK 9
1358#define STB0899_WIDTH_CSM_GAMMA_RHOTRACK 9
1359#define STB0899_CSM_GAMMA_TRACK (0x1ff << 0)
1360#define STB0899_OFFST_CSM_GAMMA_TRACK 0
1361#define STB0899_WIDTH_CSM_GAMMA_TRACK 9
1362
1363#define STB0899_OFF0_CSM_CNTRL4 0xf31c
1364#define STB0899_BASE_CSM_CNTRL4 0x00000400
1365#define STB0899_CSM_PHASEDIFF_THRESH (0x0f << 8)
1366#define STB0899_OFFST_CSM_PHASEDIFF_THRESH 8
1367#define STB0899_WIDTH_CSM_PHASEDIFF_THRESH 4
1368#define STB0899_CSM_LOCKCOUNT_THRESH (0xff << 0)
1369#define STB0899_OFFST_CSM_LOCKCOUNT_THRESH 0
1370#define STB0899_WIDTH_CSM_LOCKCOUNT_THRESH 8
1371
1372/* Check on chapter 8 page 42 */
1373#define STB0899_ERRCTRL1 0xf574
1374#define STB0899_ERRCTRL2 0xf575
1375#define STB0899_ERRCTRL3 0xf576
1376#define STB0899_ERR_SRC_S1 (0x1f << 3)
1377#define STB0899_OFFST_ERR_SRC_S1 3
1378#define STB0899_WIDTH_ERR_SRC_S1 5
1379#define STB0899_ERR_SRC_S2 (0x0f << 0)
1380#define STB0899_OFFST_ERR_SRC_S2 0
1381#define STB0899_WIDTH_ERR_SRC_S2 4
1382#define STB0899_NOE (0x07 << 0)
1383#define STB0899_OFFST_NOE 0
1384#define STB0899_WIDTH_NOE 3
1385
1386#define STB0899_ECNT1M 0xf524
1387#define STB0899_ECNT1L 0xf525
1388#define STB0899_ECNT2M 0xf526
1389#define STB0899_ECNT2L 0xf527
1390#define STB0899_ECNT3M 0xf528
1391#define STB0899_ECNT3L 0xf529
1392
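/*
 * Editor's note -- sketch under the assumption that the ECNTxM/ECNTxL
 * pairs above are the high and low bytes of 16-bit error counters; the
 * helper and the read8() accessor are placeholders, not driver functions:
 *
 *	static inline u16 stb0899_ex_read_ecnt(u8 (*read8)(u16 reg),
 *					       u16 msb, u16 lsb)
 *	{
 *		return ((u16)read8(msb) << 8) | read8(lsb);
 *	}
 *
 *	// e.g. stb0899_ex_read_ecnt(read8, STB0899_ECNT1M, STB0899_ECNT1L)
 */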
1393#define STB0899_DMONMSK1 0xf57b
1394#define STB0899_DMONMSK1_WAIT_1STEP (1 << 7)
1395#define STB0899_DMONMSK1_FREE_14 (1 << 6)
1396#define STB0899_DMONMSK1_AVRGVIT_CALC (1 << 5)
1397#define STB0899_DMONMSK1_FREE_12 (1 << 4)
1398#define STB0899_DMONMSK1_FREE_11 (1 << 3)
1399#define STB0899_DMONMSK1_B0DIV_CALC (1 << 2)
1400#define STB0899_DMONMSK1_KDIVB1_CALC (1 << 1)
1401#define STB0899_DMONMSK1_KDIVB2_CALC (1 << 0)
1402
1403#define STB0899_DMONMSK0 0xf57c
1404#define STB0899_DMONMSK0_SMOTTH_CALC (1 << 7)
1405#define STB0899_DMONMSK0_FREE_6 (1 << 6)
1406#define STB0899_DMONMSK0_SIGPOWER_CALC (1 << 5)
1407#define STB0899_DMONMSK0_QSEUIL_CALC (1 << 4)
1408#define STB0899_DMONMSK0_FREE_3 (1 << 3)
1409#define STB0899_DMONMSK0_FREE_2 (1 << 2)
1410#define STB0899_DMONMSK0_KVDIVB1_CALC (1 << 1)
1411#define STB0899_DMONMSK0_KVDIVB2_CALC (1 << 0)
1412
1413#define STB0899_TSULC 0xf549
1414#define STB0899_ULNOSYNCBYTES (0x01 << 7)
1415#define STB0899_OFFST_ULNOSYNCBYTES 7
1416#define STB0899_WIDTH_ULNOSYNCBYTES 1
1417#define STB0899_ULPARITY_ON (0x01 << 6)
1418#define STB0899_OFFST_ULPARITY_ON 6
1419#define STB0899_WIDTH_ULPARITY_ON 1
1420#define STB0899_ULSYNCOUTRS (0x01 << 5)
1421#define STB0899_OFFST_ULSYNCOUTRS 5
1422#define STB0899_WIDTH_ULSYNCOUTRS 1
1423#define STB0899_ULDSS_PACKETS (0x01 << 0)
1424#define STB0899_OFFST_ULDSS_PACKETS 0
1425#define STB0899_WIDTH_ULDSS_PACKETS 1
1426
1427#define STB0899_TSLPL 0xf54b
1428#define STB0899_LLDVBS2_MODE (0x01 << 4)
1429#define STB0899_OFFST_LLDVBS2_MODE 4
1430#define STB0899_WIDTH_LLDVBS2_MODE 1
1431#define STB0899_LLISSYI_ON (0x01 << 3)
1432#define STB0899_OFFST_LLISSYI_ON 3
1433#define STB0899_WIDTH_LLISSYI_ON 1
1434#define STB0899_LLNPD_ON (0x01 << 2)
1435#define STB0899_OFFST_LLNPD_ON 2
1436#define STB0899_WIDTH_LLNPD_ON 1
1437#define STB0899_LLCRC8_ON (0x01 << 1)
1438#define STB0899_OFFST_LLCRC8_ON 1
1439#define STB0899_WIDTH_LLCRC8_ON 1
1440
1441#define STB0899_TSCFGH 0xf54c
1442#define STB0899_OUTRS_PS (0x01 << 6)
1443#define STB0899_OFFST_OUTRS_PS 6
1444#define STB0899_WIDTH_OUTRS_PS 1
1445#define STB0899_SYNCBYTE (0x01 << 5)
1446#define STB0899_OFFST_SYNCBYTE 5
1447#define STB0899_WIDTH_SYNCBYTE 1
1448#define STB0899_PFBIT (0x01 << 4)
1449#define STB0899_OFFST_PFBIT 4
1450#define STB0899_WIDTH_PFBIT 1
1451#define STB0899_ERR_BIT (0x01 << 3)
1452#define STB0899_OFFST_ERR_BIT 3
1453#define STB0899_WIDTH_ERR_BIT 1
1454#define STB0899_MPEG (0x01 << 2)
1455#define STB0899_OFFST_MPEG 2
1456#define STB0899_WIDTH_MPEG 1
1457#define STB0899_CLK_POL (0x01 << 1)
1458#define STB0899_OFFST_CLK_POL 1
1459#define STB0899_WIDTH_CLK_POL 1
1460#define STB0899_FORCE0 (0x01 << 0)
1461#define STB0899_OFFST_FORCE0 0
1462#define STB0899_WIDTH_FORCE0 1
1463
1464#define STB0899_TSCFGM 0xf54d
1465#define STB0899_LLPRIORITY (0x01 << 3)
1466#define STB0899_OFFST_LLPRIORITY 3
1467#define STB0899_WIDTH_LLPRIORITY 1
1468#define STB0899_EN188 (0x01 << 2)
1469#define STB0899_OFFST_EN188 2
1470#define STB0899_WIDTH_EN188 1
1471
1472#define STB0899_TSCFGL 0xf54e
1473#define STB0899_DEL_ERRPCK (0x01 << 7)
1474#define STB0899_OFFST_DEL_ERRPCK 7
1475#define STB0899_WIDTH_DEL_ERRPCK 1
1476#define STB0899_ERRFLAGSTD (0x01 << 5)
1477#define STB0899_OFFST_ERRFLAGSTD 5
1478#define STB0899_WIDTH_ERRFLAGSTD 1
1479#define STB0899_MPEGERR (0x01 << 4)
1480#define STB0899_OFFST_MPEGERR 4
1481#define STB0899_WIDTH_MPEGERR 1
1482#define STB0899_BCH_CHK (0x01 << 3)
1483#define STB0899_OFFST_BCH_CHK 3
1484#define STB0899_WIDTH_BCH_CHK 1
1485#define STB0899_CRC8CHK (0x01 << 2)
1486#define STB0899_OFFST_CRC8CHK 2
1487#define STB0899_WIDTH_CRC8CHK 1
1488#define STB0899_SPEC_INFO (0x01 << 1)
1489#define STB0899_OFFST_SPEC_INFO 1
1490#define STB0899_WIDTH_SPEC_INFO 1
1491#define STB0899_LOW_PRIO_CLK (0x01 << 0)
1492#define STB0899_OFFST_LOW_PRIO_CLK 0
1493#define STB0899_WIDTH_LOW_PRIO_CLK 1
1494#define STB0899_ERROR_NORM (0x00 << 0)
1495#define STB0899_OFFST_ERROR_NORM 0
1496#define STB0899_WIDTH_ERROR_NORM 0
1497
1498#define STB0899_TSOUT 0xf54f
1499#define STB0899_RSSYNCDEL 0xf550
1500#define STB0899_TSINHDELH 0xf551
1501#define STB0899_TSINHDELM 0xf552
1502#define STB0899_TSINHDELL 0xf553
1503#define STB0899_TSLLSTKM 0xf55a
1504#define STB0899_TSLLSTKL 0xf55b
1505#define STB0899_TSULSTKM 0xf55c
1506#define STB0899_TSULSTKL 0xf55d
1507#define STB0899_TSSTATUS 0xf561
1508
1509#define STB0899_PDELCTRL 0xf600
1510#define STB0899_INVERT_RES (0x01 << 7)
1511#define STB0899_OFFST_INVERT_RES 7
1512#define STB0899_WIDTH_INVERT_RES 1
1513#define STB0899_FORCE_ACCEPTED (0x01 << 6)
1514#define STB0899_OFFST_FORCE_ACCEPTED 6
1515#define STB0899_WIDTH_FORCE_ACCEPTED 1
1516#define STB0899_FILTER_EN (0x01 << 5)
1517#define STB0899_OFFST_FILTER_EN 5
1518#define STB0899_WIDTH_FILTER_EN 1
1519#define STB0899_LOCKFALL_THRESH (0x01 << 4)
1520#define STB0899_OFFST_LOCKFALL_THRESH 4
1521#define STB0899_WIDTH_LOCKFALL_THRESH 1
1522#define STB0899_HYST_EN (0x01 << 3)
1523#define STB0899_OFFST_HYST_EN 3
1524#define STB0899_WIDTH_HYST_EN 1
1525#define STB0899_HYST_SWRST (0x01 << 2)
1526#define STB0899_OFFST_HYST_SWRST 2
1527#define STB0899_WIDTH_HYST_SWRST 1
1528#define STB0899_ALGO_EN (0x01 << 1)
1529#define STB0899_OFFST_ALGO_EN 1
1530#define STB0899_WIDTH_ALGO_EN 1
1531#define STB0899_ALGO_SWRST (0x01 << 0)
1532#define STB0899_OFFST_ALGO_SWRST 0
1533#define STB0899_WIDTH_ALGO_SWRST 1
1534
1535#define STB0899_PDELCTRL2 0xf601
1536#define STB0899_BBHCTRL1 0xf602
1537#define STB0899_BBHCTRL2 0xf603
1538#define STB0899_HYSTTHRESH 0xf604
1539
1540#define STB0899_MATCSTM 0xf605
1541#define STB0899_MATCSTL 0xf606
1542#define STB0899_UPLCSTM 0xf607
1543#define STB0899_UPLCSTL 0xf608
1544#define STB0899_DFLCSTM 0xf609
1545#define STB0899_DFLCSTL 0xf60a
1546#define STB0899_SYNCCST 0xf60b
1547#define STB0899_SYNCDCSTM 0xf60c
1548#define STB0899_SYNCDCSTL 0xf60d
1549#define STB0899_ISI_ENTRY 0xf60e
1550#define STB0899_ISI_BIT_EN 0xf60f
1551#define STB0899_MATSTRM 0xf610
1552#define STB0899_MATSTRL 0xf611
1553#define STB0899_UPLSTRM 0xf612
1554#define STB0899_UPLSTRL 0xf613
1555#define STB0899_DFLSTRM 0xf614
1556#define STB0899_DFLSTRL 0xf615
1557#define STB0899_SYNCSTR 0xf616
1558#define STB0899_SYNCDSTRM 0xf617
1559#define STB0899_SYNCDSTRL 0xf618
1560
1561#define STB0899_CFGPDELSTATUS1 0xf619
1562#define STB0899_BADDFL (0x01 << 6)
1563#define STB0899_OFFST_BADDFL 6
1564#define STB0899_WIDTH_BADDFL 1
1565#define STB0899_CONTINUOUS_STREAM (0x01 << 5)
1566#define STB0899_OFFST_CONTINUOUS_STREAM 5
1567#define STB0899_WIDTH_CONTINUOUS_STREAM 1
1568#define STB0899_ACCEPTED_STREAM (0x01 << 4)
1569#define STB0899_OFFST_ACCEPTED_STREAM 4
1570#define STB0899_WIDTH_ACCEPTED_STREAM 1
1571#define STB0899_BCH_ERRFLAG (0x01 << 3)
1572#define STB0899_OFFST_BCH_ERRFLAG 3
1573#define STB0899_WIDTH_BCH_ERRFLAG 1
1574#define STB0899_CRCRES (0x01 << 2)
1575#define STB0899_OFFST_CRCRES 2
1576#define STB0899_WIDTH_CRCRES 1
1577#define STB0899_CFGPDELSTATUS_LOCK (0x01 << 1)
1578#define STB0899_OFFST_CFGPDELSTATUS_LOCK 1
1579#define STB0899_WIDTH_CFGPDELSTATUS_LOCK 1
1580#define STB0899_1STLOCK (0x01 << 0)
1581#define STB0899_OFFST_1STLOCK 0
1582#define STB0899_WIDTH_1STLOCK 1
1583
1584#define STB0899_CFGPDELSTATUS2 0xf61a
1585#define STB0899_BBFERRORM 0xf61b
1586#define STB0899_BBFERRORL 0xf61c
1587#define STB0899_UPKTERRORM 0xf61d
1588#define STB0899_UPKTERRORL 0xf61e
1589
1590#define STB0899_TSTCK 0xff10
1591
1592#define STB0899_TSTRES 0xff11
1593#define STB0899_FRESLDPC (0x01 << 7)
1594#define STB0899_OFFST_FRESLDPC 7
1595#define STB0899_WIDTH_FRESLDPC 1
1596#define STB0899_FRESRS (0x01 << 6)
1597#define STB0899_OFFST_FRESRS 6
1598#define STB0899_WIDTH_FRESRS 1
1599#define STB0899_FRESVIT (0x01 << 5)
1600#define STB0899_OFFST_FRESVIT 5
1601#define STB0899_WIDTH_FRESVIT 1
1602#define STB0899_FRESMAS1_2 (0x01 << 4)
1603#define STB0899_OFFST_FRESMAS1_2 4
1604#define STB0899_WIDTH_FRESMAS1_2 1
1605#define STB0899_FRESACS (0x01 << 3)
1606#define STB0899_OFFST_FRESACS 3
1607#define STB0899_WIDTH_FRESACS 1
1608#define STB0899_FRESSYM (0x01 << 2)
1609#define STB0899_OFFST_FRESSYM 2
1610#define STB0899_WIDTH_FRESSYM 1
1611#define STB0899_FRESMAS (0x01 << 1)
1612#define STB0899_OFFST_FRESMAS 1
1613#define STB0899_WIDTH_FRESMAS 1
1614#define STB0899_FRESINT (0x01 << 0)
1615#define STB0899_OFFST_FRESINT 0
1616#define STB0899_WIDTH_FRESINT 1
1617
1618#define STB0899_TSTOUT 0xff12
1619#define STB0899_EN_SIGNATURE (0x01 << 7)
1620#define STB0899_OFFST_EN_SIGNATURE 7
1621#define STB0899_WIDTH_EN_SIGNATURE 1
1622#define STB0899_BCLK_CLK (0x01 << 6)
1623#define STB0899_OFFST_BCLK_CLK 6
1624#define STB0899_WIDTH_BCLK_CLK 1
1625#define STB0899_SGNL_OUT (0x01 << 5)
1626#define STB0899_OFFST_SGNL_OUT 5
1627#define STB0899_WIDTH_SGNL_OUT 1
1628#define STB0899_TS (0x01 << 4)
1629#define STB0899_OFFST_TS 4
1630#define STB0899_WIDTH_TS 1
1631#define STB0899_CTEST (0x01 << 0)
1632#define STB0899_OFFST_CTEST 0
1633#define STB0899_WIDTH_CTEST 1
1634
1635#define STB0899_TSTIN 0xff13
1636#define STB0899_TEST_IN (0x01 << 7)
1637#define STB0899_OFFST_TEST_IN 7
1638#define STB0899_WIDTH_TEST_IN 1
1639#define STB0899_EN_ADC (0x01 << 6)
1640#define STB0899_OFFST_EN_ADC 6
1641#define STB0899_WIDTH_EN_ADC 1
1642#define STB0899_SGN_ADC (0x01 << 5)
1643#define STB0899_OFFST_SGN_ADC 5
1644#define STB0899_WIDTH_SGN_ADC 1
1645#define STB0899_BCLK_IN (0x01 << 4)
1646#define STB0899_OFFST_BCLK_IN 4
1647#define STB0899_WIDTH_BCLK_IN 1
1648#define STB0899_JETONIN_MODE (0x01 << 3)
1649#define STB0899_OFFST_JETONIN_MODE 3
1650#define STB0899_WIDTH_JETONIN_MODE 1
1651#define STB0899_BCLK_VALUE (0x01 << 2)
1652#define STB0899_OFFST_BCLK_VALUE 2
1653#define STB0899_WIDTH_BCLK_VALUE 1
1654#define STB0899_SGNRST_T12 (0x01 << 1)
1655#define STB0899_OFFST_SGNRST_T12 1
1656#define STB0899_WIDTH_SGNRST_T12 1
1657#define STB0899_LOWSP_ENAX (0x01 << 0)
1658#define STB0899_OFFST_LOWSP_ENAX 0
1659#define STB0899_WIDTH_LOWSP_ENAX 1
1660
1661#define STB0899_TSTSYS 0xff14
1662#define STB0899_TSTCHIP 0xff15
1663#define STB0899_TSTFREE 0xff16
1664#define STB0899_TSTI2C 0xff17
1665#define STB0899_BITSPEEDM 0xff1c
1666#define STB0899_BITSPEEDL 0xff1d
1667#define STB0899_TBUSBIT 0xff1e
1668#define STB0899_TSTDIS 0xff24
1669#define STB0899_TSTDISRX 0xff25
1670#define STB0899_TSTJETON 0xff28
1671#define STB0899_TSTDCADJ 0xff40
1672#define STB0899_TSTAGC1 0xff41
1673#define STB0899_TSTAGC1N 0xff42
1674#define STB0899_TSTPOLYPH 0xff48
1675#define STB0899_TSTR 0xff49
1676#define STB0899_TSTAGC2 0xff4a
1677#define STB0899_TSTCTL1 0xff4b
1678#define STB0899_TSTCTL2 0xff4c
1679#define STB0899_TSTCTL3 0xff4d
1680#define STB0899_TSTDEMAP 0xff50
1681#define STB0899_TSTDEMAP2 0xff51
1682#define STB0899_TSTDEMMON 0xff52
1683#define STB0899_TSTRATE 0xff53
1684#define STB0899_TSTSELOUT 0xff54
1685#define STB0899_TSYNC 0xff55
1686#define STB0899_TSTERR 0xff56
1687#define STB0899_TSTRAM1 0xff58
1688#define STB0899_TSTVSELOUT 0xff59
1689#define STB0899_TSTFORCEIN 0xff5a
1690#define STB0899_TSTRS1 0xff5c
1691#define STB0899_TSTRS2 0xff5d
1692#define STB0899_TSTRS3 0xff53
1693
1694#define STB0899_INTBUFSTATUS 0xf200
1695#define STB0899_INTBUFCTRL 0xf201
1696#define STB0899_PCKLENUL 0xf55e
1697#define STB0899_PCKLENLL 0xf55f
1698#define STB0899_RSPCKLEN 0xf560
1699
1700/* 2 registers */
1701#define STB0899_SYNCDCST 0xf60c
1702
1703/* DiSEqC */
1704#define STB0899_DISCNTRL1 0xf0a0
1705#define STB0899_TIMOFF (0x01 << 7)
1706#define STB0899_OFFST_TIMOFF 7
1707#define STB0899_WIDTH_TIMOFF 1
1708#define STB0899_DISEQCRESET (0x01 << 6)
1709#define STB0899_OFFST_DISEQCRESET 6
1710#define STB0899_WIDTH_DISEQCRESET 1
1711#define STB0899_TIMCMD (0x03 << 4)
1712#define STB0899_OFFST_TIMCMD 4
1713#define STB0899_WIDTH_TIMCMD 2
1714#define STB0899_DISPRECHARGE (0x01 << 2)
1715#define STB0899_OFFST_DISPRECHARGE 2
1716#define STB0899_WIDTH_DISPRECHARGE 1
1717#define STB0899_DISEQCMODE (0x03 << 0)
1718#define STB0899_OFFST_DISEQCMODE 0
1719#define STB0899_WIDTH_DISEQCMODE 2
1720
1721#define STB0899_DISCNTRL2 0xf0a1
1722#define STB0899_RECEIVER_ON (0x01 << 7)
1723#define STB0899_OFFST_RECEIVER_ON 7
1724#define STB0899_WIDTH_RECEIVER_ON 1
1725#define STB0899_IGNO_SHORT_22K (0x01 << 6)
1726#define STB0899_OFFST_IGNO_SHORT_22K 6
1727#define STB0899_WIDTH_IGNO_SHORT_22K 1
1728#define STB0899_ONECHIP_TRX (0x01 << 5)
1729#define STB0899_OFFST_ONECHIP_TRX 5
1730#define STB0899_WIDTH_ONECHIP_TRX 1
1731#define STB0899_EXT_ENVELOP (0x01 << 4)
1732#define STB0899_OFFST_EXT_ENVELOP 4
1733#define STB0899_WIDTH_EXT_ENVELOP 1
1734#define STB0899_PIN_SELECT (0x03 << 2)
1735#define STB0899_OFFST_PIN_SELECT 2
1736#define STB0899_WIDTH_PIN_SELECT 2
1737#define STB0899_IRQ_RXEND (0x01 << 1)
1738#define STB0899_OFFST_IRQ_RXEND 1
1739#define STB0899_WIDTH_IRQ_RXEND 1
1740#define STB0899_IRQ_4NBYTES (0x01 << 0)
1741#define STB0899_OFFST_IRQ_4NBYTES 0
1742#define STB0899_WIDTH_IRQ_4NBYTES 1
1743
1744#define STB0899_DISRX_ST0 0xf0a4
1745#define STB0899_RXEND (0x01 << 7)
1746#define STB0899_OFFST_RXEND 7
1747#define STB0899_WIDTH_RXEND 1
1748#define STB0899_RXACTIVE (0x01 << 6)
1749#define STB0899_OFFST_RXACTIVE 6
1750#define STB0899_WIDTH_RXACTIVE 1
1751#define STB0899_SHORT22K (0x01 << 5)
1752#define STB0899_OFFST_SHORT22K 5
1753#define STB0899_WIDTH_SHORT22K 1
1754#define STB0899_CONTTONE (0x01 << 4)
1755#define STB0899_OFFST_CONTTONE 4
1756#define STB0899_WIDTH_CONTTONE 1
1757#define STB0899_4BFIFOREDY (0x01 << 3)
1758#define STB0899_OFFST_4BFIFOREDY 3
1759#define STB0899_WIDTH_4BFIFOREDY 1
1760#define STB0899_FIFOEMPTY (0x01 << 2)
1761#define STB0899_OFFST_FIFOEMPTY 2
1762#define STB0899_WIDTH_FIFOEMPTY 1
1763#define STB0899_ABORTTRX (0x01 << 0)
1764#define STB0899_OFFST_ABORTTRX 0
1765#define STB0899_WIDTH_ABORTTRX 1
1766
1767#define STB0899_DISRX_ST1 0xf0a5
1768#define STB0899_RXFAIL (0x01 << 7)
1769#define STB0899_OFFST_RXFAIL 7
1770#define STB0899_WIDTH_RXFAIL 1
1771#define STB0899_FIFOPFAIL (0x01 << 6)
1772#define STB0899_OFFST_FIFOPFAIL 6
1773#define STB0899_WIDTH_FIFOPFAIL 1
1774#define STB0899_RXNONBYTES (0x01 << 5)
1775#define STB0899_OFFST_RXNONBYTES 5
1776#define STB0899_WIDTH_RXNONBYTES 1
1777#define STB0899_FIFOOVF (0x01 << 4)
1778#define STB0899_OFFST_FIFOOVF 4
1779#define STB0899_WIDTH_FIFOOVF 1
1780#define STB0899_FIFOBYTENBR (0x0f << 0)
1781#define STB0899_OFFST_FIFOBYTENBR 0
1782#define STB0899_WIDTH_FIFOBYTENBR 4
1783
1784#define STB0899_DISPARITY 0xf0a6
1785
1786#define STB0899_DISFIFO 0xf0a7
1787
1788#define STB0899_DISSTATUS 0xf0a8
1789#define STB0899_FIFOFULL (0x01 << 6)
1790#define STB0899_OFFST_FIFOFULL 6
1791#define STB0899_WIDTH_FIFOFULL 1
1792#define STB0899_TXIDLE (0x01 << 5)
1793#define STB0899_OFFST_TXIDLE 5
1794#define STB0899_WIDTH_TXIDLE 1
1795#define STB0899_GAPBURST (0x01 << 4)
1796#define STB0899_OFFST_GAPBURST 4
1797#define STB0899_WIDTH_GAPBURST 1
1798#define STB0899_TXFIFOBYTES (0x0f << 0)
1799#define STB0899_OFFST_TXFIFOBYTES 0
1800#define STB0899_WIDTH_TXFIFOBYTES 4
1801#define STB0899_DISF22 0xf0a9
1802
1803#define STB0899_DISF22RX 0xf0aa
1804
1805/* General Purpose */
1806#define STB0899_SYSREG 0xf101
1807#define STB0899_ACRPRESC 0xf110
1808#define STB0899_OFFST_RSVD2 7
1809#define STB0899_WIDTH_RSVD2 1
1810#define STB0899_OFFST_ACRPRESC 4
1811#define STB0899_WIDTH_ACRPRESC 3
1812#define STB0899_OFFST_RSVD1 3
1813#define STB0899_WIDTH_RSVD1 1
1814#define STB0899_OFFST_ACRPRESC2 0
1815#define STB0899_WIDTH_ACRPRESC2 3
1816
1817#define STB0899_ACRDIV1 0xf111
1818#define STB0899_ACRDIV2 0xf112
1819#define STB0899_DACR1 0xf113
1820#define STB0899_DACR2 0xf114
1821#define STB0899_OUTCFG 0xf11c
1822#define STB0899_MODECFG 0xf11d
1823#define STB0899_NCOARSE 0xf1b3
1824
1825#define STB0899_SYNTCTRL 0xf1b6
1826#define STB0899_STANDBY (0x01 << 7)
1827#define STB0899_OFFST_STANDBY 7
1828#define STB0899_WIDTH_STANDBY 1
1829#define STB0899_BYPASSPLL (0x01 << 6)
1830#define STB0899_OFFST_BYPASSPLL 6
1831#define STB0899_WIDTH_BYPASSPLL 1
1832#define STB0899_SEL1XRATIO (0x01 << 5)
1833#define STB0899_OFFST_SEL1XRATIO 5
1834#define STB0899_WIDTH_SEL1XRATIO 1
1835#define STB0899_SELOSCI (0x01 << 1)
1836#define STB0899_OFFST_SELOSCI 1
1837#define STB0899_WIDTH_SELOSCI 1
1838
1839#define STB0899_FILTCTRL 0xf1b7
1840#define STB0899_SYSCTRL 0xf1b8
1841
1842#define STB0899_STOPCLK1 0xf1c2
1843#define STB0899_STOP_CKINTBUF108 (0x01 << 7)
1844#define STB0899_OFFST_STOP_CKINTBUF108 7
1845#define STB0899_WIDTH_STOP_CKINTBUF108 1
1846#define STB0899_STOP_CKINTBUF216 (0x01 << 6)
1847#define STB0899_OFFST_STOP_CKINTBUF216 6
1848#define STB0899_WIDTH_STOP_CKINTBUF216 1
1849#define STB0899_STOP_CHK8PSK (0x01 << 5)
1850#define STB0899_OFFST_STOP_CHK8PSK 5
1851#define STB0899_WIDTH_STOP_CHK8PSK 1
1852#define STB0899_STOP_CKFEC108 (0x01 << 4)
1853#define STB0899_OFFST_STOP_CKFEC108 4
1854#define STB0899_WIDTH_STOP_CKFEC108 1
1855#define STB0899_STOP_CKFEC216 (0x01 << 3)
1856#define STB0899_OFFST_STOP_CKFEC216 3
1857#define STB0899_WIDTH_STOP_CKFEC216 1
1858#define STB0899_STOP_CKCORE216 (0x01 << 2)
1859#define STB0899_OFFST_STOP_CKCORE216 2
1860#define STB0899_WIDTH_STOP_CKCORE216 1
1861#define STB0899_STOP_CKADCI108 (0x01 << 1)
1862#define STB0899_OFFST_STOP_CKADCI108 1
1863#define STB0899_WIDTH_STOP_CKADCI108 1
1864#define STB0899_STOP_INVCKADCI108 (0x01 << 0)
1865#define STB0899_OFFST_STOP_INVCKADCI108 0
1866#define STB0899_WIDTH_STOP_INVCKADCI108 1
1867
1868#define STB0899_STOPCLK2 0xf1c3
1869#define STB0899_STOP_CKS2DMD108 (0x01 << 2)
1870#define STB0899_OFFST_STOP_CKS2DMD108 2
1871#define STB0899_WIDTH_STOP_CKS2DMD108 1
1872#define STB0899_STOP_CKPKDLIN108 (0x01 << 1)
1873#define STB0899_OFFST_STOP_CKPKDLIN108 1
1874#define STB0899_WIDTH_STOP_CKPKDLIN108 1
1875#define STB0899_STOP_CKPKDLIN216 (0x01 << 0)
1876#define STB0899_OFFST_STOP_CKPKDLIN216 0
1877#define STB0899_WIDTH_STOP_CKPKDLIN216 1
1878
1879#define STB0899_TSTTNR1 0xf1e0
1880#define STB0899_BYPASS_ADC (0x01 << 7)
1881#define STB0899_OFFST_BYPASS_ADC 7
1882#define STB0899_WIDTH_BYPASS_ADC 1
1883#define STB0899_INVADCICKOUT (0x01 << 6)
1884#define STB0899_OFFST_INVADCICKOUT 6
1885#define STB0899_WIDTH_INVADCICKOUT 1
1886#define STB0899_ADCTEST_VOLTAGE (0x03 << 4)
1887#define STB0899_OFFST_ADCTEST_VOLTAGE 4
1888#define STB0899_WIDTH_ADCTEST_VOLTAGE 1
1889#define STB0899_ADC_RESET (0x01 << 3)
1890#define STB0899_OFFST_ADC_RESET 3
1891#define STB0899_WIDTH_ADC_RESET 1
1892#define STB0899_TSTTNR1_2 (0x01 << 2)
1893#define STB0899_OFFST_TSTTNR1_2 2
1894#define STB0899_WIDTH_TSTTNR1_2 1
1895#define STB0899_ADCPON (0x01 << 1)
1896#define STB0899_OFFST_ADCPON 1
1897#define STB0899_WIDTH_ADCPON 1
1898#define STB0899_ADCIN_MODE (0x01 << 0)
1899#define STB0899_OFFST_ADCIN_MODE 0
1900#define STB0899_WIDTH_ADCIN_MODE 1
1901
1902#define STB0899_TSTTNR2 0xf1e1
1903#define STB0899_TSTTNR2_7 (0x01 << 7)
1904#define STB0899_OFFST_TSTTNR2_7 7
1905#define STB0899_WIDTH_TSTTNR2_7 1
1906#define STB0899_NOT_DISRX_WIRED (0x01 << 6)
1907#define STB0899_OFFST_NOT_DISRX_WIRED 6
1908#define STB0899_WIDTH_NOT_DISRX_WIRED 1
1909#define STB0899_DISEQC_DCURRENT (0x01 << 5)
1910#define STB0899_OFFST_DISEQC_DCURRENT 5
1911#define STB0899_WIDTH_DISEQC_DCURRENT 1
1912#define STB0899_DISEQC_ZCURRENT (0x01 << 4)
1913#define STB0899_OFFST_DISEQC_ZCURRENT 4
1914#define STB0899_WIDTH_DISEQC_ZCURRENT 1
1915#define STB0899_DISEQC_SINC_SOURCE (0x03 << 2)
1916#define STB0899_OFFST_DISEQC_SINC_SOURCE 2
1917#define STB0899_WIDTH_DISEQC_SINC_SOURCE 2
1918#define STB0899_SELIQSRC (0x03 << 0)
1919#define STB0899_OFFST_SELIQSRC 0
1920#define STB0899_WIDTH_SELIQSRC 2
1921
1922#define STB0899_TSTTNR3 0xf1e2
1923
1924#define STB0899_I2CCFG 0xf129
1925#define STB0899_I2CCFGRSVD (0x0f << 4)
1926#define STB0899_OFFST_I2CCFGRSVD 4
1927#define STB0899_WIDTH_I2CCFGRSVD 4
1928#define STB0899_I2CFASTMODE (0x01 << 3)
1929#define STB0899_OFFST_I2CFASTMODE 3
1930#define STB0899_WIDTH_I2CFASTMODE 1
1931#define STB0899_STATUSWR (0x01 << 2)
1932#define STB0899_OFFST_STATUSWR 2
1933#define STB0899_WIDTH_STATUSWR 1
1934#define STB0899_I2CADDRINC (0x03 << 0)
1935#define STB0899_OFFST_I2CADDRINC 0
1936#define STB0899_WIDTH_I2CADDRINC 2
1937
1938#define STB0899_I2CRPT 0xf12a
1939#define STB0899_I2CTON (0x01 << 7)
1940#define STB0899_OFFST_I2CTON 7
1941#define STB0899_WIDTH_I2CTON 1
1942#define STB0899_ENARPTLEVEL (0x01 << 6)
1943#define STB0899_OFFST_ENARPTLEVEL 6
1944#define STB0899_WIDTH_ENARPTLEVEL 2
1945#define STB0899_SCLTDELAY (0x01 << 3)
1946#define STB0899_OFFST_SCLTDELAY 3
1947#define STB0899_WIDTH_SCLTDELAY 1
1948#define STB0899_STOPENA (0x01 << 2)
1949#define STB0899_OFFST_STOPENA 2
1950#define STB0899_WIDTH_STOPENA 1
1951#define STB0899_STOPSDAT2SDA (0x01 << 1)
1952#define STB0899_OFFST_STOPSDAT2SDA 1
1953#define STB0899_WIDTH_STOPSDAT2SDA 1
1954
1955#define STB0899_IOPVALUE8 0xf136
1956#define STB0899_IOPVALUE7 0xf137
1957#define STB0899_IOPVALUE6 0xf138
1958#define STB0899_IOPVALUE5 0xf139
1959#define STB0899_IOPVALUE4 0xf13a
1960#define STB0899_IOPVALUE3 0xf13b
1961#define STB0899_IOPVALUE2 0xf13c
1962#define STB0899_IOPVALUE1 0xf13d
1963#define STB0899_IOPVALUE0 0xf13e
1964
1965#define STB0899_GPIO00CFG 0xf140
1966
1967#define STB0899_GPIO01CFG 0xf141
1968#define STB0899_GPIO02CFG 0xf142
1969#define STB0899_GPIO03CFG 0xf143
1970#define STB0899_GPIO04CFG 0xf144
1971#define STB0899_GPIO05CFG 0xf145
1972#define STB0899_GPIO06CFG 0xf146
1973#define STB0899_GPIO07CFG 0xf147
1974#define STB0899_GPIO08CFG 0xf148
1975#define STB0899_GPIO09CFG 0xf149
1976#define STB0899_GPIO10CFG 0xf14a
1977#define STB0899_GPIO11CFG 0xf14b
1978#define STB0899_GPIO12CFG 0xf14c
1979#define STB0899_GPIO13CFG 0xf14d
1980#define STB0899_GPIO14CFG 0xf14e
1981#define STB0899_GPIO15CFG 0xf14f
1982#define STB0899_GPIO16CFG 0xf150
1983#define STB0899_GPIO17CFG 0xf151
1984#define STB0899_GPIO18CFG 0xf152
1985#define STB0899_GPIO19CFG 0xf153
1986#define STB0899_GPIO20CFG 0xf154
1987
1988#define STB0899_SDATCFG 0xf155
1989#define STB0899_SCLTCFG 0xf156
1990#define STB0899_AGCRFCFG 0xf157
1991#define STB0899_GPIO22 0xf158 /* AGCBB2CFG */
1992#define STB0899_GPIO21 0xf159 /* AGCBB1CFG */
1993#define STB0899_DIRCLKCFG 0xf15a
1994#define STB0899_CLKOUT27CFG 0xf15b
1995#define STB0899_STDBYCFG 0xf15c
1996#define STB0899_CS0CFG 0xf15d
1997#define STB0899_CS1CFG 0xf15e
1998#define STB0899_DISEQCOCFG 0xf15f
1999
2000#define STB0899_GPIO32CFG 0xf160
2001#define STB0899_GPIO33CFG 0xf161
2002#define STB0899_GPIO34CFG 0xf162
2003#define STB0899_GPIO35CFG 0xf163
2004#define STB0899_GPIO36CFG 0xf164
2005#define STB0899_GPIO37CFG 0xf165
2006#define STB0899_GPIO38CFG 0xf166
2007#define STB0899_GPIO39CFG 0xf167
2008
2009#define STB0899_IRQSTATUS_3 0xf120
2010#define STB0899_IRQSTATUS_2 0xf121
2011#define STB0899_IRQSTATUS_1 0xf122
2012#define STB0899_IRQSTATUS_0 0xf123
2013
2014#define STB0899_IRQMSK_3 0xf124
2015#define STB0899_IRQMSK_2 0xf125
2016#define STB0899_IRQMSK_1 0xf126
2017#define STB0899_IRQMSK_0 0xf127
2018
2019#define STB0899_IRQCFG 0xf128
2020
2021#define STB0899_GHOSTREG 0xf000
2022
2023#define STB0899_S2DEMOD 0xf3fc
2024#define STB0899_S2FEC 0xfafc
2025
2026
2027#endif
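Each register definition above pairs a shifted mask with an OFFST/WIDTH pair so that individual fields can be extracted from, or packed into, a register readback. As a quick illustration, a standalone sketch that pulls the RXFAIL and FIFOBYTENBR fields out of a hypothetical DISRX_ST1 value (the GET_FIELD helper and the sample byte are assumptions for the example, not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* Field parameters copied from the DISRX_ST1 definitions above. */
#define FIFOBYTENBR_MASK	(0x0f << 0)
#define FIFOBYTENBR_OFFST	0

/* Generic helper in the spirit of the OFFST/WIDTH pairs. */
#define GET_FIELD(val, mask, offst)	(((val) & (mask)) >> (offst))

int main(void)
{
	uint8_t disrx_st1 = 0xa6;	/* hypothetical readback */

	printf("RXFAIL=%u FIFOBYTENBR=%u\n",
	       (unsigned)GET_FIELD(disrx_st1, 0x01 << 7, 7),
	       (unsigned)GET_FIELD(disrx_st1, FIFOBYTENBR_MASK, FIFOBYTENBR_OFFST));
	/* 0xa6 -> RXFAIL=1, FIFOBYTENBR=6 */
	return 0;
}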
diff --git a/drivers/media/dvb/frontends/stb6100.c b/drivers/media/dvb/frontends/stb6100.c
new file mode 100644
index 000000000000..ff39275ab49c
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb6100.c
@@ -0,0 +1,545 @@
1/*
2 STB6100 Silicon Tuner
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/string.h>
26
27#include "dvb_frontend.h"
28#include "stb6100.h"
29
30static unsigned int verbose;
31module_param(verbose, int, 0644);
32
33
34#define FE_ERROR 0
35#define FE_NOTICE 1
36#define FE_INFO 2
37#define FE_DEBUG 3
38
39#define dprintk(x, y, z, format, arg...) do { \
40 if (z) { \
41 if ((x > FE_ERROR) && (x > y)) \
42 printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
43 else if ((x > FE_NOTICE) && (x > y)) \
44 printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
45 else if ((x > FE_INFO) && (x > y)) \
46 printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
47 else if ((x > FE_DEBUG) && (x > y)) \
48 printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
49 } else { \
50 if (x > y) \
51 printk(format, ##arg); \
52 } \
53} while(0)
54
55struct stb6100_lkup {
56 u32 val_low;
57 u32 val_high;
58 u8 reg;
59};
60
61static int stb6100_release(struct dvb_frontend *fe);
62
63static const struct stb6100_lkup lkup[] = {
64 { 0, 950000, 0x0a },
65 { 950000, 1000000, 0x0a },
66 { 1000000, 1075000, 0x0c },
67 { 1075000, 1200000, 0x00 },
68 { 1200000, 1300000, 0x01 },
69 { 1300000, 1370000, 0x02 },
70 { 1370000, 1470000, 0x04 },
71 { 1470000, 1530000, 0x05 },
72 { 1530000, 1650000, 0x06 },
73 { 1650000, 1800000, 0x08 },
74 { 1800000, 1950000, 0x0a },
75 { 1950000, 2150000, 0x0c },
76 { 2150000, 9999999, 0x0c },
77 { 0, 0, 0x00 }
78};
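The table maps the requested L-band frequency (in kHz) onto the value later written to the VCO register's OSM field; stb6100_set_frequency() walks it with the CHKRANGE() macro from stb6100.h until it finds the half-open interval [val_low, val_high) containing the frequency. A minimal standalone rerun of that scan (table abridged, frequency value hypothetical; a sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

#define CHKRANGE(val, x, y) (((val) >= (x) && (val) < (y)) ? 1 : 0)

struct lkup { uint32_t lo, hi; uint8_t reg; };

static const struct lkup tab[] = {
	{ 1075000, 1200000, 0x00 },
	{ 1200000, 1300000, 0x01 },
	{ 1300000, 1370000, 0x02 },
	{ 2150000, 9999999, 0x0c },
	{ 0, 0, 0x00 },		/* terminator: val_high == 0 */
};

int main(void)
{
	uint32_t freq = 1250000;	/* kHz */
	const struct lkup *p;

	/* Same scan pattern as stb6100_set_frequency(). */
	for (p = tab; p->hi != 0 && !CHKRANGE(freq, p->lo, p->hi); p++)
		;
	if (p->hi == 0)
		printf("%u kHz: out of range\n", freq);
	else
		printf("%u kHz -> OSM 0x%02x\n", freq, p->reg);	/* prints 0x01 */
	return 0;
}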
79
80/* Register names for easy debugging. */
81static const char *stb6100_regnames[] = {
82 [STB6100_LD] = "LD",
83 [STB6100_VCO] = "VCO",
84 [STB6100_NI] = "NI",
85 [STB6100_NF_LSB] = "NF",
86 [STB6100_K] = "K",
87 [STB6100_G] = "G",
88 [STB6100_F] = "F",
89 [STB6100_DLB] = "DLB",
90 [STB6100_TEST1] = "TEST1",
91 [STB6100_FCCK] = "FCCK",
92 [STB6100_LPEN] = "LPEN",
93 [STB6100_TEST3] = "TEST3",
94};
95
96/* Template for normalisation, i.e. setting unused or undocumented
97 * bits as required according to the documentation.
98 */
99struct stb6100_regmask {
100 u8 mask;
101 u8 set;
102};
103
104static const struct stb6100_regmask stb6100_template[] = {
105 [STB6100_LD] = { 0xff, 0x00 },
106 [STB6100_VCO] = { 0xff, 0x00 },
107 [STB6100_NI] = { 0xff, 0x00 },
108 [STB6100_NF_LSB] = { 0xff, 0x00 },
109 [STB6100_K] = { 0xc7, 0x38 },
110 [STB6100_G] = { 0xef, 0x10 },
111 [STB6100_F] = { 0x1f, 0xc0 },
112 [STB6100_DLB] = { 0x38, 0xc4 },
113 [STB6100_TEST1] = { 0x00, 0x8f },
114 [STB6100_FCCK] = { 0x40, 0x0d },
115 [STB6100_LPEN] = { 0xf0, 0x0b },
116 [STB6100_TEST3] = { 0x00, 0xde },
117};
118
119static void stb6100_normalise_regs(u8 regs[])
120{
121 int i;
122
123 for (i = 0; i < STB6100_NUMREGS; i++)
124 regs[i] = (regs[i] & stb6100_template[i].mask) | stb6100_template[i].set;
125}
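Normalisation applies each template entry as new = (old & mask) | set, forcing the reserved and fixed bits to their documented values no matter what the caller supplied. A quick standalone check using the FCCK template entry { 0x40, 0x0d } with an arbitrary input byte (illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* FCCK entry from stb6100_template[]: mask 0x40, set 0x0d. */
	uint8_t mask = 0x40, set = 0x0d;
	uint8_t raw = 0x7f;			/* arbitrary caller value */
	uint8_t out = (raw & mask) | set;	/* keep bit 6, force the rest */

	printf("0x%02x -> 0x%02x\n", raw, out);	/* 0x7f -> 0x4d */
	return 0;
}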
126
127static int stb6100_read_regs(struct stb6100_state *state, u8 regs[])
128{
129 int rc;
130 struct i2c_msg msg = {
131 .addr = state->config->tuner_address,
132 .flags = I2C_M_RD,
133 .buf = regs,
134 .len = STB6100_NUMREGS
135 };
136
137 rc = i2c_transfer(state->i2c, &msg, 1);
138 if (unlikely(rc != 1)) {
139 dprintk(verbose, FE_ERROR, 1, "Read (0x%x) err, rc=[%d]",
140 state->config->tuner_address, rc);
141
142 return -EREMOTEIO;
143 }
144 if (unlikely(verbose > FE_DEBUG)) {
145 int i;
146
147 dprintk(verbose, FE_DEBUG, 1, " Read from 0x%02x", state->config->tuner_address);
148 for (i = 0; i < STB6100_NUMREGS; i++)
149 dprintk(verbose, FE_DEBUG, 1, " %s: 0x%02x", stb6100_regnames[i], regs[i]);
150 }
151 return 0;
152}
153
154static int stb6100_read_reg(struct stb6100_state *state, u8 reg)
155{
156 u8 regs[STB6100_NUMREGS];
157 int rc;
158
159 if (unlikely(reg >= STB6100_NUMREGS)) {
160 dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg);
161 return -EINVAL;
162 }
163 if ((rc = stb6100_read_regs(state, regs)) < 0)
164 return rc;
165 return (unsigned int)regs[reg];
166}
167
168static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int start, int len)
169{
170 int rc;
171 u8 cmdbuf[len + 1];
172 struct i2c_msg msg = {
173 .addr = state->config->tuner_address,
174 .flags = 0,
175 .buf = cmdbuf,
176 .len = len + 1
177 };
178
179 if (unlikely(start < 1 || start + len > STB6100_NUMREGS)) {
180 dprintk(verbose, FE_ERROR, 1, "Invalid register range %d:%d",
181 start, len);
182 return -EINVAL;
183 }
184 memcpy(&cmdbuf[1], buf, len);
185 cmdbuf[0] = start;
186
187 if (unlikely(verbose > FE_DEBUG)) {
188 int i;
189
190 dprintk(verbose, FE_DEBUG, 1, " Write @ 0x%02x: [%d:%d]", state->config->tuner_address, start, len);
191 for (i = 0; i < len; i++)
192 dprintk(verbose, FE_DEBUG, 1, " %s: 0x%02x", stb6100_regnames[start + i], buf[i]);
193 }
194 rc = i2c_transfer(state->i2c, &msg, 1);
195 if (unlikely(rc != 1)) {
196 dprintk(verbose, FE_ERROR, 1, "(0x%x) write err [%d:%d], rc=[%d]",
197 (unsigned int)state->config->tuner_address, start, len, rc);
198 return -EREMOTEIO;
199 }
200 return 0;
201}
202
203static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data)
204{
205 if (unlikely(reg >= STB6100_NUMREGS)) {
206 dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg);
207 return -EREMOTEIO;
208 }
209 data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set;
210 return stb6100_write_reg_range(state, &data, reg, 1);
211}
212
213static int stb6100_write_regs(struct stb6100_state *state, u8 regs[])
214{
215 stb6100_normalise_regs(regs);
216 return stb6100_write_reg_range(state, &regs[1], 1, STB6100_NUMREGS - 1);
217}
218
219static int stb6100_get_status(struct dvb_frontend *fe, u32 *status)
220{
221 int rc;
222 struct stb6100_state *state = fe->tuner_priv;
223
224 if ((rc = stb6100_read_reg(state, STB6100_LD)) < 0)
225 return rc;
226
227 return (rc & STB6100_LD_LOCK) ? TUNER_STATUS_LOCKED : 0;
228}
229
230static int stb6100_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
231{
232 int rc;
233 u8 f;
234 struct stb6100_state *state = fe->tuner_priv;
235
236 if ((rc = stb6100_read_reg(state, STB6100_F)) < 0)
237 return rc;
238 f = rc & STB6100_F_F;
239
240 state->status.bandwidth = (f + 5) * 2000; /* x2 for ZIF */
241
242 *bandwidth = state->bandwidth = state->status.bandwidth * 1000;
243 dprintk(verbose, FE_DEBUG, 1, "bandwidth = %u Hz", state->bandwidth);
244 return 0;
245}
246
247static int stb6100_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
248{
249 u32 tmp;
250 int rc;
251 struct stb6100_state *state = fe->tuner_priv;
252
253 dprintk(verbose, FE_DEBUG, 1, "set bandwidth to %u Hz", bandwidth);
254
255 bandwidth /= 2; /* ZIF */
256
257 if (bandwidth >= 36000000) /* F[4:0] BW/2 max = 31+5 = 36 MHz for F=31 */
258 tmp = 31;
259 else if (bandwidth <= 5000000) /* BW/2 min = 5 MHz for F=0 */
260 tmp = 0;
261 else /* if 5 < bw/2 < 36 */
262 tmp = (bandwidth + 500000) / 1000000 - 5;
263
264 /* Turn on LPF bandwidth setting clock control,
265 * set bandwidth, wait 10ms, turn off.
266 */
267 if ((rc = stb6100_write_reg(state, STB6100_FCCK, 0x0d | STB6100_FCCK_FCCK)) < 0)
268 return rc;
269 if ((rc = stb6100_write_reg(state, STB6100_F, 0xc0 | tmp)) < 0)
270 return rc;
271 msleep(1);
272 if ((rc = stb6100_write_reg(state, STB6100_FCCK, 0x0d)) < 0)
273 return rc;
274
275 return 0;
276}
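So the F[4:0] field ends up holding (single-sided LPF bandwidth in MHz, rounded) minus 5, clamped to 0..31; the value passed in is halved first because the tuner is zero-IF. A small standalone mirror of that calculation, convenient for sanity-checking register values (a sketch under the same rounding, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the F[4:0] computation in stb6100_set_bandwidth(). */
static uint32_t stb6100_f_field(uint32_t bandwidth_hz)
{
	uint32_t bw = bandwidth_hz / 2;		/* ZIF: single-sided bandwidth */

	if (bw >= 36000000)			/* F = 31 -> 36 MHz per side max */
		return 31;
	if (bw <= 5000000)			/* F = 0  ->  5 MHz per side min */
		return 0;
	return (bw + 500000) / 1000000 - 5;	/* round to nearest MHz, minus 5 */
}

int main(void)
{
	/* 55 MHz requested -> 27.5 MHz per side -> round to 28, minus 5 -> F = 23 */
	printf("F = %u\n", stb6100_f_field(55000000));
	/* 36 MHz requested -> 18 MHz per side -> F = 13 */
	printf("F = %u\n", stb6100_f_field(36000000));
	return 0;
}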
277
278static int stb6100_get_frequency(struct dvb_frontend *fe, u32 *frequency)
279{
280 int rc;
281 u32 nint, nfrac, fvco;
282 int psd2, odiv;
283 struct stb6100_state *state = fe->tuner_priv;
284 u8 regs[STB6100_NUMREGS];
285
286 if ((rc = stb6100_read_regs(state, regs)) < 0)
287 return rc;
288
289 odiv = (regs[STB6100_VCO] & STB6100_VCO_ODIV) >> STB6100_VCO_ODIV_SHIFT;
290 psd2 = (regs[STB6100_K] & STB6100_K_PSD2) >> STB6100_K_PSD2_SHIFT;
291 nint = regs[STB6100_NI];
292 nfrac = ((regs[STB6100_K] & STB6100_K_NF_MSB) << 8) | regs[STB6100_NF_LSB];
293 fvco = (nfrac * state->reference >> (9 - psd2)) + (nint * state->reference << psd2);
294 *frequency = state->frequency = fvco >> (odiv + 1);
295
296 dprintk(verbose, FE_DEBUG, 1,
297 "frequency = %u kHz, odiv = %u, psd2 = %u, fxtal = %u kHz, fvco = %u kHz, N(I) = %u, N(F) = %u",
298 state->frequency, odiv, psd2, state->reference, fvco, nint, nfrac);
299 return 0;
300}
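Reading the frequency back simply inverts the PLL programming: f(VCO) = f(XTAL) * (N(I) * 2^PSD2 + N(F) / 2^(9 - PSD2)) and f(LO) = f(VCO) / 2^(ODIV + 1), all in kHz since state->reference is refclock / 1000. A standalone arithmetic check with made-up register contents for a 27 MHz reference (a sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical register contents. */
	uint32_t ref = 27000;		/* kHz, refclock / 1000 */
	uint32_t nint = 57, nfrac = 209;
	int psd2 = 1, odiv = 0;
	uint32_t fvco, flo;

	/* Same expressions as stb6100_get_frequency(). */
	fvco = (nfrac * ref >> (9 - psd2)) + (nint * ref << psd2);
	flo = fvco >> (odiv + 1);

	printf("fvco = %u kHz, LO = %u kHz\n", fvco, flo);
	/* prints: fvco = 3100042 kHz, LO = 1550021 kHz */
	return 0;
}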
301
302
303static int stb6100_set_frequency(struct dvb_frontend *fe, u32 frequency)
304{
305 int rc;
306 const struct stb6100_lkup *ptr;
307 struct stb6100_state *state = fe->tuner_priv;
308 struct dvb_frontend_parameters p;
309
310 u32 srate = 0, fvco, nint, nfrac;
311 u8 regs[STB6100_NUMREGS];
312 u8 g, psd2, odiv;
313
314 if ((rc = stb6100_read_regs(state, regs)) < 0)
315 return rc;
316
317 if (fe->ops.get_frontend) {
318 dprintk(verbose, FE_DEBUG, 1, "Get frontend parameters");
319 fe->ops.get_frontend(fe, &p);
320 }
321 srate = p.u.qpsk.symbol_rate;
322
323 regs[STB6100_DLB] = 0xdc;
324 /* Disable LPEN */
325 regs[STB6100_LPEN] &= ~STB6100_LPEN_LPEN; /* PLL Loop disabled */
326
327 if ((rc = stb6100_write_regs(state, regs)) < 0)
328 return rc;
329
330 /* Baseband gain. */
331 if (srate >= 15000000)
332 g = 9; // +4 dB
333 else if (srate >= 5000000)
334 g = 11; // +8 dB
335 else
336 g = 14; // +14 dB
337
338 regs[STB6100_G] = (regs[STB6100_G] & ~STB6100_G_G) | g;
339 regs[STB6100_G] &= ~STB6100_G_GCT; /* mask GCT */
340 regs[STB6100_G] |= (1 << 5); /* 2Vp-p Mode */
341
342 /* VCO divide ratio (LO divide ratio, VCO prescaler enable). */
343 if (frequency <= 1075000)
344 odiv = 1;
345 else
346 odiv = 0;
347 regs[STB6100_VCO] = (regs[STB6100_VCO] & ~STB6100_VCO_ODIV) | (odiv << STB6100_VCO_ODIV_SHIFT);
348
349 if ((frequency > 1075000) && (frequency <= 1325000))
350 psd2 = 0;
351 else
352 psd2 = 1;
353 regs[STB6100_K] = (regs[STB6100_K] & ~STB6100_K_PSD2) | (psd2 << STB6100_K_PSD2_SHIFT);
354
355 /* OSM */
356 for (ptr = lkup;
357 (ptr->val_high != 0) && !CHKRANGE(frequency, ptr->val_low, ptr->val_high);
358 ptr++);
359 if (ptr->val_high == 0) {
360 printk(KERN_ERR "%s: frequency out of range: %u kHz\n", __func__, frequency);
361 return -EINVAL;
362 }
363 regs[STB6100_VCO] = (regs[STB6100_VCO] & ~STB6100_VCO_OSM) | ptr->reg;
364
365 /* F(VCO) = F(LO) * (ODIV == 0 ? 2 : 4) */
366 fvco = frequency << (1 + odiv);
367 /* N(I) = floor(f(VCO) / (f(XTAL) * (PSD2 ? 2 : 1))) */
368 nint = fvco / (state->reference << psd2);
369 /* N(F) = round((f(VCO) / (f(XTAL) * (PSD2 ? 2 : 1)) - N(I)) * 2^9) */
370 nfrac = (((fvco - (nint * state->reference << psd2)) << (9 - psd2)) + state->reference / 2) / state->reference;
371 dprintk(verbose, FE_DEBUG, 1,
372 "frequency = %u, srate = %u, g = %u, odiv = %u, psd2 = %u, fxtal = %u, osm = %u, fvco = %u, N(I) = %u, N(F) = %u",
373 frequency, srate, (unsigned int)g, (unsigned int)odiv,
374 (unsigned int)psd2, state->reference,
375 ptr->reg, fvco, nint, nfrac);
376 regs[STB6100_NI] = nint;
377 regs[STB6100_NF_LSB] = nfrac;
378 regs[STB6100_K] = (regs[STB6100_K] & ~STB6100_K_NF_MSB) | ((nfrac >> 8) & STB6100_K_NF_MSB);
379 regs[STB6100_VCO] |= STB6100_VCO_OSCH; /* VCO search enabled */
380 regs[STB6100_VCO] |= STB6100_VCO_OCK; /* VCO search clock off */
381 regs[STB6100_FCCK] |= STB6100_FCCK_FCCK; /* LPF BW setting clock enabled */
382 regs[STB6100_LPEN] &= ~STB6100_LPEN_LPEN; /* PLL loop disabled */
383 /* Power up. */
384 regs[STB6100_LPEN] |= STB6100_LPEN_SYNP | STB6100_LPEN_OSCP | STB6100_LPEN_BEN;
385
386 msleep(2);
387 if ((rc = stb6100_write_regs(state, regs)) < 0)
388 return rc;
389
390 msleep(2);
391 regs[STB6100_LPEN] |= STB6100_LPEN_LPEN; /* PLL loop enabled */
392 if ((rc = stb6100_write_reg(state, STB6100_LPEN, regs[STB6100_LPEN])) < 0)
393 return rc;
394
395 regs[STB6100_VCO] &= ~STB6100_VCO_OCK; /* VCO fast search */
396 if ((rc = stb6100_write_reg(state, STB6100_VCO, regs[STB6100_VCO])) < 0)
397 return rc;
398
399 msleep(10); /* wait for LO to lock */
400 regs[STB6100_VCO] &= ~STB6100_VCO_OSCH; /* vco search disabled */
401 regs[STB6100_VCO] |= STB6100_VCO_OCK; /* search clock off */
402 if ((rc = stb6100_write_reg(state, STB6100_VCO, regs[STB6100_VCO])) < 0)
403 return rc;
404 regs[STB6100_FCCK] &= ~STB6100_FCCK_FCCK; /* LPF BW clock disabled */
405 stb6100_normalise_regs(regs);
406 if ((rc = stb6100_write_reg_range(state, &regs[1], 1, STB6100_NUMREGS - 3)) < 0)
407 return rc;
408
409 msleep(100);
410
411 return 0;
412}
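Going the other way, for a requested LO of 1550 MHz with a 27 MHz reference the code picks ODIV = 0 (frequency > 1075 MHz), PSD2 = 1 (frequency above 1325 MHz), f(VCO) = 2 * f(LO) = 3100 MHz, then N(I) from the integer division and N(F) from the rounded remainder. A standalone rerun of that arithmetic with the same expressions (hypothetical input frequency; a sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t frequency = 1550000;	/* requested LO, kHz */
	uint32_t ref = 27000;		/* kHz */
	unsigned int odiv, psd2;
	uint32_t fvco, nint, nfrac;

	odiv = (frequency <= 1075000) ? 1 : 0;
	psd2 = (frequency > 1075000 && frequency <= 1325000) ? 0 : 1;

	/* Same expressions as stb6100_set_frequency(). */
	fvco = frequency << (1 + odiv);
	nint = fvco / (ref << psd2);
	nfrac = (((fvco - (nint * ref << psd2)) << (9 - psd2)) + ref / 2) / ref;

	printf("odiv=%u psd2=%u fvco=%u N(I)=%u N(F)=%u\n",
	       odiv, psd2, fvco, nint, nfrac);
	/* prints: odiv=0 psd2=1 fvco=3100000 N(I)=57 N(F)=209 */
	return 0;
}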
413
414static int stb6100_sleep(struct dvb_frontend *fe)
415{
416 /* TODO: power down */
417 return 0;
418}
419
420static int stb6100_init(struct dvb_frontend *fe)
421{
422 struct stb6100_state *state = fe->tuner_priv;
423 struct tuner_state *status = &state->status;
424
425 status->tunerstep = 125000;
426 status->ifreq = 0;
427 status->refclock = 27000000; /* Hz */
428 status->iqsense = 1;
429 status->bandwidth = 36000; /* kHz */
430 state->bandwidth = status->bandwidth * 1000; /* Hz */
431 state->reference = status->refclock / 1000; /* kHz */
432
433 /* Set default bandwidth. */
434 return stb6100_set_bandwidth(fe, status->bandwidth);
435}
436
437static int stb6100_get_state(struct dvb_frontend *fe,
438 enum tuner_param param,
439 struct tuner_state *state)
440{
441 switch (param) {
442 case DVBFE_TUNER_FREQUENCY:
443 stb6100_get_frequency(fe, &state->frequency);
444 break;
445 case DVBFE_TUNER_TUNERSTEP:
446 break;
447 case DVBFE_TUNER_IFFREQ:
448 break;
449 case DVBFE_TUNER_BANDWIDTH:
450 stb6100_get_bandwidth(fe, &state->bandwidth);
451 break;
452 case DVBFE_TUNER_REFCLOCK:
453 break;
454 default:
455 break;
456 }
457
458 return 0;
459}
460
461static int stb6100_set_state(struct dvb_frontend *fe,
462 enum tuner_param param,
463 struct tuner_state *state)
464{
465 struct stb6100_state *tstate = fe->tuner_priv;
466
467 switch (param) {
468 case DVBFE_TUNER_FREQUENCY:
469 stb6100_set_frequency(fe, state->frequency);
470 tstate->frequency = state->frequency;
471 break;
472 case DVBFE_TUNER_TUNERSTEP:
473 break;
474 case DVBFE_TUNER_IFFREQ:
475 break;
476 case DVBFE_TUNER_BANDWIDTH:
477 stb6100_set_bandwidth(fe, state->bandwidth);
478 tstate->bandwidth = state->bandwidth;
479 break;
480 case DVBFE_TUNER_REFCLOCK:
481 break;
482 default:
483 break;
484 }
485
486 return 0;
487}
488
489static struct dvb_tuner_ops stb6100_ops = {
490 .info = {
491 .name = "STB6100 Silicon Tuner",
492 .frequency_min = 950000,
493 .frequency_max = 2150000,
494 .frequency_step = 0,
495 },
496
497 .init = stb6100_init,
498 .sleep = stb6100_sleep,
499 .get_status = stb6100_get_status,
500 .get_state = stb6100_get_state,
501 .set_state = stb6100_set_state,
502 .release = stb6100_release
503};
504
505struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
506 struct stb6100_config *config,
507 struct i2c_adapter *i2c)
508{
509 struct stb6100_state *state = NULL;
510
511 state = kzalloc(sizeof (struct stb6100_state), GFP_KERNEL);
512 if (state == NULL)
513 goto error;
514
515 state->config = config;
516 state->i2c = i2c;
517 state->frontend = fe;
518 state->reference = config->refclock / 1000; /* kHz */
519 fe->tuner_priv = state;
520 fe->ops.tuner_ops = stb6100_ops;
521
522 printk("%s: Attaching STB6100 \n", __func__);
523 return fe;
524
525error:
526 kfree(state);
527 return NULL;
528}
529
530static int stb6100_release(struct dvb_frontend *fe)
531{
532 struct stb6100_state *state = fe->tuner_priv;
533
534 fe->tuner_priv = NULL;
535 kfree(state);
536
537 return 0;
538}
539
540EXPORT_SYMBOL(stb6100_attach);
541MODULE_PARM_DESC(verbose, "Set Verbosity level");
542
543MODULE_AUTHOR("Manu Abraham");
544MODULE_DESCRIPTION("STB6100 Silicon tuner");
545MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/stb6100.h b/drivers/media/dvb/frontends/stb6100.h
new file mode 100644
index 000000000000..395d056599a6
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb6100.h
@@ -0,0 +1,115 @@
1/*
2 STB6100 Silicon Tuner
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#ifndef __STB_6100_REG_H
23#define __STB_6100_REG_H
24
25#include <linux/dvb/frontend.h>
26#include "dvb_frontend.h"
27
28#define STB6100_LD 0x00
29#define STB6100_LD_LOCK (1 << 0)
30
31#define STB6100_VCO 0x01
32#define STB6100_VCO_OSCH (0x01 << 7)
33#define STB6100_VCO_OSCH_SHIFT 7
34#define STB6100_VCO_OCK (0x03 << 5)
35#define STB6100_VCO_OCK_SHIFT 5
36#define STB6100_VCO_ODIV (0x01 << 4)
37#define STB6100_VCO_ODIV_SHIFT 4
38#define STB6100_VCO_OSM (0x0f << 0)
39
40#define STB6100_NI 0x02
41#define STB6100_NF_LSB 0x03
42
43#define STB6100_K 0x04
44#define STB6100_K_PSD2 (0x01 << 2)
45#define STB6100_K_PSD2_SHIFT 2
46#define STB6100_K_NF_MSB (0x03 << 0)
47
48#define STB6100_G 0x05
49#define STB6100_G_G (0x0f << 0)
50#define STB6100_G_GCT (0x07 << 5)
51
52#define STB6100_F 0x06
53#define STB6100_F_F (0x1f << 0)
54
55#define STB6100_DLB 0x07
56
57#define STB6100_TEST1 0x08
58
59#define STB6100_FCCK 0x09
60#define STB6100_FCCK_FCCK (0x01 << 6)
61
62#define STB6100_LPEN 0x0a
63#define STB6100_LPEN_LPEN (0x01 << 4)
64#define STB6100_LPEN_SYNP (0x01 << 5)
65#define STB6100_LPEN_OSCP (0x01 << 6)
66#define STB6100_LPEN_BEN (0x01 << 7)
67
68#define STB6100_TEST3 0x0b
69
70#define STB6100_NUMREGS 0x0c
71
72
73#define INRANGE(val, x, y) (((x <= val) && (val <= y)) || \
74 ((y <= val) && (val <= x)) ? 1 : 0)
75
76#define CHKRANGE(val, x, y) (((val >= x) && (val < y)) ? 1 : 0)
77
78struct stb6100_config {
79 u8 tuner_address;
80 u32 refclock;
81};
82
83struct stb6100_state {
84 struct i2c_adapter *i2c;
85
86 const struct stb6100_config *config;
87 struct dvb_tuner_ops ops;
88 struct dvb_frontend *frontend;
89 struct tuner_state status;
90
91 u32 frequency;
92 u32 srate;
93 u32 bandwidth;
94 u32 reference;
95};
96
97#if defined(CONFIG_DVB_STB6100) || (defined(CONFIG_DVB_STB6100_MODULE) && defined(MODULE))
98
99extern struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
100 struct stb6100_config *config,
101 struct i2c_adapter *i2c);
102
103#else
104
105static inline struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
106 struct stb6100_config *config,
107 struct i2c_adapter *i2c)
108{
109 printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
110 return NULL;
111}
112
113#endif //CONFIG_DVB_STB6100
114
115#endif
diff --git a/drivers/media/dvb/frontends/stb6100_cfg.h b/drivers/media/dvb/frontends/stb6100_cfg.h
new file mode 100644
index 000000000000..d3133405dc03
--- /dev/null
+++ b/drivers/media/dvb/frontends/stb6100_cfg.h
@@ -0,0 +1,108 @@
1/*
2 STB6100 Silicon Tuner
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22static int stb6100_get_frequency(struct dvb_frontend *fe, u32 *frequency)
23{
24 struct dvb_frontend_ops *frontend_ops = NULL;
25 struct dvb_tuner_ops *tuner_ops = NULL;
26 struct tuner_state t_state;
27 int err = 0;
28
29 if (&fe->ops)
30 frontend_ops = &fe->ops;
31 if (&frontend_ops->tuner_ops)
32 tuner_ops = &frontend_ops->tuner_ops;
33 if (tuner_ops->get_state) {
34 if ((err = tuner_ops->get_state(fe, DVBFE_TUNER_FREQUENCY, &t_state)) < 0) {
35 printk("%s: Invalid parameter\n", __func__);
36 return err;
37 }
38 *frequency = t_state.frequency;
39 printk("%s: Frequency=%d\n", __func__, t_state.frequency);
40 }
41 return 0;
42}
43
44static int stb6100_set_frequency(struct dvb_frontend *fe, u32 frequency)
45{
46 struct dvb_frontend_ops *frontend_ops = NULL;
47 struct dvb_tuner_ops *tuner_ops = NULL;
48 struct tuner_state t_state;
49 int err = 0;
50
51 t_state.frequency = frequency;
52 if (&fe->ops)
53 frontend_ops = &fe->ops;
54 if (&frontend_ops->tuner_ops)
55 tuner_ops = &frontend_ops->tuner_ops;
56 if (tuner_ops->set_state) {
57 if ((err = tuner_ops->set_state(fe, DVBFE_TUNER_FREQUENCY, &t_state)) < 0) {
58 printk("%s: Invalid parameter\n", __func__);
59 return err;
60 }
61 }
62 printk("%s: Frequency=%d\n", __func__, t_state.frequency);
63 return 0;
64}
65
66static int stb6100_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
67{
68 struct dvb_frontend_ops *frontend_ops = &fe->ops;
69 struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops;
70 struct tuner_state t_state;
71 int err = 0;
72
73 if (&fe->ops)
74 frontend_ops = &fe->ops;
75 if (&frontend_ops->tuner_ops)
76 tuner_ops = &frontend_ops->tuner_ops;
77 if (tuner_ops->get_state) {
78 if ((err = tuner_ops->get_state(fe, DVBFE_TUNER_BANDWIDTH, &t_state)) < 0) {
79 printk("%s: Invalid parameter\n", __func__);
80 return err;
81 }
82 *bandwidth = t_state.bandwidth;
83 }
84 printk("%s: Bandwidth=%d\n", __func__, t_state.bandwidth);
85 return 0;
86}
87
88static int stb6100_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
89{
90 struct dvb_frontend_ops *frontend_ops = NULL;
91 struct dvb_tuner_ops *tuner_ops = NULL;
92 struct tuner_state t_state;
93 int err = 0;
94
95 t_state.bandwidth = bandwidth;
96 if (&fe->ops)
97 frontend_ops = &fe->ops;
98 if (&frontend_ops->tuner_ops)
99 tuner_ops = &frontend_ops->tuner_ops;
100 if (tuner_ops->set_state) {
101 if ((err = tuner_ops->set_state(fe, DVBFE_TUNER_BANDWIDTH, &t_state)) < 0) {
102 printk("%s: Invalid parameter\n", __func__);
103 return err;
104 }
105 }
106 printk("%s: Bandwidth=%d\n", __func__, t_state.bandwidth);
107 return 0;
108}
diff --git a/drivers/media/dvb/frontends/tda8261.c b/drivers/media/dvb/frontends/tda8261.c
new file mode 100644
index 000000000000..b6d177799104
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda8261.c
@@ -0,0 +1,230 @@
1/*
2 TDA8261 8PSK/QPSK tuner driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/
19
20
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24
25#include "dvb_frontend.h"
26#include "tda8261.h"
27
28struct tda8261_state {
29 struct dvb_frontend *fe;
30 struct i2c_adapter *i2c;
31 const struct tda8261_config *config;
32
33 /* state cache */
34 u32 frequency;
35 u32 bandwidth;
36};
37
38static int tda8261_read(struct tda8261_state *state, u8 *buf)
39{
40 const struct tda8261_config *config = state->config;
41 int err = 0;
42 struct i2c_msg msg = { .addr = config->addr, .flags = I2C_M_RD,.buf = buf, .len = 2 };
43
44 if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1)
45 printk("%s: read error, err=%d\n", __func__, err);
46
47 return err;
48}
49
50static int tda8261_write(struct tda8261_state *state, u8 *buf)
51{
52 const struct tda8261_config *config = state->config;
53 int err = 0;
54 struct i2c_msg msg = { .addr = config->addr, .flags = 0, .buf = buf, .len = 4 };
55
56 if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1)
57 printk("%s: write error, err=%d\n", __func__, err);
58
59 return err;
60}
61
62static int tda8261_get_status(struct dvb_frontend *fe, u32 *status)
63{
64 struct tda8261_state *state = fe->tuner_priv;
65 u8 result = 0;
66 int err = 0;
67
68 *status = 0;
69
70 if ((err = tda8261_read(state, &result)) < 0) {
71 printk("%s: I/O Error\n", __func__);
72 return err;
73 }
74 if ((result >> 6) & 0x01) {
75 printk("%s: Tuner Phase Locked\n", __func__);
76 *status = 1;
77 }
78
79 return err;
80}
81
82static const u32 div_tab[] = { 2000, 1000, 500, 250, 125 }; /* kHz */
83static const u8 ref_div[] = { 0x00, 0x01, 0x02, 0x05, 0x07 };
84
85static int tda8261_get_state(struct dvb_frontend *fe,
86 enum tuner_param param,
87 struct tuner_state *tstate)
88{
89 struct tda8261_state *state = fe->tuner_priv;
90 int err = 0;
91
92 switch (param) {
93 case DVBFE_TUNER_FREQUENCY:
94 tstate->frequency = state->frequency;
95 break;
96 case DVBFE_TUNER_BANDWIDTH:
97 tstate->bandwidth = 40000000; /* FIXME! need to calculate Bandwidth */
98 break;
99 default:
100 printk("%s: Unknown parameter (param=%d)\n", __func__, param);
101 err = -EINVAL;
102 break;
103 }
104
105 return err;
106}
107
108static int tda8261_set_state(struct dvb_frontend *fe,
109 enum tuner_param param,
110 struct tuner_state *tstate)
111{
112 struct tda8261_state *state = fe->tuner_priv;
113 const struct tda8261_config *config = state->config;
114 u32 frequency, N, status = 0;
115 u8 buf[4];
116 int err = 0;
117
118 if (param & DVBFE_TUNER_FREQUENCY) {
119 /**
120 * N = Max VCO Frequency / Channel Spacing
121 * Max VCO Frequency = VCO frequency + (channel spacing - 1)
122 * (to account for half channel spacing on either side)
123 */
124 frequency = tstate->frequency;
125 if ((frequency < 950000) || (frequency > 2150000)) {
126 printk("%s: Frequency beyond limits, frequency=%d\n", __func__, frequency);
127 return -EINVAL;
128 }
129 N = (frequency + (div_tab[config->step_size] - 1)) / div_tab[config->step_size];
130 printk("%s: Step size=%d, Divider=%d, PG=0x%02x (%d)\n",
131 __func__, config->step_size, div_tab[config->step_size], N, N);
132
133 buf[0] = (N >> 8) & 0xff;
134 buf[1] = N & 0xff;
135 buf[2] = (0x01 << 7) | ((ref_div[config->step_size] & 0x07) << 1);
136
137 if (frequency < 1450000)
138 buf[3] = 0x00;
139 if (frequency < 2000000)
140 buf[3] = 0x40;
141 if (frequency < 2150000)
142 buf[3] = 0x80;
143
144 /* Set params */
145 if ((err = tda8261_write(state, buf)) < 0) {
146 printk("%s: I/O Error\n", __func__);
147 return err;
148 }
149 /* sleep for some time */
150 printk("%s: Waiting to Phase LOCK\n", __func__);
151 msleep(20);
152 /* check status */
153 if ((err = tda8261_get_status(fe, &status)) < 0) {
154 printk("%s: I/O Error\n", __func__);
155 return err;
156 }
157 if (status == 1) {
158 printk("%s: Tuner Phase locked: status=%d\n", __func__, status);
159 state->frequency = frequency; /* cache successful state */
160 } else {
161 printk("%s: No Phase lock: status=%d\n", __func__, status);
162 }
163 } else {
164 printk("%s: Unknown parameter (param=%d)\n", __func__, param);
165 return -EINVAL;
166 }
167
168 return 0;
169}
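For the TDA8261 the divider is simply N = ceil(frequency / step), with the step size fixed at attach time and the reference-divider bits taken from ref_div[]. Note that, as written, the three band-select ifs above are not chained with else, so buf[3] appears to come out as 0x80 for any accepted frequency below 2150 MHz. A standalone rerun of the buffer construction for a hypothetical 1550 MHz tune at 1 MHz steps (a sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

static const uint32_t div_tab[] = { 2000, 1000, 500, 250, 125 };	/* kHz */
static const uint8_t  ref_div[] = { 0x00, 0x01, 0x02, 0x05, 0x07 };

int main(void)
{
	uint32_t frequency = 1550000;	/* kHz */
	int step = 1;			/* TDA8261_STEP_1000 -> 1000 kHz */
	uint32_t N;
	uint8_t buf[4];

	N = (frequency + (div_tab[step] - 1)) / div_tab[step];	/* 1550 */

	buf[0] = (N >> 8) & 0xff;				/* 0x06 */
	buf[1] = N & 0xff;					/* 0x0e */
	buf[2] = (0x01 << 7) | ((ref_div[step] & 0x07) << 1);	/* 0x82 */
	buf[3] = 0x80;			/* band select, per the if-chain above */

	printf("N=%u buf=%02x %02x %02x %02x\n",
	       N, buf[0], buf[1], buf[2], buf[3]);
	return 0;
}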
170
171static int tda8261_release(struct dvb_frontend *fe)
172{
173 struct tda8261_state *state = fe->tuner_priv;
174
175 fe->tuner_priv = NULL;
176 kfree(state);
177 return 0;
178}
179
180static struct dvb_tuner_ops tda8261_ops = {
181
182 .info = {
183 .name = "TDA8261",
184// .tuner_name = NULL,
185 .frequency_min = 950000,
186 .frequency_max = 2150000,
187 .frequency_step = 0
188 },
189
190 .set_state = tda8261_set_state,
191 .get_state = tda8261_get_state,
192 .get_status = tda8261_get_status,
193 .release = tda8261_release
194};
195
196struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe,
197 const struct tda8261_config *config,
198 struct i2c_adapter *i2c)
199{
200 struct tda8261_state *state = NULL;
201
202 if ((state = kzalloc(sizeof (struct tda8261_state), GFP_KERNEL)) == NULL)
203 goto exit;
204
205 state->config = config;
206 state->i2c = i2c;
207 state->fe = fe;
208 fe->tuner_priv = state;
209 fe->ops.tuner_ops = tda8261_ops;
210
211 fe->ops.tuner_ops.info.frequency_step = div_tab[config->step_size];
212// fe->ops.tuner_ops.tuner_name = &config->buf;
213
214// printk("%s: Attaching %s TDA8261 8PSK/QPSK tuner\n",
215// __func__, fe->ops.tuner_ops.tuner_name);
216 printk("%s: Attaching TDA8261 8PSK/QPSK tuner\n", __func__);
217
218 return fe;
219
220exit:
221 kfree(state);
222 return NULL;
223}
224
225EXPORT_SYMBOL(tda8261_attach);
226MODULE_PARM_DESC(verbose, "Set verbosity level");
227
228MODULE_AUTHOR("Manu Abraham");
229MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
230MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/tda8261.h b/drivers/media/dvb/frontends/tda8261.h
new file mode 100644
index 000000000000..006e45351b94
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda8261.h
@@ -0,0 +1,55 @@
1/*
2 TDA8261 8PSK/QPSK tuner driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/
19
20#ifndef __TDA8261_H
21#define __TDA8261_H
22
23enum tda8261_step {
24 TDA8261_STEP_2000 = 0, /* 2000 kHz */
25 TDA8261_STEP_1000, /* 1000 kHz */
26 TDA8261_STEP_500, /* 500 kHz */
27 TDA8261_STEP_250, /* 250 kHz */
28 TDA8261_STEP_125 /* 125 kHz */
29};
30
31struct tda8261_config {
32// u8 buf[16];
33 u8 addr;
34 enum tda8261_step step_size;
35};
36
37#if defined(CONFIG_DVB_TDA8261) || (defined(CONFIG_DVB_TDA8261_MODULE) && defined(MODULE))
38
39extern struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe,
40 const struct tda8261_config *config,
41 struct i2c_adapter *i2c);
42
43#else
44
45static inline struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe,
46 const struct tda8261_config *config,
47 struct i2c_adapter *i2c)
48{
49 printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
50 return NULL;
51}
52
53#endif //CONFIG_DVB_TDA8261
54
55#endif// __TDA8261_H
diff --git a/drivers/media/dvb/frontends/tda8261_cfg.h b/drivers/media/dvb/frontends/tda8261_cfg.h
new file mode 100644
index 000000000000..1af1ee49b542
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda8261_cfg.h
@@ -0,0 +1,84 @@
1/*
2 TDA8261 8PSK/QPSK tuner driver
3 Copyright (C) Manu Abraham (abraham.manu@gmail.com)
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/
19
20static int tda8261_get_frequency(struct dvb_frontend *fe, u32 *frequency)
21{
22 struct dvb_frontend_ops *frontend_ops = NULL;
23 struct dvb_tuner_ops *tuner_ops = NULL;
24 struct tuner_state t_state;
25 int err = 0;
26
27 if (&fe->ops)
28 frontend_ops = &fe->ops;
29 if (&frontend_ops->tuner_ops)
30 tuner_ops = &frontend_ops->tuner_ops;
31 if (tuner_ops->get_state) {
32 if ((err = tuner_ops->get_state(fe, DVBFE_TUNER_FREQUENCY, &t_state)) < 0) {
33 printk("%s: Invalid parameter\n", __func__);
34 return err;
35 }
36 *frequency = t_state.frequency;
37 printk("%s: Frequency=%d\n", __func__, t_state.frequency);
38 }
39 return 0;
40}
41
42static int tda8261_set_frequency(struct dvb_frontend *fe, u32 frequency)
43{
44 struct dvb_frontend_ops *frontend_ops = NULL;
45 struct dvb_tuner_ops *tuner_ops = NULL;
46 struct tuner_state t_state;
47 int err = 0;
48
49 t_state.frequency = frequency;
50 if (&fe->ops)
51 frontend_ops = &fe->ops;
52 if (&frontend_ops->tuner_ops)
53 tuner_ops = &frontend_ops->tuner_ops;
54 if (tuner_ops->set_state) {
55 if ((err = tuner_ops->set_state(fe, DVBFE_TUNER_FREQUENCY, &t_state)) < 0) {
56 printk("%s: Invalid parameter\n", __func__);
57 return err;
58 }
59 }
60 printk("%s: Frequency=%d\n", __func__, t_state.frequency);
61 return 0;
62}
63
64static int tda8261_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
65{
66 struct dvb_frontend_ops *frontend_ops = &fe->ops;
67 struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops;
68 struct tuner_state t_state;
69 int err = 0;
70
71 if (&fe->ops)
72 frontend_ops = &fe->ops;
73 if (&frontend_ops->tuner_ops)
74 tuner_ops = &frontend_ops->tuner_ops;
75 if (tuner_ops->get_state) {
76 if ((err = tuner_ops->get_state(fe, DVBFE_TUNER_BANDWIDTH, &t_state)) < 0) {
77 printk("%s: Invalid parameter\n", __func__);
78 return err;
79 }
80 *bandwidth = t_state.bandwidth;
81 }
82 printk("%s: Bandwidth=%d\n", __func__, t_state.bandwidth);
83 return 0;
84}
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c
index 36a5a1c101d5..5506f80e180e 100644
--- a/drivers/media/dvb/frontends/zl10353.c
+++ b/drivers/media/dvb/frontends/zl10353.c
@@ -220,15 +220,18 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
220 /* These are extrapolated from the 7 and 8MHz values */ 220 /* These are extrapolated from the 7 and 8MHz values */
221 zl10353_single_write(fe, MCLK_RATIO, 0x97); 221 zl10353_single_write(fe, MCLK_RATIO, 0x97);
222 zl10353_single_write(fe, 0x64, 0x34); 222 zl10353_single_write(fe, 0x64, 0x34);
223 zl10353_single_write(fe, 0xcc, 0xdd);
223 break; 224 break;
224 case BANDWIDTH_7_MHZ: 225 case BANDWIDTH_7_MHZ:
225 zl10353_single_write(fe, MCLK_RATIO, 0x86); 226 zl10353_single_write(fe, MCLK_RATIO, 0x86);
226 zl10353_single_write(fe, 0x64, 0x35); 227 zl10353_single_write(fe, 0x64, 0x35);
228 zl10353_single_write(fe, 0xcc, 0x73);
227 break; 229 break;
228 case BANDWIDTH_8_MHZ: 230 case BANDWIDTH_8_MHZ:
229 default: 231 default:
230 zl10353_single_write(fe, MCLK_RATIO, 0x75); 232 zl10353_single_write(fe, MCLK_RATIO, 0x75);
231 zl10353_single_write(fe, 0x64, 0x36); 233 zl10353_single_write(fe, 0x64, 0x36);
234 zl10353_single_write(fe, 0xcc, 0x73);
232 } 235 }
233 236
234 zl10353_calc_nominal_rate(fe, op->bandwidth, &nominal_rate); 237 zl10353_calc_nominal_rate(fe, op->bandwidth, &nominal_rate);
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index e98d6caf2c23..fd62e0b85621 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -38,6 +38,16 @@ struct usb_device_id smsusb_id_table[] = {
38 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A }, 38 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A },
39 { USB_DEVICE(0x2040, 0x1801), 39 { USB_DEVICE(0x2040, 0x1801),
40 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B }, 40 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B },
41 { USB_DEVICE(0x2040, 0x2000),
42 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD },
43 { USB_DEVICE(0x2040, 0x2009),
44 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2 },
45 { USB_DEVICE(0x2040, 0x200a),
46 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD },
47 { USB_DEVICE(0x2040, 0x2010),
48 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD },
49 { USB_DEVICE(0x2040, 0x2019),
50 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD },
41 { USB_DEVICE(0x2040, 0x5500), 51 { USB_DEVICE(0x2040, 0x5500),
42 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, 52 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
43 { USB_DEVICE(0x2040, 0x5510), 53 { USB_DEVICE(0x2040, 0x5510),
@@ -96,6 +106,21 @@ static struct sms_board sms_boards[] = {
96 .name = "Hauppauge WinTV MiniStick", 106 .name = "Hauppauge WinTV MiniStick",
97 .type = SMS_NOVA_B0, 107 .type = SMS_NOVA_B0,
98 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw", 108 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
109 .led_power = 26,
110 .led_lo = 27,
111 .led_hi = 28,
112 },
113 [SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD] = {
114 .name = "Hauppauge WinTV MiniCard",
115 .type = SMS_NOVA_B0,
116 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
117 .lna_ctrl = 29,
118 },
119 [SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2] = {
120 .name = "Hauppauge WinTV MiniCard",
121 .type = SMS_NOVA_B0,
122 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
123 .lna_ctrl = 1,
99 }, 124 },
100}; 125};
101 126
@@ -106,3 +131,88 @@ struct sms_board *sms_get_board(int id)
106 return &sms_boards[id]; 131 return &sms_boards[id];
107} 132}
108 133
134static int sms_set_gpio(struct smscore_device_t *coredev, u32 pin, int enable)
135{
136 int ret;
137 struct smscore_gpio_config gpioconfig = {
138 .direction = SMS_GPIO_DIRECTION_OUTPUT,
139 .pullupdown = SMS_GPIO_PULLUPDOWN_NONE,
140 .inputcharacteristics = SMS_GPIO_INPUTCHARACTERISTICS_NORMAL,
141 .outputslewrate = SMS_GPIO_OUTPUTSLEWRATE_FAST,
142 .outputdriving = SMS_GPIO_OUTPUTDRIVING_4mA,
143 };
144
145 if (pin == 0)
146 return -EINVAL;
147
148 ret = smscore_configure_gpio(coredev, pin, &gpioconfig);
149
150 if (ret < 0)
151 return ret;
152
153 return smscore_set_gpio(coredev, pin, enable);
154}
155
156int sms_board_setup(struct smscore_device_t *coredev)
157{
158 int board_id = smscore_get_board_id(coredev);
159 struct sms_board *board = sms_get_board(board_id);
160
161 switch (board_id) {
162 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
163 /* turn off all LEDs */
164 sms_set_gpio(coredev, board->led_power, 0);
165 sms_set_gpio(coredev, board->led_hi, 0);
166 sms_set_gpio(coredev, board->led_lo, 0);
167 break;
168 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
169 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
170 /* turn off LNA */
171 sms_set_gpio(coredev, board->lna_ctrl, 0);
172 break;
173 }
174 return 0;
175}
176
177int sms_board_power(struct smscore_device_t *coredev, int onoff)
178{
179 int board_id = smscore_get_board_id(coredev);
180 struct sms_board *board = sms_get_board(board_id);
181
182 switch (board_id) {
183 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
184 /* power LED */
185 sms_set_gpio(coredev,
186 board->led_power, onoff ? 1 : 0);
187 break;
188 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
189 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
190 /* LNA */
191 sms_set_gpio(coredev,
192 board->lna_ctrl, onoff ? 1 : 0);
193 break;
194 }
195 return 0;
196}
197
198int sms_board_led_feedback(struct smscore_device_t *coredev, int led)
199{
200 int board_id = smscore_get_board_id(coredev);
201 struct sms_board *board = sms_get_board(board_id);
202
203 /* don't touch GPIO if LEDs are already set */
204 if (smscore_led_state(coredev, -1) == led)
205 return 0;
206
207 switch (board_id) {
208 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
209 sms_set_gpio(coredev,
210 board->led_lo, (led & SMS_LED_LO) ? 1 : 0);
211 sms_set_gpio(coredev,
212 board->led_hi, (led & SMS_LED_HI) ? 1 : 0);
213
214 smscore_led_state(coredev, led);
215 break;
216 }
217 return 0;
218}
diff --git a/drivers/media/dvb/siano/sms-cards.h b/drivers/media/dvb/siano/sms-cards.h
index c8f3da6f9bc1..8e0fe9fd2610 100644
--- a/drivers/media/dvb/siano/sms-cards.h
+++ b/drivers/media/dvb/siano/sms-cards.h
@@ -32,14 +32,27 @@
32#define SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A 6 32#define SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A 6
33#define SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B 7 33#define SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B 7
34#define SMS1XXX_BOARD_HAUPPAUGE_WINDHAM 8 34#define SMS1XXX_BOARD_HAUPPAUGE_WINDHAM 8
35#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD 9
36#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2 10
35 37
36struct sms_board { 38struct sms_board {
37 enum sms_device_type_st type; 39 enum sms_device_type_st type;
38 char *name, *fw[DEVICE_MODE_MAX]; 40 char *name, *fw[DEVICE_MODE_MAX];
41
42 /* gpios */
43 int led_power, led_hi, led_lo, lna_ctrl;
39}; 44};
40 45
41struct sms_board *sms_get_board(int id); 46struct sms_board *sms_get_board(int id);
42 47
48int sms_board_setup(struct smscore_device_t *coredev);
49
50#define SMS_LED_OFF 0
51#define SMS_LED_LO 1
52#define SMS_LED_HI 2
53int sms_board_led_feedback(struct smscore_device_t *coredev, int led);
54int sms_board_power(struct smscore_device_t *coredev, int onoff);
55
43extern struct usb_device_id smsusb_id_table[]; 56extern struct usb_device_id smsusb_id_table[];
44 57
45#endif /* __SMS_CARDS_H__ */ 58#endif /* __SMS_CARDS_H__ */
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 6576fbb40fc6..cf613f22fb8d 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -91,6 +91,7 @@ struct smscore_device_t {
91 struct completion init_device_done, reload_start_done, resume_done; 91 struct completion init_device_done, reload_start_done, resume_done;
92 92
93 int board_id; 93 int board_id;
94 int led_state;
94}; 95};
95 96
96void smscore_set_board_id(struct smscore_device_t *core, int id) 97void smscore_set_board_id(struct smscore_device_t *core, int id)
@@ -98,6 +99,13 @@ void smscore_set_board_id(struct smscore_device_t *core, int id)
98 core->board_id = id; 99 core->board_id = id;
99} 100}
100 101
102int smscore_led_state(struct smscore_device_t *core, int led)
103{
104 if (led >= 0)
105 core->led_state = led;
106 return core->led_state;
107}
108
101int smscore_get_board_id(struct smscore_device_t *core) 109int smscore_get_board_id(struct smscore_device_t *core)
102{ 110{
103 return core->board_id; 111 return core->board_id;
@@ -1187,6 +1195,76 @@ int smsclient_sendrequest(struct smscore_client_t *client,
1187} 1195}
1188 1196
1189 1197
1198int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin,
1199 struct smscore_gpio_config *pinconfig)
1200{
1201 struct {
1202 struct SmsMsgHdr_ST hdr;
1203 u32 data[6];
1204 } msg;
1205
1206 if (coredev->device_flags & SMS_DEVICE_FAMILY2) {
1207 msg.hdr.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
1208 msg.hdr.msgDstId = HIF_TASK;
1209 msg.hdr.msgFlags = 0;
1210 msg.hdr.msgType = MSG_SMS_GPIO_CONFIG_EX_REQ;
1211 msg.hdr.msgLength = sizeof(msg);
1212
1213 msg.data[0] = pin;
1214 msg.data[1] = pinconfig->pullupdown;
1215
1216 /* Convert slew rate for Nova: Fast(0) = 3 / Slow(1) = 0; */
1217 msg.data[2] = pinconfig->outputslewrate == 0 ? 3 : 0;
1218
1219 switch (pinconfig->outputdriving) {
1220 case SMS_GPIO_OUTPUTDRIVING_16mA:
1221 msg.data[3] = 7; /* Nova - 16mA */
1222 break;
1223 case SMS_GPIO_OUTPUTDRIVING_12mA:
1224 msg.data[3] = 5; /* Nova - 11mA */
1225 break;
1226 case SMS_GPIO_OUTPUTDRIVING_8mA:
1227 msg.data[3] = 3; /* Nova - 7mA */
1228 break;
1229 case SMS_GPIO_OUTPUTDRIVING_4mA:
1230 default:
1231 msg.data[3] = 2; /* Nova - 4mA */
1232 break;
1233 }
1234
1235 msg.data[4] = pinconfig->direction;
1236 msg.data[5] = 0;
1237 } else /* TODO: SMS_DEVICE_FAMILY1 */
1238 return -EINVAL;
1239
1240 return coredev->sendrequest_handler(coredev->context,
1241 &msg, sizeof(msg));
1242}
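The drive-strength argument is translated from the generic SMS_GPIO_OUTPUTDRIVING_* values to Nova-specific codes (4 mA -> 2, 8 mA -> 3, 12 mA -> 5, 16 mA -> 7), and a fast/slow slew request becomes 3/0. A tiny standalone mirror of that mapping, handy when cross-checking a GPIO_CONFIG_EX message by hand (enum names here are local to the example):

#include <stdio.h>

enum { DRV_4MA, DRV_8MA, DRV_12MA, DRV_16MA };

/* Same translation as smscore_configure_gpio() for SMS_DEVICE_FAMILY2. */
static unsigned int nova_driving(int outputdriving)
{
	switch (outputdriving) {
	case DRV_16MA: return 7;
	case DRV_12MA: return 5;	/* closest Nova setting: 11 mA */
	case DRV_8MA:  return 3;	/* closest Nova setting: 7 mA  */
	case DRV_4MA:
	default:       return 2;	/* Nova 4 mA */
	}
}

int main(void)
{
	printf("4mA -> %u, 16mA -> %u\n",
	       nova_driving(DRV_4MA), nova_driving(DRV_16MA));	/* 2, 7 */
	return 0;
}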
1243
1244int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level)
1245{
1246 struct {
1247 struct SmsMsgHdr_ST hdr;
1248 u32 data[3];
1249 } msg;
1250
1251 if (pin > MAX_GPIO_PIN_NUMBER)
1252 return -EINVAL;
1253
1254 msg.hdr.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
1255 msg.hdr.msgDstId = HIF_TASK;
1256 msg.hdr.msgFlags = 0;
1257 msg.hdr.msgType = MSG_SMS_GPIO_SET_LEVEL_REQ;
1258 msg.hdr.msgLength = sizeof(msg);
1259
1260 msg.data[0] = pin;
1261 msg.data[1] = level ? 1 : 0;
1262 msg.data[2] = 0;
1263
1264 return coredev->sendrequest_handler(coredev->context,
1265 &msg, sizeof(msg));
1266}
1267
1190static int __init smscore_module_init(void) 1268static int __init smscore_module_init(void)
1191{ 1269{
1192 int rc = 0; 1270 int rc = 0;
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index 8d973f726fb8..760e233fcbc5 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -186,6 +186,8 @@ struct smsclient_params_t {
186#define MSG_SW_RELOAD_EXEC_REQ 704 186#define MSG_SW_RELOAD_EXEC_REQ 704
187#define MSG_SW_RELOAD_EXEC_RES 705 187#define MSG_SW_RELOAD_EXEC_RES 705
188#define MSG_SMS_SPI_INT_LINE_SET_REQ 710 188#define MSG_SMS_SPI_INT_LINE_SET_REQ 710
189#define MSG_SMS_GPIO_CONFIG_EX_REQ 712
190#define MSG_SMS_GPIO_CONFIG_EX_RES 713
189#define MSG_SMS_ISDBT_TUNE_REQ 776 191#define MSG_SMS_ISDBT_TUNE_REQ 776
190#define MSG_SMS_ISDBT_TUNE_RES 777 192#define MSG_SMS_ISDBT_TUNE_RES 777
191 193
@@ -341,6 +343,32 @@ struct SmsMsgStatisticsInfo_ST {
341}; 343};
342 344
343 345
346struct smscore_gpio_config {
347#define SMS_GPIO_DIRECTION_INPUT 0
348#define SMS_GPIO_DIRECTION_OUTPUT 1
349 u8 direction;
350
351#define SMS_GPIO_PULLUPDOWN_NONE 0
352#define SMS_GPIO_PULLUPDOWN_PULLDOWN 1
353#define SMS_GPIO_PULLUPDOWN_PULLUP 2
354#define SMS_GPIO_PULLUPDOWN_KEEPER 3
355 u8 pullupdown;
356
357#define SMS_GPIO_INPUTCHARACTERISTICS_NORMAL 0
358#define SMS_GPIO_INPUTCHARACTERISTICS_SCHMITT 1
359 u8 inputcharacteristics;
360
361#define SMS_GPIO_OUTPUTSLEWRATE_FAST 0
362#define SMS_GPIO_OUTPUTSLEWRATE_SLOW 1
363 u8 outputslewrate;
364
365#define SMS_GPIO_OUTPUTDRIVING_4mA 0
366#define SMS_GPIO_OUTPUTDRIVING_8mA 1
367#define SMS_GPIO_OUTPUTDRIVING_12mA 2
368#define SMS_GPIO_OUTPUTDRIVING_16mA 3
369 u8 outputdriving;
370};
371
344struct smsdvb_client_t { 372struct smsdvb_client_t {
345 struct list_head entry; 373 struct list_head entry;
346 374
@@ -353,7 +381,7 @@ struct smsdvb_client_t {
353 struct dvb_frontend frontend; 381 struct dvb_frontend frontend;
354 382
355 fe_status_t fe_status; 383 fe_status_t fe_status;
356 int fe_ber, fe_snr, fe_signal_strength; 384 int fe_ber, fe_snr, fe_unc, fe_signal_strength;
357 385
358 struct completion tune_done, stat_done; 386 struct completion tune_done, stat_done;
359 387
@@ -396,9 +424,15 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev);
396extern void smscore_putbuffer(struct smscore_device_t *coredev, 424extern void smscore_putbuffer(struct smscore_device_t *coredev,
397 struct smscore_buffer_t *cb); 425 struct smscore_buffer_t *cb);
398 426
427int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin,
428 struct smscore_gpio_config *pinconfig);
429int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level);
430
399void smscore_set_board_id(struct smscore_device_t *core, int id); 431void smscore_set_board_id(struct smscore_device_t *core, int id);
400int smscore_get_board_id(struct smscore_device_t *core); 432int smscore_get_board_id(struct smscore_device_t *core);
401 433
434int smscore_led_state(struct smscore_device_t *core, int led);
435
402/* smsdvb.c */ 436/* smsdvb.c */
403int smsdvb_register(void); 437int smsdvb_register(void);
404void smsdvb_unregister(void); 438void smsdvb_unregister(void);
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index 8d490e133f35..2da953a4f4f5 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -60,6 +60,7 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
60 60
61 client->fe_snr = p->Stat.SNR; 61 client->fe_snr = p->Stat.SNR;
62 client->fe_ber = p->Stat.BER; 62 client->fe_ber = p->Stat.BER;
63 client->fe_unc = p->Stat.BERErrorCount;
63 64
64 if (p->Stat.InBandPwr < -95) 65 if (p->Stat.InBandPwr < -95)
65 client->fe_signal_strength = 0; 66 client->fe_signal_strength = 0;
@@ -72,6 +73,7 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
72 client->fe_status = 0; 73 client->fe_status = 0;
73 client->fe_snr = 74 client->fe_snr =
74 client->fe_ber = 75 client->fe_ber =
76 client->fe_unc =
75 client->fe_signal_strength = 0; 77 client->fe_signal_strength = 0;
76 } 78 }
77 79
@@ -165,8 +167,18 @@ static int smsdvb_send_statistics_request(struct smsdvb_client_t *client)
165 struct SmsMsgHdr_ST Msg = { MSG_SMS_GET_STATISTICS_REQ, 167 struct SmsMsgHdr_ST Msg = { MSG_SMS_GET_STATISTICS_REQ,
166 DVBT_BDA_CONTROL_MSG_ID, 168 DVBT_BDA_CONTROL_MSG_ID,
167 HIF_TASK, sizeof(struct SmsMsgHdr_ST), 0 }; 169 HIF_TASK, sizeof(struct SmsMsgHdr_ST), 0 };
168 return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg), 170 int ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
169 &client->stat_done); 171 &client->stat_done);
172 if (ret < 0)
173 return ret;
174
175 if (client->fe_status & FE_HAS_LOCK)
176 sms_board_led_feedback(client->coredev,
177 (client->fe_unc == 0) ?
178 SMS_LED_HI : SMS_LED_LO);
179 else
180 sms_board_led_feedback(client->coredev, SMS_LED_OFF);
181 return ret;
170} 182}
171 183
172static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat) 184static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat)
@@ -217,6 +229,18 @@ static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr)
217 return rc; 229 return rc;
218} 230}
219 231
232static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
233{
234 struct smsdvb_client_t *client =
235 container_of(fe, struct smsdvb_client_t, frontend);
236 int rc = smsdvb_send_statistics_request(client);
237
238 if (!rc)
239 *ucblocks = client->fe_unc;
240
241 return rc;
242}
243
220static int smsdvb_get_tune_settings(struct dvb_frontend *fe, 244static int smsdvb_get_tune_settings(struct dvb_frontend *fe,
221 struct dvb_frontend_tune_settings *tune) 245 struct dvb_frontend_tune_settings *tune)
222{ 246{
@@ -273,6 +297,28 @@ static int smsdvb_get_frontend(struct dvb_frontend *fe,
273 /* todo: */ 297 /* todo: */
274 memcpy(fep, &client->fe_params, 298 memcpy(fep, &client->fe_params,
275 sizeof(struct dvb_frontend_parameters)); 299 sizeof(struct dvb_frontend_parameters));
300
301 return 0;
302}
303
304static int smsdvb_init(struct dvb_frontend *fe)
305{
306 struct smsdvb_client_t *client =
307 container_of(fe, struct smsdvb_client_t, frontend);
308
309 sms_board_power(client->coredev, 1);
310
311 return 0;
312}
313
314static int smsdvb_sleep(struct dvb_frontend *fe)
315{
316 struct smsdvb_client_t *client =
317 container_of(fe, struct smsdvb_client_t, frontend);
318
319 sms_board_led_feedback(client->coredev, SMS_LED_OFF);
320 sms_board_power(client->coredev, 0);
321
276 return 0; 322 return 0;
277} 323}
278 324
@@ -308,6 +354,10 @@ static struct dvb_frontend_ops smsdvb_fe_ops = {
308 .read_ber = smsdvb_read_ber, 354 .read_ber = smsdvb_read_ber,
309 .read_signal_strength = smsdvb_read_signal_strength, 355 .read_signal_strength = smsdvb_read_signal_strength,
310 .read_snr = smsdvb_read_snr, 356 .read_snr = smsdvb_read_snr,
357 .read_ucblocks = smsdvb_read_ucblocks,
358
359 .init = smsdvb_init,
360 .sleep = smsdvb_sleep,
311}; 361};
312 362
313static int smsdvb_hotplug(struct smscore_device_t *coredev, 363static int smsdvb_hotplug(struct smscore_device_t *coredev,
@@ -402,6 +452,8 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
402 452
403 sms_info("success"); 453 sms_info("success");
404 454
455 sms_board_setup(coredev);
456
405 return 0; 457 return 0;
406 458
407client_error: 459client_error:
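
For context on the two additions above: the statistics handler now latches p->Stat.BERErrorCount into fe_unc, the same counter drives the front-panel LED (SMS_LED_HI when locked and error-free, SMS_LED_LO otherwise), and the new .read_ucblocks hook hands the value to the DVB core so userspace can read it through the standard frontend ioctl. A minimal, hypothetical userspace poller is sketched below; the device path, adapter number and error handling are assumptions for illustration, not part of this patch.

/* Hypothetical poller for the counter exported by smsdvb_read_ucblocks();
 * adapter/frontend numbers here are assumptions for the example. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY | O_NONBLOCK);
	fe_status_t status;
	uint32_t ucblocks;

	if (fd < 0) {
		perror("open frontend");
		return 1;
	}
	if (ioctl(fd, FE_READ_STATUS, &status) == 0 &&
	    ioctl(fd, FE_READ_UNCORRECTED_BLOCKS, &ucblocks) == 0)
		printf("lock=%d uncorrected=%u\n",
		       !!(status & FE_HAS_LOCK), (unsigned)ucblocks);
	close(fd);
	return 0;
}
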
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 87a3c24454b9..5d7ca3417719 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -432,11 +432,56 @@ static void smsusb_disconnect(struct usb_interface *intf)
432 smsusb_term_device(intf); 432 smsusb_term_device(intf);
433} 433}
434 434
435static int smsusb_suspend(struct usb_interface *intf, pm_message_t msg)
436{
437 struct smsusb_device_t *dev =
438 (struct smsusb_device_t *)usb_get_intfdata(intf);
439	printk(KERN_INFO "%s: entering suspend, event %d\n", __func__, msg.event);
440 smsusb_stop_streaming(dev);
441 return 0;
442}
443
444static int smsusb_resume(struct usb_interface *intf)
445{
446 int rc, i;
447 struct smsusb_device_t *dev =
448 (struct smsusb_device_t *)usb_get_intfdata(intf);
449 struct usb_device *udev = interface_to_usbdev(intf);
450
451	printk(KERN_INFO "%s: resuming\n", __func__);
452 usb_clear_halt(udev, usb_rcvbulkpipe(udev, 0x81));
453 usb_clear_halt(udev, usb_rcvbulkpipe(udev, 0x02));
454
455 for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++)
456 printk(KERN_INFO "endpoint %d %02x %02x %d\n", i,
457 intf->cur_altsetting->endpoint[i].desc.bEndpointAddress,
458 intf->cur_altsetting->endpoint[i].desc.bmAttributes,
459			le16_to_cpu(intf->cur_altsetting->endpoint[i].desc.wMaxPacketSize));
460
461 if (intf->num_altsetting > 0) {
462 rc = usb_set_interface(udev,
463 intf->cur_altsetting->desc.
464 bInterfaceNumber, 0);
465 if (rc < 0) {
466 printk(KERN_INFO
467 "%s usb_set_interface failed, rc %d\n",
468 __func__, rc);
469 return rc;
470 }
471 }
472
473 smsusb_start_streaming(dev);
474 return 0;
475}
476
435static struct usb_driver smsusb_driver = { 477static struct usb_driver smsusb_driver = {
436 .name = "sms1xxx", 478 .name = "sms1xxx",
437 .probe = smsusb_probe, 479 .probe = smsusb_probe,
438 .disconnect = smsusb_disconnect, 480 .disconnect = smsusb_disconnect,
439 .id_table = smsusb_id_table, 481 .id_table = smsusb_id_table,
482
483 .suspend = smsusb_suspend,
484 .resume = smsusb_resume,
440}; 485};
441 486
442int smsusb_register(void) 487int smsusb_register(void)
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 401a04effc06..ab0bcd208c78 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -104,6 +104,8 @@ config DVB_BUDGET_CI
104 select DVB_STV0297 if !DVB_FE_CUSTOMISE 104 select DVB_STV0297 if !DVB_FE_CUSTOMISE
105 select DVB_STV0299 if !DVB_FE_CUSTOMISE 105 select DVB_STV0299 if !DVB_FE_CUSTOMISE
106 select DVB_TDA1004X if !DVB_FE_CUSTOMISE 106 select DVB_TDA1004X if !DVB_FE_CUSTOMISE
107 select DVB_STB0899 if !DVB_FE_CUSTOMISE
108 select DVB_STB6100 if !DVB_FE_CUSTOMISE
107 select DVB_LNBP21 if !DVB_FE_CUSTOMISE 109 select DVB_LNBP21 if !DVB_FE_CUSTOMISE
108 select DVB_TDA10023 if !DVB_FE_CUSTOMISE 110 select DVB_TDA10023 if !DVB_FE_CUSTOMISE
109 select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMIZE 111 select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMIZE
@@ -131,6 +133,8 @@ config DVB_BUDGET_AV
131 select DVB_TDA1004X if !DVB_FE_CUSTOMISE 133 select DVB_TDA1004X if !DVB_FE_CUSTOMISE
132 select DVB_TDA10021 if !DVB_FE_CUSTOMISE 134 select DVB_TDA10021 if !DVB_FE_CUSTOMISE
133 select DVB_TDA10023 if !DVB_FE_CUSTOMISE 135 select DVB_TDA10023 if !DVB_FE_CUSTOMISE
136 select DVB_STB0899 if !DVB_FE_CUSTOMISE
137 select DVB_TDA8261 if !DVB_FE_CUSTOMISE
134 select DVB_TUA6100 if !DVB_FE_CUSTOMISE 138 select DVB_TUA6100 if !DVB_FE_CUSTOMISE
135 help 139 help
136 Support for simple SAA7146 based DVB cards 140 Support for simple SAA7146 based DVB cards
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 1032ea77837e..f996cef79ec1 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -35,6 +35,11 @@
35 35
36#include "budget.h" 36#include "budget.h"
37#include "stv0299.h" 37#include "stv0299.h"
38#include "stb0899_drv.h"
39#include "stb0899_reg.h"
40#include "stb0899_cfg.h"
41#include "tda8261.h"
42#include "tda8261_cfg.h"
38#include "tda1002x.h" 43#include "tda1002x.h"
39#include "tda1004x.h" 44#include "tda1004x.h"
40#include "tua6100.h" 45#include "tua6100.h"
@@ -882,6 +887,281 @@ static struct stv0299_config philips_sd1878_config = {
882 .set_symbol_rate = philips_sd1878_ci_set_symbol_rate, 887 .set_symbol_rate = philips_sd1878_ci_set_symbol_rate,
883}; 888};
884 889
890/* KNC1 DVB-S (STB0899) Inittab */
891static const struct stb0899_s1_reg knc1_stb0899_s1_init_1[] = {
892
893 { STB0899_DEV_ID , 0x81 },
894 { STB0899_DISCNTRL1 , 0x32 },
895 { STB0899_DISCNTRL2 , 0x80 },
896 { STB0899_DISRX_ST0 , 0x04 },
897 { STB0899_DISRX_ST1 , 0x00 },
898 { STB0899_DISPARITY , 0x00 },
899 { STB0899_DISFIFO , 0x00 },
900 { STB0899_DISSTATUS , 0x20 },
901 { STB0899_DISF22 , 0x8c },
902 { STB0899_DISF22RX , 0x9a },
903 { STB0899_SYSREG , 0x0b },
904 { STB0899_ACRPRESC , 0x11 },
905 { STB0899_ACRDIV1 , 0x0a },
906 { STB0899_ACRDIV2 , 0x05 },
907 { STB0899_DACR1 , 0x00 },
908 { STB0899_DACR2 , 0x00 },
909 { STB0899_OUTCFG , 0x00 },
910 { STB0899_MODECFG , 0x00 },
911 { STB0899_IRQSTATUS_3 , 0x30 },
912 { STB0899_IRQSTATUS_2 , 0x00 },
913 { STB0899_IRQSTATUS_1 , 0x00 },
914 { STB0899_IRQSTATUS_0 , 0x00 },
915 { STB0899_IRQMSK_3 , 0xf3 },
916 { STB0899_IRQMSK_2 , 0xfc },
917 { STB0899_IRQMSK_1 , 0xff },
918 { STB0899_IRQMSK_0 , 0xff },
919 { STB0899_IRQCFG , 0x00 },
920 { STB0899_I2CCFG , 0x88 },
921 { STB0899_I2CRPT , 0x58 }, /* Repeater=8, Stop=disabled */
922 { STB0899_IOPVALUE5 , 0x00 },
923 { STB0899_IOPVALUE4 , 0x20 },
924 { STB0899_IOPVALUE3 , 0xc9 },
925 { STB0899_IOPVALUE2 , 0x90 },
926 { STB0899_IOPVALUE1 , 0x40 },
927 { STB0899_IOPVALUE0 , 0x00 },
928 { STB0899_GPIO00CFG , 0x82 },
929 { STB0899_GPIO01CFG , 0x82 },
930 { STB0899_GPIO02CFG , 0x82 },
931 { STB0899_GPIO03CFG , 0x82 },
932 { STB0899_GPIO04CFG , 0x82 },
933 { STB0899_GPIO05CFG , 0x82 },
934 { STB0899_GPIO06CFG , 0x82 },
935 { STB0899_GPIO07CFG , 0x82 },
936 { STB0899_GPIO08CFG , 0x82 },
937 { STB0899_GPIO09CFG , 0x82 },
938 { STB0899_GPIO10CFG , 0x82 },
939 { STB0899_GPIO11CFG , 0x82 },
940 { STB0899_GPIO12CFG , 0x82 },
941 { STB0899_GPIO13CFG , 0x82 },
942 { STB0899_GPIO14CFG , 0x82 },
943 { STB0899_GPIO15CFG , 0x82 },
944 { STB0899_GPIO16CFG , 0x82 },
945 { STB0899_GPIO17CFG , 0x82 },
946 { STB0899_GPIO18CFG , 0x82 },
947 { STB0899_GPIO19CFG , 0x82 },
948 { STB0899_GPIO20CFG , 0x82 },
949 { STB0899_SDATCFG , 0xb8 },
950 { STB0899_SCLTCFG , 0xba },
951 { STB0899_AGCRFCFG , 0x08 }, /* 0x1c */
952 { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
953 { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
954 { STB0899_DIRCLKCFG , 0x82 },
955 { STB0899_CLKOUT27CFG , 0x7e },
956 { STB0899_STDBYCFG , 0x82 },
957 { STB0899_CS0CFG , 0x82 },
958 { STB0899_CS1CFG , 0x82 },
959 { STB0899_DISEQCOCFG , 0x20 },
960 { STB0899_GPIO32CFG , 0x82 },
961 { STB0899_GPIO33CFG , 0x82 },
962 { STB0899_GPIO34CFG , 0x82 },
963 { STB0899_GPIO35CFG , 0x82 },
964 { STB0899_GPIO36CFG , 0x82 },
965 { STB0899_GPIO37CFG , 0x82 },
966 { STB0899_GPIO38CFG , 0x82 },
967 { STB0899_GPIO39CFG , 0x82 },
968	{ STB0899_NCOARSE		, 0x15 }, /* 0x15 = 27 MHz Clock, F/3 = 198MHz, F/6 = 99MHz */
969 { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
970 { STB0899_FILTCTRL , 0x00 },
971 { STB0899_SYSCTRL , 0x00 },
972 { STB0899_STOPCLK1 , 0x20 },
973 { STB0899_STOPCLK2 , 0x00 },
974 { STB0899_INTBUFSTATUS , 0x00 },
975 { STB0899_INTBUFCTRL , 0x0a },
976 { 0xffff , 0xff },
977};
978
979static const struct stb0899_s1_reg knc1_stb0899_s1_init_3[] = {
980 { STB0899_DEMOD , 0x00 },
981 { STB0899_RCOMPC , 0xc9 },
982 { STB0899_AGC1CN , 0x41 },
983 { STB0899_AGC1REF , 0x08 },
984 { STB0899_RTC , 0x7a },
985 { STB0899_TMGCFG , 0x4e },
986 { STB0899_AGC2REF , 0x33 },
987 { STB0899_TLSR , 0x84 },
988 { STB0899_CFD , 0xee },
989 { STB0899_ACLC , 0x87 },
990 { STB0899_BCLC , 0x94 },
991 { STB0899_EQON , 0x41 },
992 { STB0899_LDT , 0xdd },
993 { STB0899_LDT2 , 0xc9 },
994 { STB0899_EQUALREF , 0xb4 },
995 { STB0899_TMGRAMP , 0x10 },
996 { STB0899_TMGTHD , 0x30 },
997 { STB0899_IDCCOMP , 0xfb },
998 { STB0899_QDCCOMP , 0x03 },
999 { STB0899_POWERI , 0x3b },
1000 { STB0899_POWERQ , 0x3d },
1001 { STB0899_RCOMP , 0x81 },
1002 { STB0899_AGCIQIN , 0x80 },
1003 { STB0899_AGC2I1 , 0x04 },
1004 { STB0899_AGC2I2 , 0xf5 },
1005 { STB0899_TLIR , 0x25 },
1006 { STB0899_RTF , 0x80 },
1007 { STB0899_DSTATUS , 0x00 },
1008 { STB0899_LDI , 0xca },
1009 { STB0899_CFRM , 0xf1 },
1010 { STB0899_CFRL , 0xf3 },
1011 { STB0899_NIRM , 0x2a },
1012 { STB0899_NIRL , 0x05 },
1013 { STB0899_ISYMB , 0x17 },
1014 { STB0899_QSYMB , 0xfa },
1015 { STB0899_SFRH , 0x2f },
1016 { STB0899_SFRM , 0x68 },
1017 { STB0899_SFRL , 0x40 },
1018 { STB0899_SFRUPH , 0x2f },
1019 { STB0899_SFRUPM , 0x68 },
1020 { STB0899_SFRUPL , 0x40 },
1021 { STB0899_EQUAI1 , 0xfd },
1022 { STB0899_EQUAQ1 , 0x04 },
1023 { STB0899_EQUAI2 , 0x0f },
1024 { STB0899_EQUAQ2 , 0xff },
1025 { STB0899_EQUAI3 , 0xdf },
1026 { STB0899_EQUAQ3 , 0xfa },
1027 { STB0899_EQUAI4 , 0x37 },
1028 { STB0899_EQUAQ4 , 0x0d },
1029 { STB0899_EQUAI5 , 0xbd },
1030 { STB0899_EQUAQ5 , 0xf7 },
1031 { STB0899_DSTATUS2 , 0x00 },
1032 { STB0899_VSTATUS , 0x00 },
1033 { STB0899_VERROR , 0xff },
1034 { STB0899_IQSWAP , 0x2a },
1035 { STB0899_ECNT1M , 0x00 },
1036 { STB0899_ECNT1L , 0x00 },
1037 { STB0899_ECNT2M , 0x00 },
1038 { STB0899_ECNT2L , 0x00 },
1039 { STB0899_ECNT3M , 0x00 },
1040 { STB0899_ECNT3L , 0x00 },
1041 { STB0899_FECAUTO1 , 0x06 },
1042 { STB0899_FECM , 0x01 },
1043 { STB0899_VTH12 , 0xf0 },
1044 { STB0899_VTH23 , 0xa0 },
1045 { STB0899_VTH34 , 0x78 },
1046 { STB0899_VTH56 , 0x4e },
1047 { STB0899_VTH67 , 0x48 },
1048 { STB0899_VTH78 , 0x38 },
1049 { STB0899_PRVIT , 0xff },
1050 { STB0899_VITSYNC , 0x19 },
1051 { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
1052 { STB0899_TSULC , 0x42 },
1053 { STB0899_RSLLC , 0x40 },
1054 { STB0899_TSLPL , 0x12 },
1055 { STB0899_TSCFGH , 0x0c },
1056 { STB0899_TSCFGM , 0x00 },
1057 { STB0899_TSCFGL , 0x0c },
1058 { STB0899_TSOUT , 0x0d }, /* 0x0d for CAM */
1059 { STB0899_RSSYNCDEL , 0x00 },
1060 { STB0899_TSINHDELH , 0x02 },
1061 { STB0899_TSINHDELM , 0x00 },
1062 { STB0899_TSINHDELL , 0x00 },
1063 { STB0899_TSLLSTKM , 0x00 },
1064 { STB0899_TSLLSTKL , 0x00 },
1065 { STB0899_TSULSTKM , 0x00 },
1066 { STB0899_TSULSTKL , 0xab },
1067 { STB0899_PCKLENUL , 0x00 },
1068 { STB0899_PCKLENLL , 0xcc },
1069 { STB0899_RSPCKLEN , 0xcc },
1070 { STB0899_TSSTATUS , 0x80 },
1071 { STB0899_ERRCTRL1 , 0xb6 },
1072 { STB0899_ERRCTRL2 , 0x96 },
1073 { STB0899_ERRCTRL3 , 0x89 },
1074 { STB0899_DMONMSK1 , 0x27 },
1075 { STB0899_DMONMSK0 , 0x03 },
1076 { STB0899_DEMAPVIT , 0x5c },
1077 { STB0899_PLPARM , 0x1f },
1078 { STB0899_PDELCTRL , 0x48 },
1079 { STB0899_PDELCTRL2 , 0x00 },
1080 { STB0899_BBHCTRL1 , 0x00 },
1081 { STB0899_BBHCTRL2 , 0x00 },
1082 { STB0899_HYSTTHRESH , 0x77 },
1083 { STB0899_MATCSTM , 0x00 },
1084 { STB0899_MATCSTL , 0x00 },
1085 { STB0899_UPLCSTM , 0x00 },
1086 { STB0899_UPLCSTL , 0x00 },
1087 { STB0899_DFLCSTM , 0x00 },
1088 { STB0899_DFLCSTL , 0x00 },
1089 { STB0899_SYNCCST , 0x00 },
1090 { STB0899_SYNCDCSTM , 0x00 },
1091 { STB0899_SYNCDCSTL , 0x00 },
1092 { STB0899_ISI_ENTRY , 0x00 },
1093 { STB0899_ISI_BIT_EN , 0x00 },
1094 { STB0899_MATSTRM , 0x00 },
1095 { STB0899_MATSTRL , 0x00 },
1096 { STB0899_UPLSTRM , 0x00 },
1097 { STB0899_UPLSTRL , 0x00 },
1098 { STB0899_DFLSTRM , 0x00 },
1099 { STB0899_DFLSTRL , 0x00 },
1100 { STB0899_SYNCSTR , 0x00 },
1101 { STB0899_SYNCDSTRM , 0x00 },
1102 { STB0899_SYNCDSTRL , 0x00 },
1103 { STB0899_CFGPDELSTATUS1 , 0x10 },
1104 { STB0899_CFGPDELSTATUS2 , 0x00 },
1105 { STB0899_BBFERRORM , 0x00 },
1106 { STB0899_BBFERRORL , 0x00 },
1107 { STB0899_UPKTERRORM , 0x00 },
1108 { STB0899_UPKTERRORL , 0x00 },
1109 { 0xffff , 0xff },
1110};
1111
1112/* STB0899 demodulator config for the KNC1 and clones */
1113static struct stb0899_config knc1_dvbs2_config = {
1114 .init_dev = knc1_stb0899_s1_init_1,
1115 .init_s2_demod = stb0899_s2_init_2,
1116 .init_s1_demod = knc1_stb0899_s1_init_3,
1117 .init_s2_fec = stb0899_s2_init_4,
1118 .init_tst = stb0899_s1_init_5,
1119
1120 .postproc = NULL,
1121
1122 .demod_address = 0x68,
1123// .ts_output_mode = STB0899_OUT_PARALLEL, /* types = SERIAL/PARALLEL */
1124 .block_sync_mode = STB0899_SYNC_FORCED, /* DSS, SYNC_FORCED/UNSYNCED */
1125// .ts_pfbit_toggle = STB0899_MPEG_NORMAL, /* DirecTV, MPEG toggling seq */
1126
1127 .xtal_freq = 27000000,
1128 .inversion = IQ_SWAP_OFF, /* 1 */
1129
1130 .lo_clk = 76500000,
1131 .hi_clk = 90000000,
1132
1133 .esno_ave = STB0899_DVBS2_ESNO_AVE,
1134 .esno_quant = STB0899_DVBS2_ESNO_QUANT,
1135 .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
1136 .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
1137 .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
1138 .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
1139 .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
1140 .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
1141 .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
1142
1143 .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
1144 .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
1145 .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
1146 .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
1147
1148 .tuner_get_frequency = tda8261_get_frequency,
1149 .tuner_set_frequency = tda8261_set_frequency,
1150 .tuner_set_bandwidth = NULL,
1151 .tuner_get_bandwidth = tda8261_get_bandwidth,
1152 .tuner_set_rfsiggain = NULL
1153};
1154
1155/*
1156 * SD1878/SHA tuner config
1157 * 1F, Single I/P, Horizontal mount, High Sensitivity
1158 */
1159static const struct tda8261_config sd1878c_config = {
1160// .name = "SD1878/SHA",
1161 .addr = 0x60,
1162 .step_size = TDA8261_STEP_1000 /* kHz */
1163};
1164
885static u8 read_pwm(struct budget_av *budget_av) 1165static u8 read_pwm(struct budget_av *budget_av)
886{ 1166{
887 u8 b = 0xff; 1167 u8 b = 0xff;
@@ -905,8 +1185,11 @@ static u8 read_pwm(struct budget_av *budget_av)
905#define SUBID_DVBS_TV_STAR 0x0014 1185#define SUBID_DVBS_TV_STAR 0x0014
906#define SUBID_DVBS_TV_STAR_PLUS_X4 0x0015 1186#define SUBID_DVBS_TV_STAR_PLUS_X4 0x0015
907#define SUBID_DVBS_TV_STAR_CI 0x0016 1187#define SUBID_DVBS_TV_STAR_CI 0x0016
1188#define SUBID_DVBS2_KNC1 0x0018
1189#define SUBID_DVBS2_KNC1_OEM 0x0019
908#define SUBID_DVBS_EASYWATCH_1 0x001a 1190#define SUBID_DVBS_EASYWATCH_1 0x001a
909#define SUBID_DVBS_EASYWATCH_2 0x001b 1191#define SUBID_DVBS_EASYWATCH_2 0x001b
1192#define SUBID_DVBS2_EASYWATCH 0x001d
910#define SUBID_DVBS_EASYWATCH 0x001e 1193#define SUBID_DVBS_EASYWATCH 0x001e
911 1194
912#define SUBID_DVBC_EASYWATCH 0x002a 1195#define SUBID_DVBC_EASYWATCH 0x002a
@@ -941,6 +1224,9 @@ static void frontend_init(struct budget_av *budget_av)
941 case SUBID_DVBT_KNC1_PLUS: 1224 case SUBID_DVBT_KNC1_PLUS:
942 case SUBID_DVBC_EASYWATCH: 1225 case SUBID_DVBC_EASYWATCH:
943 case SUBID_DVBC_KNC1_PLUS_MK3: 1226 case SUBID_DVBC_KNC1_PLUS_MK3:
1227 case SUBID_DVBS2_KNC1:
1228 case SUBID_DVBS2_KNC1_OEM:
1229 case SUBID_DVBS2_EASYWATCH:
944 saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI); 1230 saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI);
945 break; 1231 break;
946 } 1232 }
@@ -993,7 +1279,14 @@ static void frontend_init(struct budget_av *budget_av)
993 fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params; 1279 fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params;
994 } 1280 }
995 break; 1281 break;
1282 case SUBID_DVBS2_KNC1:
1283 case SUBID_DVBS2_KNC1_OEM:
1284 case SUBID_DVBS2_EASYWATCH:
1285 budget_av->reinitialise_demod = 1;
1286 if ((fe = dvb_attach(stb0899_attach, &knc1_dvbs2_config, &budget_av->budget.i2c_adap)))
1287 dvb_attach(tda8261_attach, fe, &sd1878c_config, &budget_av->budget.i2c_adap);
996 1288
1289 break;
997 case SUBID_DVBS_CINERGY1200: 1290 case SUBID_DVBS_CINERGY1200:
998 fe = dvb_attach(stv0299_attach, &cinergy_1200s_config, 1291 fe = dvb_attach(stv0299_attach, &cinergy_1200s_config,
999 &budget_av->budget.i2c_adap); 1292 &budget_av->budget.i2c_adap);
@@ -1260,6 +1553,8 @@ static struct saa7146_ext_vv vv_data = {
1260static struct saa7146_extension budget_extension; 1553static struct saa7146_extension budget_extension;
1261 1554
1262MAKE_BUDGET_INFO(knc1s, "KNC1 DVB-S", BUDGET_KNC1S); 1555MAKE_BUDGET_INFO(knc1s, "KNC1 DVB-S", BUDGET_KNC1S);
1556MAKE_BUDGET_INFO(knc1s2,"KNC1 DVB-S2", BUDGET_KNC1S2);
1557MAKE_BUDGET_INFO(sates2,"Satelco EasyWatch DVB-S2", BUDGET_KNC1S2);
1263MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C); 1558MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C);
1264MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T); 1559MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T);
1265MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR); 1560MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
@@ -1290,6 +1585,9 @@ static struct pci_device_id pci_tbl[] = {
1290 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014), 1585 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014),
1291 MAKE_EXTENSION_PCI(knc1spx4, 0x1894, 0x0015), 1586 MAKE_EXTENSION_PCI(knc1spx4, 0x1894, 0x0015),
1292 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016), 1587 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
1588 MAKE_EXTENSION_PCI(knc1s2, 0x1894, 0x0018),
1589 MAKE_EXTENSION_PCI(knc1s2, 0x1894, 0x0019),
1590 MAKE_EXTENSION_PCI(sates2, 0x1894, 0x001d),
1293 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e), 1591 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
1294 MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a), 1592 MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a),
1295 MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b), 1593 MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b),
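
The new DVB-S2 cases added to frontend_init() above follow the usual budget-av attach order: the STB0899 demodulator is attached first and, only if that succeeds, the TDA8261 tuner is hooked onto the resulting frontend; a tuner-attach failure is tolerated. A condensed, hypothetical helper capturing that pattern is shown below, using the config structures defined earlier in this file; the real code keeps the logic inline in frontend_init().

/* Hypothetical helper mirroring the DVB-S2 attach order shown in the
 * hunk above; not a drop-in replacement for the inline code. */
static struct dvb_frontend *knc1_dvbs2_attach(struct budget_av *budget_av)
{
	struct dvb_frontend *fe;

	budget_av->reinitialise_demod = 1;
	fe = dvb_attach(stb0899_attach, &knc1_dvbs2_config,
			&budget_av->budget.i2c_adap);
	if (fe)
		/* as in the hunk above, a missing tuner is not fatal */
		dvb_attach(tda8261_attach, fe, &sd1878c_config,
			   &budget_av->budget.i2c_adap);
	return fe;
}
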
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 0a5aad45435d..3507463fdac9 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -43,6 +43,11 @@
43#include "stv0299.h" 43#include "stv0299.h"
44#include "stv0297.h" 44#include "stv0297.h"
45#include "tda1004x.h" 45#include "tda1004x.h"
46#include "stb0899_drv.h"
47#include "stb0899_reg.h"
48#include "stb0899_cfg.h"
49#include "stb6100.h"
50#include "stb6100_cfg.h"
46#include "lnbp21.h" 51#include "lnbp21.h"
47#include "bsbe1.h" 52#include "bsbe1.h"
48#include "bsru6.h" 53#include "bsru6.h"
@@ -1071,7 +1076,271 @@ static struct tda10023_config tda10023_config = {
1071 .deltaf = 0xa511, 1076 .deltaf = 0xa511,
1072}; 1077};
1073 1078
1079/* TT S2-3200 DVB-S (STB0899) Inittab */
1080static const struct stb0899_s1_reg tt3200_stb0899_s1_init_1[] = {
1081
1082 { STB0899_DEV_ID , 0x81 },
1083 { STB0899_DISCNTRL1 , 0x32 },
1084 { STB0899_DISCNTRL2 , 0x80 },
1085 { STB0899_DISRX_ST0 , 0x04 },
1086 { STB0899_DISRX_ST1 , 0x00 },
1087 { STB0899_DISPARITY , 0x00 },
1088 { STB0899_DISFIFO , 0x00 },
1089 { STB0899_DISSTATUS , 0x20 },
1090 { STB0899_DISF22 , 0x8c },
1091 { STB0899_DISF22RX , 0x9a },
1092 { STB0899_SYSREG , 0x0b },
1093 { STB0899_ACRPRESC , 0x11 },
1094 { STB0899_ACRDIV1 , 0x0a },
1095 { STB0899_ACRDIV2 , 0x05 },
1096 { STB0899_DACR1 , 0x00 },
1097 { STB0899_DACR2 , 0x00 },
1098 { STB0899_OUTCFG , 0x00 },
1099 { STB0899_MODECFG , 0x00 },
1100 { STB0899_IRQSTATUS_3 , 0x30 },
1101 { STB0899_IRQSTATUS_2 , 0x00 },
1102 { STB0899_IRQSTATUS_1 , 0x00 },
1103 { STB0899_IRQSTATUS_0 , 0x00 },
1104 { STB0899_IRQMSK_3 , 0xf3 },
1105 { STB0899_IRQMSK_2 , 0xfc },
1106 { STB0899_IRQMSK_1 , 0xff },
1107 { STB0899_IRQMSK_0 , 0xff },
1108 { STB0899_IRQCFG , 0x00 },
1109 { STB0899_I2CCFG , 0x88 },
1110 { STB0899_I2CRPT , 0x48 }, /* 12k Pullup, Repeater=16, Stop=disabled */
1111 { STB0899_IOPVALUE5 , 0x00 },
1112 { STB0899_IOPVALUE4 , 0x20 },
1113 { STB0899_IOPVALUE3 , 0xc9 },
1114 { STB0899_IOPVALUE2 , 0x90 },
1115 { STB0899_IOPVALUE1 , 0x40 },
1116 { STB0899_IOPVALUE0 , 0x00 },
1117 { STB0899_GPIO00CFG , 0x82 },
1118 { STB0899_GPIO01CFG , 0x82 },
1119 { STB0899_GPIO02CFG , 0x82 },
1120 { STB0899_GPIO03CFG , 0x82 },
1121 { STB0899_GPIO04CFG , 0x82 },
1122 { STB0899_GPIO05CFG , 0x82 },
1123 { STB0899_GPIO06CFG , 0x82 },
1124 { STB0899_GPIO07CFG , 0x82 },
1125 { STB0899_GPIO08CFG , 0x82 },
1126 { STB0899_GPIO09CFG , 0x82 },
1127 { STB0899_GPIO10CFG , 0x82 },
1128 { STB0899_GPIO11CFG , 0x82 },
1129 { STB0899_GPIO12CFG , 0x82 },
1130 { STB0899_GPIO13CFG , 0x82 },
1131 { STB0899_GPIO14CFG , 0x82 },
1132 { STB0899_GPIO15CFG , 0x82 },
1133 { STB0899_GPIO16CFG , 0x82 },
1134 { STB0899_GPIO17CFG , 0x82 },
1135 { STB0899_GPIO18CFG , 0x82 },
1136 { STB0899_GPIO19CFG , 0x82 },
1137 { STB0899_GPIO20CFG , 0x82 },
1138 { STB0899_SDATCFG , 0xb8 },
1139 { STB0899_SCLTCFG , 0xba },
1140 { STB0899_AGCRFCFG , 0x1c }, /* 0x11 */
1141 { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
1142 { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
1143 { STB0899_DIRCLKCFG , 0x82 },
1144 { STB0899_CLKOUT27CFG , 0x7e },
1145 { STB0899_STDBYCFG , 0x82 },
1146 { STB0899_CS0CFG , 0x82 },
1147 { STB0899_CS1CFG , 0x82 },
1148 { STB0899_DISEQCOCFG , 0x20 },
1149 { STB0899_GPIO32CFG , 0x82 },
1150 { STB0899_GPIO33CFG , 0x82 },
1151 { STB0899_GPIO34CFG , 0x82 },
1152 { STB0899_GPIO35CFG , 0x82 },
1153 { STB0899_GPIO36CFG , 0x82 },
1154 { STB0899_GPIO37CFG , 0x82 },
1155 { STB0899_GPIO38CFG , 0x82 },
1156 { STB0899_GPIO39CFG , 0x82 },
1157	{ STB0899_NCOARSE		, 0x15 }, /* 0x15 = 27 MHz Clock, F/3 = 198MHz, F/6 = 99MHz */
1158 { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
1159 { STB0899_FILTCTRL , 0x00 },
1160 { STB0899_SYSCTRL , 0x00 },
1161 { STB0899_STOPCLK1 , 0x20 },
1162 { STB0899_STOPCLK2 , 0x00 },
1163 { STB0899_INTBUFSTATUS , 0x00 },
1164 { STB0899_INTBUFCTRL , 0x0a },
1165 { 0xffff , 0xff },
1166};
1167
1168static const struct stb0899_s1_reg tt3200_stb0899_s1_init_3[] = {
1169 { STB0899_DEMOD , 0x00 },
1170 { STB0899_RCOMPC , 0xc9 },
1171 { STB0899_AGC1CN , 0x41 },
1172 { STB0899_AGC1REF , 0x10 },
1173 { STB0899_RTC , 0x7a },
1174 { STB0899_TMGCFG , 0x4e },
1175 { STB0899_AGC2REF , 0x34 },
1176 { STB0899_TLSR , 0x84 },
1177 { STB0899_CFD , 0xc7 },
1178 { STB0899_ACLC , 0x87 },
1179 { STB0899_BCLC , 0x94 },
1180 { STB0899_EQON , 0x41 },
1181 { STB0899_LDT , 0xdd },
1182 { STB0899_LDT2 , 0xc9 },
1183 { STB0899_EQUALREF , 0xb4 },
1184 { STB0899_TMGRAMP , 0x10 },
1185 { STB0899_TMGTHD , 0x30 },
1186 { STB0899_IDCCOMP , 0xfb },
1187 { STB0899_QDCCOMP , 0x03 },
1188 { STB0899_POWERI , 0x3b },
1189 { STB0899_POWERQ , 0x3d },
1190 { STB0899_RCOMP , 0x81 },
1191 { STB0899_AGCIQIN , 0x80 },
1192 { STB0899_AGC2I1 , 0x04 },
1193 { STB0899_AGC2I2 , 0xf5 },
1194 { STB0899_TLIR , 0x25 },
1195 { STB0899_RTF , 0x80 },
1196 { STB0899_DSTATUS , 0x00 },
1197 { STB0899_LDI , 0xca },
1198 { STB0899_CFRM , 0xf1 },
1199 { STB0899_CFRL , 0xf3 },
1200 { STB0899_NIRM , 0x2a },
1201 { STB0899_NIRL , 0x05 },
1202 { STB0899_ISYMB , 0x17 },
1203 { STB0899_QSYMB , 0xfa },
1204 { STB0899_SFRH , 0x2f },
1205 { STB0899_SFRM , 0x68 },
1206 { STB0899_SFRL , 0x40 },
1207 { STB0899_SFRUPH , 0x2f },
1208 { STB0899_SFRUPM , 0x68 },
1209 { STB0899_SFRUPL , 0x40 },
1210 { STB0899_EQUAI1 , 0xfd },
1211 { STB0899_EQUAQ1 , 0x04 },
1212 { STB0899_EQUAI2 , 0x0f },
1213 { STB0899_EQUAQ2 , 0xff },
1214 { STB0899_EQUAI3 , 0xdf },
1215 { STB0899_EQUAQ3 , 0xfa },
1216 { STB0899_EQUAI4 , 0x37 },
1217 { STB0899_EQUAQ4 , 0x0d },
1218 { STB0899_EQUAI5 , 0xbd },
1219 { STB0899_EQUAQ5 , 0xf7 },
1220 { STB0899_DSTATUS2 , 0x00 },
1221 { STB0899_VSTATUS , 0x00 },
1222 { STB0899_VERROR , 0xff },
1223 { STB0899_IQSWAP , 0x2a },
1224 { STB0899_ECNT1M , 0x00 },
1225 { STB0899_ECNT1L , 0x00 },
1226 { STB0899_ECNT2M , 0x00 },
1227 { STB0899_ECNT2L , 0x00 },
1228 { STB0899_ECNT3M , 0x00 },
1229 { STB0899_ECNT3L , 0x00 },
1230 { STB0899_FECAUTO1 , 0x06 },
1231 { STB0899_FECM , 0x01 },
1232 { STB0899_VTH12 , 0xf0 },
1233 { STB0899_VTH23 , 0xa0 },
1234 { STB0899_VTH34 , 0x78 },
1235 { STB0899_VTH56 , 0x4e },
1236 { STB0899_VTH67 , 0x48 },
1237 { STB0899_VTH78 , 0x38 },
1238 { STB0899_PRVIT , 0xff },
1239 { STB0899_VITSYNC , 0x19 },
1240 { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
1241 { STB0899_TSULC , 0x42 },
1242 { STB0899_RSLLC , 0x40 },
1243 { STB0899_TSLPL , 0x12 },
1244 { STB0899_TSCFGH , 0x0c },
1245 { STB0899_TSCFGM , 0x00 },
1246 { STB0899_TSCFGL , 0x0c },
1247 { STB0899_TSOUT , 0x0d }, /* 0x0d for CAM */
1248 { STB0899_RSSYNCDEL , 0x00 },
1249 { STB0899_TSINHDELH , 0x02 },
1250 { STB0899_TSINHDELM , 0x00 },
1251 { STB0899_TSINHDELL , 0x00 },
1252 { STB0899_TSLLSTKM , 0x00 },
1253 { STB0899_TSLLSTKL , 0x00 },
1254 { STB0899_TSULSTKM , 0x00 },
1255 { STB0899_TSULSTKL , 0xab },
1256 { STB0899_PCKLENUL , 0x00 },
1257 { STB0899_PCKLENLL , 0xcc },
1258 { STB0899_RSPCKLEN , 0xcc },
1259 { STB0899_TSSTATUS , 0x80 },
1260 { STB0899_ERRCTRL1 , 0xb6 },
1261 { STB0899_ERRCTRL2 , 0x96 },
1262 { STB0899_ERRCTRL3 , 0x89 },
1263 { STB0899_DMONMSK1 , 0x27 },
1264 { STB0899_DMONMSK0 , 0x03 },
1265 { STB0899_DEMAPVIT , 0x5c },
1266 { STB0899_PLPARM , 0x1f },
1267 { STB0899_PDELCTRL , 0x48 },
1268 { STB0899_PDELCTRL2 , 0x00 },
1269 { STB0899_BBHCTRL1 , 0x00 },
1270 { STB0899_BBHCTRL2 , 0x00 },
1271 { STB0899_HYSTTHRESH , 0x77 },
1272 { STB0899_MATCSTM , 0x00 },
1273 { STB0899_MATCSTL , 0x00 },
1274 { STB0899_UPLCSTM , 0x00 },
1275 { STB0899_UPLCSTL , 0x00 },
1276 { STB0899_DFLCSTM , 0x00 },
1277 { STB0899_DFLCSTL , 0x00 },
1278 { STB0899_SYNCCST , 0x00 },
1279 { STB0899_SYNCDCSTM , 0x00 },
1280 { STB0899_SYNCDCSTL , 0x00 },
1281 { STB0899_ISI_ENTRY , 0x00 },
1282 { STB0899_ISI_BIT_EN , 0x00 },
1283 { STB0899_MATSTRM , 0x00 },
1284 { STB0899_MATSTRL , 0x00 },
1285 { STB0899_UPLSTRM , 0x00 },
1286 { STB0899_UPLSTRL , 0x00 },
1287 { STB0899_DFLSTRM , 0x00 },
1288 { STB0899_DFLSTRL , 0x00 },
1289 { STB0899_SYNCSTR , 0x00 },
1290 { STB0899_SYNCDSTRM , 0x00 },
1291 { STB0899_SYNCDSTRL , 0x00 },
1292 { STB0899_CFGPDELSTATUS1 , 0x10 },
1293 { STB0899_CFGPDELSTATUS2 , 0x00 },
1294 { STB0899_BBFERRORM , 0x00 },
1295 { STB0899_BBFERRORL , 0x00 },
1296 { STB0899_UPKTERRORM , 0x00 },
1297 { STB0899_UPKTERRORL , 0x00 },
1298 { 0xffff , 0xff },
1299};
1074 1300
1301static struct stb0899_config tt3200_config = {
1302 .init_dev = tt3200_stb0899_s1_init_1,
1303 .init_s2_demod = stb0899_s2_init_2,
1304 .init_s1_demod = tt3200_stb0899_s1_init_3,
1305 .init_s2_fec = stb0899_s2_init_4,
1306 .init_tst = stb0899_s1_init_5,
1307
1308 .postproc = NULL,
1309
1310 .demod_address = 0x68,
1311
1312 .xtal_freq = 27000000,
1313 .inversion = IQ_SWAP_ON, /* 1 */
1314
1315 .lo_clk = 76500000,
1316 .hi_clk = 99000000,
1317
1318 .esno_ave = STB0899_DVBS2_ESNO_AVE,
1319 .esno_quant = STB0899_DVBS2_ESNO_QUANT,
1320 .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
1321 .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
1322 .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
1323 .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
1324 .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
1325 .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
1326 .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
1327
1328 .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
1329 .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
1330 .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
1331 .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
1332
1333 .tuner_get_frequency = stb6100_get_frequency,
1334 .tuner_set_frequency = stb6100_set_frequency,
1335 .tuner_set_bandwidth = stb6100_set_bandwidth,
1336 .tuner_get_bandwidth = stb6100_get_bandwidth,
1337 .tuner_set_rfsiggain = NULL
1338};
1339
1340struct stb6100_config tt3200_stb6100_config = {
1341 .tuner_address = 0x60,
1342 .refclock = 27000000,
1343};
1075 1344
1076static void frontend_init(struct budget_ci *budget_ci) 1345static void frontend_init(struct budget_ci *budget_ci)
1077{ 1346{
@@ -1152,6 +1421,46 @@ static void frontend_init(struct budget_ci *budget_ci)
1152 } 1421 }
1153 } 1422 }
1154 break; 1423 break;
1424
1425 case 0x1019: // TT S2-3200 PCI
1426 /*
1427		 * NOTE! On some STB0899 versions the internal PLL takes longer to
1428		 * settle, i.e. to LOCK. The older chip revisions do not show this;
1429		 * as a result, on the newer chips the entire clock tree will not
1430		 * be stable right after power-up.
1431		 * In that case we must RESET the STB0899 (active low) and wait for
1432		 * the PLL to stabilize.
1433		 *
1434		 * On the TT S2-3200 and clones, the STB0899 demodulator's RESETB is
1435		 * connected to SAA7146 GPIO2 (pin 142).
1436 */
1437 /* Reset Demodulator */
1438 saa7146_setgpio(budget_ci->budget.dev, 2, SAA7146_GPIO_OUTLO);
1439 /* Wait for everything to die */
1440 msleep(50);
1441 /* Pull it up out of Reset state */
1442 saa7146_setgpio(budget_ci->budget.dev, 2, SAA7146_GPIO_OUTHI);
1443 /* Wait for PLL to stabilize */
1444 msleep(250);
1445 /*
1446 * PLL state should be stable now. Ideally, we should check
1447 * for PLL LOCK status. But well, never mind!
1448 */
1449 budget_ci->budget.dvb_frontend = dvb_attach(stb0899_attach, &tt3200_config, &budget_ci->budget.i2c_adap);
1450 if (budget_ci->budget.dvb_frontend) {
1451 if (dvb_attach(stb6100_attach, budget_ci->budget.dvb_frontend, &tt3200_stb6100_config, &budget_ci->budget.i2c_adap)) {
1452 if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) {
1453				printk(KERN_ERR "%s: No LNBP21 found!\n", __func__);
1454 dvb_frontend_detach(budget_ci->budget.dvb_frontend);
1455 budget_ci->budget.dvb_frontend = NULL;
1456 }
1457 } else {
1458 dvb_frontend_detach(budget_ci->budget.dvb_frontend);
1459 budget_ci->budget.dvb_frontend = NULL;
1460 }
1461 }
1462 break;
1463
1155 } 1464 }
1156 1465
1157 if (budget_ci->budget.dvb_frontend == NULL) { 1466 if (budget_ci->budget.dvb_frontend == NULL) {
@@ -1242,6 +1551,7 @@ MAKE_BUDGET_INFO(ttbt2, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
1242MAKE_BUDGET_INFO(ttbtci, "TT-Budget-T-CI PCI", BUDGET_TT); 1551MAKE_BUDGET_INFO(ttbtci, "TT-Budget-T-CI PCI", BUDGET_TT);
1243MAKE_BUDGET_INFO(ttbcci, "TT-Budget-C-CI PCI", BUDGET_TT); 1552MAKE_BUDGET_INFO(ttbcci, "TT-Budget-C-CI PCI", BUDGET_TT);
1244MAKE_BUDGET_INFO(ttc1501, "TT-Budget C-1501 PCI", BUDGET_TT); 1553MAKE_BUDGET_INFO(ttc1501, "TT-Budget C-1501 PCI", BUDGET_TT);
1554MAKE_BUDGET_INFO(tt3200, "TT-Budget S2-3200 PCI", BUDGET_TT);
1245 1555
1246static struct pci_device_id pci_tbl[] = { 1556static struct pci_device_id pci_tbl[] = {
1247 MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100c), 1557 MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100c),
@@ -1251,6 +1561,7 @@ static struct pci_device_id pci_tbl[] = {
1251 MAKE_EXTENSION_PCI(ttbtci, 0x13c2, 0x1012), 1561 MAKE_EXTENSION_PCI(ttbtci, 0x13c2, 0x1012),
1252 MAKE_EXTENSION_PCI(ttbs2, 0x13c2, 0x1017), 1562 MAKE_EXTENSION_PCI(ttbs2, 0x13c2, 0x1017),
1253 MAKE_EXTENSION_PCI(ttc1501, 0x13c2, 0x101a), 1563 MAKE_EXTENSION_PCI(ttc1501, 0x13c2, 0x101a),
1564 MAKE_EXTENSION_PCI(tt3200, 0x13c2, 0x1019),
1254 { 1565 {
1255 .vendor = 0, 1566 .vendor = 0,
1256 } 1567 }
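
The comment in the new 0x1019 (TT S2-3200) branch explains the reset dance: newer STB0899 revisions need their internal PLL re-settled after power-up, and RESETB is wired to SAA7146 GPIO2. Factored out of that branch, the sequence is just two GPIO writes around settle delays; a hypothetical helper, not part of the patch, would read:

/* Hypothetical helper for the reset sequence used in the 0x1019 branch:
 * RESETB is active low and connected to SAA7146 GPIO2 (pin 142). */
static void tt3200_reset_stb0899(struct budget_ci *budget_ci)
{
	/* hold the demodulator in reset and let everything settle down */
	saa7146_setgpio(budget_ci->budget.dev, 2, SAA7146_GPIO_OUTLO);
	msleep(50);
	/* release reset, then give the internal PLL time to stabilize */
	saa7146_setgpio(budget_ci->budget.dev, 2, SAA7146_GPIO_OUTHI);
	msleep(250);
}
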
diff --git a/drivers/media/dvb/ttpci/budget.h b/drivers/media/dvb/ttpci/budget.h
index 86435bf16260..3ad0c6789ba7 100644
--- a/drivers/media/dvb/ttpci/budget.h
+++ b/drivers/media/dvb/ttpci/budget.h
@@ -103,6 +103,7 @@ static struct saa7146_pci_extension_data x_var = { \
103#define BUDGET_CIN1200C_MK3 15 103#define BUDGET_CIN1200C_MK3 15
104#define BUDGET_KNC1C_MK3 16 104#define BUDGET_KNC1C_MK3 16
105#define BUDGET_KNC1CP_MK3 17 105#define BUDGET_KNC1CP_MK3 17
106#define BUDGET_KNC1S2 18
106 107
107#define BUDGET_VIDEO_PORTA 0 108#define BUDGET_VIDEO_PORTA 0
108#define BUDGET_VIDEO_PORTB 1 109#define BUDGET_VIDEO_PORTB 1
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index a5ca176a7b08..5474a22c1b22 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -1,5 +1,5 @@
1/* A driver for the D-Link DSB-R100 USB radio. The R100 plugs 1/* A driver for the D-Link DSB-R100 USB radio and Gemtek USB Radio 21.
2 into both the USB and an analog audio input, so this thing 2 The device plugs into both the USB and an analog audio input, so this thing
3 only deals with initialisation and frequency setting, the 3 only deals with initialisation and frequency setting, the
4 audio data has to be handled by a sound driver. 4 audio data has to be handled by a sound driver.
5 5
@@ -33,6 +33,10 @@
33 33
34 History: 34 History:
35 35
36 Version 0.44:
37 Add suspend/resume functions, fix unplug of device,
38 a lot of cleanups and fixes by Alexey Klimov <klimov.linux@gmail.com>
39
36 Version 0.43: 40 Version 0.43:
37 Oliver Neukum: avoided DMA coherency issue 41 Oliver Neukum: avoided DMA coherency issue
38 42
@@ -93,8 +97,8 @@
93 */ 97 */
94#include <linux/version.h> /* for KERNEL_VERSION MACRO */ 98#include <linux/version.h> /* for KERNEL_VERSION MACRO */
95 99
96#define DRIVER_VERSION "v0.41" 100#define DRIVER_VERSION "v0.44"
97#define RADIO_VERSION KERNEL_VERSION(0,4,1) 101#define RADIO_VERSION KERNEL_VERSION(0, 4, 4)
98 102
99static struct v4l2_queryctrl radio_qctrl[] = { 103static struct v4l2_queryctrl radio_qctrl[] = {
100 { 104 {
@@ -104,7 +108,27 @@ static struct v4l2_queryctrl radio_qctrl[] = {
104 .maximum = 1, 108 .maximum = 1,
105 .default_value = 1, 109 .default_value = 1,
106 .type = V4L2_CTRL_TYPE_BOOLEAN, 110 .type = V4L2_CTRL_TYPE_BOOLEAN,
107 } 111 },
112/* HINT: the disabled controls are only here to satisfy kradio and such apps */
113 { .id = V4L2_CID_AUDIO_VOLUME,
114 .flags = V4L2_CTRL_FLAG_DISABLED,
115 },
116 {
117 .id = V4L2_CID_AUDIO_BALANCE,
118 .flags = V4L2_CTRL_FLAG_DISABLED,
119 },
120 {
121 .id = V4L2_CID_AUDIO_BASS,
122 .flags = V4L2_CTRL_FLAG_DISABLED,
123 },
124 {
125 .id = V4L2_CID_AUDIO_TREBLE,
126 .flags = V4L2_CTRL_FLAG_DISABLED,
127 },
128 {
129 .id = V4L2_CID_AUDIO_LOUDNESS,
130 .flags = V4L2_CTRL_FLAG_DISABLED,
131 },
108}; 132};
109 133
110#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>" 134#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
@@ -125,12 +149,16 @@ devices, that would be 76 and 91. */
125#define FREQ_MAX 108.0 149#define FREQ_MAX 108.0
126#define FREQ_MUL 16000 150#define FREQ_MUL 16000
127 151
152#define videodev_to_radio(d) container_of(d, struct dsbr100_device, videodev)
128 153
129static int usb_dsbr100_probe(struct usb_interface *intf, 154static int usb_dsbr100_probe(struct usb_interface *intf,
130 const struct usb_device_id *id); 155 const struct usb_device_id *id);
131static void usb_dsbr100_disconnect(struct usb_interface *intf); 156static void usb_dsbr100_disconnect(struct usb_interface *intf);
132static int usb_dsbr100_open(struct inode *inode, struct file *file); 157static int usb_dsbr100_open(struct inode *inode, struct file *file);
133static int usb_dsbr100_close(struct inode *inode, struct file *file); 158static int usb_dsbr100_close(struct inode *inode, struct file *file);
159static int usb_dsbr100_suspend(struct usb_interface *intf,
160 pm_message_t message);
161static int usb_dsbr100_resume(struct usb_interface *intf);
134 162
135static int radio_nr = -1; 163static int radio_nr = -1;
136module_param(radio_nr, int, 0); 164module_param(radio_nr, int, 0);
@@ -138,8 +166,9 @@ module_param(radio_nr, int, 0);
138/* Data for one (physical) device */ 166/* Data for one (physical) device */
139struct dsbr100_device { 167struct dsbr100_device {
140 struct usb_device *usbdev; 168 struct usb_device *usbdev;
141 struct video_device *videodev; 169 struct video_device videodev;
142 u8 *transfer_buffer; 170 u8 *transfer_buffer;
171 struct mutex lock; /* buffer locking */
143 int curfreq; 172 int curfreq;
144 int stereo; 173 int stereo;
145 int users; 174 int users;
@@ -147,7 +176,6 @@ struct dsbr100_device {
147 int muted; 176 int muted;
148}; 177};
149 178
150
151static struct usb_device_id usb_dsbr100_device_table [] = { 179static struct usb_device_id usb_dsbr100_device_table [] = {
152 { USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) }, 180 { USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
153 { } /* Terminating entry */ 181 { } /* Terminating entry */
@@ -157,10 +185,14 @@ MODULE_DEVICE_TABLE (usb, usb_dsbr100_device_table);
157 185
158/* USB subsystem interface */ 186/* USB subsystem interface */
159static struct usb_driver usb_dsbr100_driver = { 187static struct usb_driver usb_dsbr100_driver = {
160 .name = "dsbr100", 188 .name = "dsbr100",
161 .probe = usb_dsbr100_probe, 189 .probe = usb_dsbr100_probe,
162 .disconnect = usb_dsbr100_disconnect, 190 .disconnect = usb_dsbr100_disconnect,
163 .id_table = usb_dsbr100_device_table, 191 .id_table = usb_dsbr100_device_table,
192 .suspend = usb_dsbr100_suspend,
193 .resume = usb_dsbr100_resume,
194 .reset_resume = usb_dsbr100_resume,
195 .supports_autosuspend = 0,
164}; 196};
165 197
166/* Low-level device interface begins here */ 198/* Low-level device interface begins here */
@@ -168,95 +200,190 @@ static struct usb_driver usb_dsbr100_driver = {
168/* switch on radio */ 200/* switch on radio */
169static int dsbr100_start(struct dsbr100_device *radio) 201static int dsbr100_start(struct dsbr100_device *radio)
170{ 202{
171 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 203 int retval;
172 USB_REQ_GET_STATUS, 204 int request;
173 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 205
174 0x00, 0xC7, radio->transfer_buffer, 8, 300) < 0 || 206 mutex_lock(&radio->lock);
175 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 207
176 DSB100_ONOFF, 208 retval = usb_control_msg(radio->usbdev,
177 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 209 usb_rcvctrlpipe(radio->usbdev, 0),
178 0x01, 0x00, radio->transfer_buffer, 8, 300) < 0) 210 USB_REQ_GET_STATUS,
179 return -1; 211 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
180 radio->muted=0; 212 0x00, 0xC7, radio->transfer_buffer, 8, 300);
213
214 if (retval < 0) {
215 request = USB_REQ_GET_STATUS;
216 goto usb_control_msg_failed;
217 }
218
219 retval = usb_control_msg(radio->usbdev,
220 usb_rcvctrlpipe(radio->usbdev, 0),
221 DSB100_ONOFF,
222 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
223 0x01, 0x00, radio->transfer_buffer, 8, 300);
224
225 if (retval < 0) {
226 request = DSB100_ONOFF;
227 goto usb_control_msg_failed;
228 }
229
230 radio->muted = 0;
231 mutex_unlock(&radio->lock);
181 return (radio->transfer_buffer)[0]; 232 return (radio->transfer_buffer)[0];
182}
183 233
234usb_control_msg_failed:
235 mutex_unlock(&radio->lock);
236 dev_err(&radio->usbdev->dev,
237 "%s - usb_control_msg returned %i, request %i\n",
238 __func__, retval, request);
239 return retval;
240
241}
184 242
185/* switch off radio */ 243/* switch off radio */
186static int dsbr100_stop(struct dsbr100_device *radio) 244static int dsbr100_stop(struct dsbr100_device *radio)
187{ 245{
188 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 246 int retval;
189 USB_REQ_GET_STATUS, 247 int request;
190 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 248
191 0x16, 0x1C, radio->transfer_buffer, 8, 300) < 0 || 249 mutex_lock(&radio->lock);
192 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 250
193 DSB100_ONOFF, 251 retval = usb_control_msg(radio->usbdev,
194 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 252 usb_rcvctrlpipe(radio->usbdev, 0),
195 0x00, 0x00, radio->transfer_buffer, 8, 300) < 0) 253 USB_REQ_GET_STATUS,
196 return -1; 254 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
197 radio->muted=1; 255 0x16, 0x1C, radio->transfer_buffer, 8, 300);
256
257 if (retval < 0) {
258 request = USB_REQ_GET_STATUS;
259 goto usb_control_msg_failed;
260 }
261
262 retval = usb_control_msg(radio->usbdev,
263 usb_rcvctrlpipe(radio->usbdev, 0),
264 DSB100_ONOFF,
265 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
266 0x00, 0x00, radio->transfer_buffer, 8, 300);
267
268 if (retval < 0) {
269 request = DSB100_ONOFF;
270 goto usb_control_msg_failed;
271 }
272
273 radio->muted = 1;
274 mutex_unlock(&radio->lock);
198 return (radio->transfer_buffer)[0]; 275 return (radio->transfer_buffer)[0];
276
277usb_control_msg_failed:
278 mutex_unlock(&radio->lock);
279 dev_err(&radio->usbdev->dev,
280 "%s - usb_control_msg returned %i, request %i\n",
281 __func__, retval, request);
282 return retval;
283
199} 284}
200 285
201/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */ 286/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
202static int dsbr100_setfreq(struct dsbr100_device *radio, int freq) 287static int dsbr100_setfreq(struct dsbr100_device *radio, int freq)
203{ 288{
289 int retval;
290 int request;
291
204 freq = (freq / 16 * 80) / 1000 + 856; 292 freq = (freq / 16 * 80) / 1000 + 856;
205 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 293 mutex_lock(&radio->lock);
206 DSB100_TUNE, 294
207 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 295 retval = usb_control_msg(radio->usbdev,
208 (freq >> 8) & 0x00ff, freq & 0xff, 296 usb_rcvctrlpipe(radio->usbdev, 0),
209 radio->transfer_buffer, 8, 300) < 0 || 297 DSB100_TUNE,
210 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 298 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
211 USB_REQ_GET_STATUS, 299 (freq >> 8) & 0x00ff, freq & 0xff,
212 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 300 radio->transfer_buffer, 8, 300);
213 0x96, 0xB7, radio->transfer_buffer, 8, 300) < 0 || 301
214 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 302 if (retval < 0) {
215 USB_REQ_GET_STATUS, 303 request = DSB100_TUNE;
216 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 304 goto usb_control_msg_failed;
217 0x00, 0x24, radio->transfer_buffer, 8, 300) < 0) {
218 radio->stereo = -1;
219 return -1;
220 } 305 }
306
307 retval = usb_control_msg(radio->usbdev,
308 usb_rcvctrlpipe(radio->usbdev, 0),
309 USB_REQ_GET_STATUS,
310 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
311 0x96, 0xB7, radio->transfer_buffer, 8, 300);
312
313 if (retval < 0) {
314 request = USB_REQ_GET_STATUS;
315 goto usb_control_msg_failed;
316 }
317
318 retval = usb_control_msg(radio->usbdev,
319 usb_rcvctrlpipe(radio->usbdev, 0),
320 USB_REQ_GET_STATUS,
321 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
322 0x00, 0x24, radio->transfer_buffer, 8, 300);
323
324 if (retval < 0) {
325 request = USB_REQ_GET_STATUS;
326 goto usb_control_msg_failed;
327 }
328
221 radio->stereo = !((radio->transfer_buffer)[0] & 0x01); 329 radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
330 mutex_unlock(&radio->lock);
222 return (radio->transfer_buffer)[0]; 331 return (radio->transfer_buffer)[0];
332
333usb_control_msg_failed:
334 radio->stereo = -1;
335 mutex_unlock(&radio->lock);
336 dev_err(&radio->usbdev->dev,
337 "%s - usb_control_msg returned %i, request %i\n",
338 __func__, retval, request);
339 return retval;
223} 340}
224 341
225/* return the device status. This is, in effect, just whether it 342/* return the device status. This is, in effect, just whether it
226sees a stereo signal or not. Pity. */ 343sees a stereo signal or not. Pity. */
227static void dsbr100_getstat(struct dsbr100_device *radio) 344static void dsbr100_getstat(struct dsbr100_device *radio)
228{ 345{
229 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 346 int retval;
347
348 mutex_lock(&radio->lock);
349
350 retval = usb_control_msg(radio->usbdev,
351 usb_rcvctrlpipe(radio->usbdev, 0),
230 USB_REQ_GET_STATUS, 352 USB_REQ_GET_STATUS,
231 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 353 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
232 0x00 , 0x24, radio->transfer_buffer, 8, 300) < 0) 354 0x00 , 0x24, radio->transfer_buffer, 8, 300);
355
356 if (retval < 0) {
233 radio->stereo = -1; 357 radio->stereo = -1;
234 else 358 dev_err(&radio->usbdev->dev,
359 "%s - usb_control_msg returned %i, request %i\n",
360 __func__, retval, USB_REQ_GET_STATUS);
361 } else {
235 radio->stereo = !(radio->transfer_buffer[0] & 0x01); 362 radio->stereo = !(radio->transfer_buffer[0] & 0x01);
236} 363 }
237 364
365 mutex_unlock(&radio->lock);
366}
238 367
239/* USB subsystem interface begins here */ 368/* USB subsystem interface begins here */
240 369
241/* handle unplugging of the device, release data structures 370/*
242if nothing keeps us from doing it. If something is still 371 * Handle unplugging of the device.
243keeping us busy, the release callback of v4l will take care 372 * We call video_unregister_device in any case.
244of releasing it. */ 373 * The last function called in this procedure is
374 * usb_dsbr100_video_device_release
375 */
245static void usb_dsbr100_disconnect(struct usb_interface *intf) 376static void usb_dsbr100_disconnect(struct usb_interface *intf)
246{ 377{
247 struct dsbr100_device *radio = usb_get_intfdata(intf); 378 struct dsbr100_device *radio = usb_get_intfdata(intf);
248 379
249 usb_set_intfdata (intf, NULL); 380 usb_set_intfdata (intf, NULL);
250 if (radio) { 381
251 video_unregister_device(radio->videodev); 382 mutex_lock(&radio->lock);
252 radio->videodev = NULL; 383 radio->removed = 1;
253 if (radio->users) { 384 mutex_unlock(&radio->lock);
254 kfree(radio->transfer_buffer); 385
255 kfree(radio); 386 video_unregister_device(&radio->videodev);
256 } else {
257 radio->removed = 1;
258 }
259 }
260} 387}
261 388
262 389
@@ -276,6 +403,10 @@ static int vidioc_g_tuner(struct file *file, void *priv,
276{ 403{
277 struct dsbr100_device *radio = video_drvdata(file); 404 struct dsbr100_device *radio = video_drvdata(file);
278 405
406 /* safety check */
407 if (radio->removed)
408 return -EIO;
409
279 if (v->index > 0) 410 if (v->index > 0)
280 return -EINVAL; 411 return -EINVAL;
281 412
@@ -297,6 +428,12 @@ static int vidioc_g_tuner(struct file *file, void *priv,
297static int vidioc_s_tuner(struct file *file, void *priv, 428static int vidioc_s_tuner(struct file *file, void *priv,
298 struct v4l2_tuner *v) 429 struct v4l2_tuner *v)
299{ 430{
431 struct dsbr100_device *radio = video_drvdata(file);
432
433 /* safety check */
434 if (radio->removed)
435 return -EIO;
436
300 if (v->index > 0) 437 if (v->index > 0)
301 return -EINVAL; 438 return -EINVAL;
302 439
@@ -307,9 +444,15 @@ static int vidioc_s_frequency(struct file *file, void *priv,
307 struct v4l2_frequency *f) 444 struct v4l2_frequency *f)
308{ 445{
309 struct dsbr100_device *radio = video_drvdata(file); 446 struct dsbr100_device *radio = video_drvdata(file);
447 int retval;
448
449 /* safety check */
450 if (radio->removed)
451 return -EIO;
310 452
311 radio->curfreq = f->frequency; 453 radio->curfreq = f->frequency;
312 if (dsbr100_setfreq(radio, radio->curfreq) == -1) 454 retval = dsbr100_setfreq(radio, radio->curfreq);
455 if (retval < 0)
313 dev_warn(&radio->usbdev->dev, "Set frequency failed\n"); 456 dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
314 return 0; 457 return 0;
315} 458}
@@ -319,6 +462,10 @@ static int vidioc_g_frequency(struct file *file, void *priv,
319{ 462{
320 struct dsbr100_device *radio = video_drvdata(file); 463 struct dsbr100_device *radio = video_drvdata(file);
321 464
465 /* safety check */
466 if (radio->removed)
467 return -EIO;
468
322 f->type = V4L2_TUNER_RADIO; 469 f->type = V4L2_TUNER_RADIO;
323 f->frequency = radio->curfreq; 470 f->frequency = radio->curfreq;
324 return 0; 471 return 0;
@@ -343,6 +490,10 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
343{ 490{
344 struct dsbr100_device *radio = video_drvdata(file); 491 struct dsbr100_device *radio = video_drvdata(file);
345 492
493 /* safety check */
494 if (radio->removed)
495 return -EIO;
496
346 switch (ctrl->id) { 497 switch (ctrl->id) {
347 case V4L2_CID_AUDIO_MUTE: 498 case V4L2_CID_AUDIO_MUTE:
348 ctrl->value = radio->muted; 499 ctrl->value = radio->muted;
@@ -355,17 +506,24 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
355 struct v4l2_control *ctrl) 506 struct v4l2_control *ctrl)
356{ 507{
357 struct dsbr100_device *radio = video_drvdata(file); 508 struct dsbr100_device *radio = video_drvdata(file);
509 int retval;
510
511 /* safety check */
512 if (radio->removed)
513 return -EIO;
358 514
359 switch (ctrl->id) { 515 switch (ctrl->id) {
360 case V4L2_CID_AUDIO_MUTE: 516 case V4L2_CID_AUDIO_MUTE:
361 if (ctrl->value) { 517 if (ctrl->value) {
362 if (dsbr100_stop(radio) == -1) { 518 retval = dsbr100_stop(radio);
519 if (retval < 0) {
363 dev_warn(&radio->usbdev->dev, 520 dev_warn(&radio->usbdev->dev,
364 "Radio did not respond properly\n"); 521 "Radio did not respond properly\n");
365 return -EBUSY; 522 return -EBUSY;
366 } 523 }
367 } else { 524 } else {
368 if (dsbr100_start(radio) == -1) { 525 retval = dsbr100_start(radio);
526 if (retval < 0) {
369 dev_warn(&radio->usbdev->dev, 527 dev_warn(&radio->usbdev->dev,
370 "Radio did not respond properly\n"); 528 "Radio did not respond properly\n");
371 return -EBUSY; 529 return -EBUSY;
@@ -417,7 +575,8 @@ static int usb_dsbr100_open(struct inode *inode, struct file *file)
417 radio->users = 1; 575 radio->users = 1;
418 radio->muted = 1; 576 radio->muted = 1;
419 577
420 if (dsbr100_start(radio) < 0) { 578 retval = dsbr100_start(radio);
579 if (retval < 0) {
421 dev_warn(&radio->usbdev->dev, 580 dev_warn(&radio->usbdev->dev,
422 "Radio did not start up properly\n"); 581 "Radio did not start up properly\n");
423 radio->users = 0; 582 radio->users = 0;
@@ -426,9 +585,9 @@ static int usb_dsbr100_open(struct inode *inode, struct file *file)
426 } 585 }
427 586
428 retval = dsbr100_setfreq(radio, radio->curfreq); 587 retval = dsbr100_setfreq(radio, radio->curfreq);
429 588 if (retval < 0)
430 if (retval == -1) 589 dev_warn(&radio->usbdev->dev,
431 printk(KERN_WARNING KBUILD_MODNAME ": Set frequency failed\n"); 590 "set frequency failed\n");
432 591
433 unlock_kernel(); 592 unlock_kernel();
434 return 0; 593 return 0;
@@ -437,17 +596,62 @@ static int usb_dsbr100_open(struct inode *inode, struct file *file)
437static int usb_dsbr100_close(struct inode *inode, struct file *file) 596static int usb_dsbr100_close(struct inode *inode, struct file *file)
438{ 597{
439 struct dsbr100_device *radio = video_drvdata(file); 598 struct dsbr100_device *radio = video_drvdata(file);
599 int retval;
440 600
441 if (!radio) 601 if (!radio)
442 return -ENODEV; 602 return -ENODEV;
603
443 radio->users = 0; 604 radio->users = 0;
444 if (radio->removed) { 605 if (!radio->removed) {
445 kfree(radio->transfer_buffer); 606 retval = dsbr100_stop(radio);
446 kfree(radio); 607 if (retval < 0) {
608 dev_warn(&radio->usbdev->dev,
609 "dsbr100_stop failed\n");
610 }
611
447 } 612 }
448 return 0; 613 return 0;
449} 614}
450 615
616/* Suspend device - stop device. */
617static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
618{
619 struct dsbr100_device *radio = usb_get_intfdata(intf);
620 int retval;
621
622 retval = dsbr100_stop(radio);
623 if (retval < 0)
624 dev_warn(&intf->dev, "dsbr100_stop failed\n");
625
626 dev_info(&intf->dev, "going into suspend..\n");
627
628 return 0;
629}
630
631/* Resume device - start device. */
632static int usb_dsbr100_resume(struct usb_interface *intf)
633{
634 struct dsbr100_device *radio = usb_get_intfdata(intf);
635 int retval;
636
637 retval = dsbr100_start(radio);
638 if (retval < 0)
639 dev_warn(&intf->dev, "dsbr100_start failed\n");
640
641 dev_info(&intf->dev, "coming out of suspend..\n");
642
643 return 0;
644}
645
646/* free data structures */
647static void usb_dsbr100_video_device_release(struct video_device *videodev)
648{
649 struct dsbr100_device *radio = videodev_to_radio(videodev);
650
651 kfree(radio->transfer_buffer);
652 kfree(radio);
653}
654
451/* File system interface */ 655/* File system interface */
452static const struct file_operations usb_dsbr100_fops = { 656static const struct file_operations usb_dsbr100_fops = {
453 .owner = THIS_MODULE, 657 .owner = THIS_MODULE,
@@ -476,19 +680,19 @@ static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = {
476}; 680};
477 681
478/* V4L2 interface */ 682/* V4L2 interface */
479static struct video_device dsbr100_videodev_template = { 683static struct video_device dsbr100_videodev_data = {
480 .name = "D-Link DSB-R 100", 684 .name = "D-Link DSB-R 100",
481 .fops = &usb_dsbr100_fops, 685 .fops = &usb_dsbr100_fops,
482 .ioctl_ops = &usb_dsbr100_ioctl_ops, 686 .ioctl_ops = &usb_dsbr100_ioctl_ops,
483 .release = video_device_release, 687 .release = usb_dsbr100_video_device_release,
484}; 688};
485 689
486/* check if the device is present and register with v4l and 690/* check if the device is present and register with v4l and usb if it is */
487usb if it is */
488static int usb_dsbr100_probe(struct usb_interface *intf, 691static int usb_dsbr100_probe(struct usb_interface *intf,
489 const struct usb_device_id *id) 692 const struct usb_device_id *id)
490{ 693{
491 struct dsbr100_device *radio; 694 struct dsbr100_device *radio;
695 int retval;
492 696
493 radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL); 697 radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL);
494 698
@@ -501,23 +705,18 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
501 kfree(radio); 705 kfree(radio);
502 return -ENOMEM; 706 return -ENOMEM;
503 } 707 }
504 radio->videodev = video_device_alloc();
505 708
506 if (!(radio->videodev)) { 709 mutex_init(&radio->lock);
507 kfree(radio->transfer_buffer); 710 radio->videodev = dsbr100_videodev_data;
508 kfree(radio); 711
509 return -ENOMEM;
510 }
511 memcpy(radio->videodev, &dsbr100_videodev_template,
512 sizeof(dsbr100_videodev_template));
513 radio->removed = 0; 712 radio->removed = 0;
514 radio->users = 0; 713 radio->users = 0;
515 radio->usbdev = interface_to_usbdev(intf); 714 radio->usbdev = interface_to_usbdev(intf);
516 radio->curfreq = FREQ_MIN * FREQ_MUL; 715 radio->curfreq = FREQ_MIN * FREQ_MUL;
517 video_set_drvdata(radio->videodev, radio); 716 video_set_drvdata(&radio->videodev, radio);
518 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr) < 0) { 717 retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr);
519 dev_warn(&intf->dev, "Could not register video device\n"); 718 if (retval < 0) {
520 video_device_release(radio->videodev); 719 dev_err(&intf->dev, "couldn't register video device\n");
521 kfree(radio->transfer_buffer); 720 kfree(radio->transfer_buffer);
522 kfree(radio); 721 kfree(radio);
523 return -EIO; 722 return -EIO;
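
A note on the frequency math kept (and now error-checked) in dsbr100_setfreq(): f->frequency arrives in V4L2 TUNER_LOW units of 1/16 kHz, so (freq / 16 * 80) / 1000 + 856 first recovers kHz and then scales into the tuner's register range. For example, 100.0 MHz is passed in as 1,600,000, which becomes 1,600,000 / 16 = 100,000 kHz and a register value of 100,000 * 80 / 1000 + 856 = 8856. A standalone sketch of that conversion follows; the helper name and test value are illustrative only.

/* Standalone check of the conversion used by dsbr100_setfreq();
 * the helper name and the test value are illustrative. */
#include <stdio.h>

static int dsbr100_reg_from_v4l2(int freq)
{
	/* freq is in V4L2 TUNER_LOW units, i.e. 1/16 kHz */
	return (freq / 16 * 80) / 1000 + 856;
}

int main(void)
{
	int f = 100000 * 16;	/* 100.0 MHz expressed in 1/16 kHz units */

	printf("100.0 MHz -> %d\n", dsbr100_reg_from_v4l2(f)); /* 8856 */
	return 0;
}
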
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 9305e958fc66..dd6d3dfcd7d2 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -1,7 +1,7 @@
1/* radiotrack (radioreveal) driver for Linux radio support 1/* radiotrack (radioreveal) driver for Linux radio support
2 * (c) 1997 M. Kirkwood 2 * (c) 1997 M. Kirkwood
3 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> 3 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
4 * Converted to new API by Alan Cox <Alan.Cox@linux.org> 4 * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> 5 * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
6 * 6 *
7 * History: 7 * History:
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 0490a1fa999d..bfd37f38b9ab 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -23,7 +23,7 @@
23 * 2002-01-17 Adam Belay <ambx1@neo.rr.com> 23 * 2002-01-17 Adam Belay <ambx1@neo.rr.com>
24 * Updated to latest pnp code 24 * Updated to latest pnp code
25 * 25 *
26 * 2003-01-31 Alan Cox <alan@redhat.com> 26 * 2003-01-31 Alan Cox <alan@lxorguk.ukuu.org.uk>
27 * Cleaned up locking, delay code, general odds and ends 27 * Cleaned up locking, delay code, general odds and ends
28 * 28 *
29 * 2006-07-30 Hans J. Koch <koch@hjk-az.de> 29 * 2006-07-30 Hans J. Koch <koch@hjk-az.de>
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index d131a5d38128..e13118da307b 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -8,7 +8,7 @@
8 * RadioTrack II driver for Linux radio support (C) 1998 Ben Pfaff 8 * RadioTrack II driver for Linux radio support (C) 1998 Ben Pfaff
9 * 9 *
10 * Based on RadioTrack I/RadioReveal (C) 1997 M. Kirkwood 10 * Based on RadioTrack I/RadioReveal (C) 1997 M. Kirkwood
11 * Converted to new API by Alan Cox <Alan.Cox@linux.org> 11 * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
12 * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> 12 * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
13 * 13 *
14 * TODO: Allow for more than one of these foolish entities :-) 14 * TODO: Allow for more than one of these foolish entities :-)
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 256cbeffdcb6..e730eddb2bb5 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -72,6 +72,11 @@ MODULE_LICENSE("GPL");
72#define USB_AMRADIO_VENDOR 0x07ca 72#define USB_AMRADIO_VENDOR 0x07ca
73#define USB_AMRADIO_PRODUCT 0xb800 73#define USB_AMRADIO_PRODUCT 0xb800
74 74
75/* dev_warn macro with driver name */
76#define MR800_DRIVER_NAME "radio-mr800"
77#define amradio_dev_warn(dev, fmt, arg...) \
78 dev_warn(dev, MR800_DRIVER_NAME " - " fmt, ##arg)
79
75/* Probably USB_TIMEOUT should be modified in module parameter */ 80/* Probably USB_TIMEOUT should be modified in module parameter */
76#define BUFFER_LENGTH 8 81#define BUFFER_LENGTH 8
77#define USB_TIMEOUT 500 82#define USB_TIMEOUT 500
@@ -154,14 +159,14 @@ MODULE_DEVICE_TABLE(usb, usb_amradio_device_table);
154 159
155/* USB subsystem interface */ 160/* USB subsystem interface */
156static struct usb_driver usb_amradio_driver = { 161static struct usb_driver usb_amradio_driver = {
157 .name = "radio-mr800", 162 .name = MR800_DRIVER_NAME,
158 .probe = usb_amradio_probe, 163 .probe = usb_amradio_probe,
159 .disconnect = usb_amradio_disconnect, 164 .disconnect = usb_amradio_disconnect,
160 .suspend = usb_amradio_suspend, 165 .suspend = usb_amradio_suspend,
161 .resume = usb_amradio_resume, 166 .resume = usb_amradio_resume,
162 .reset_resume = usb_amradio_resume, 167 .reset_resume = usb_amradio_resume,
163 .id_table = usb_amradio_device_table, 168 .id_table = usb_amradio_device_table,
164 .supports_autosuspend = 1, 169 .supports_autosuspend = 0,
165}; 170};
166 171
167/* switch on radio. Send 8 bytes to device. */ 172/* switch on radio. Send 8 bytes to device. */
@@ -202,6 +207,10 @@ static int amradio_stop(struct amradio_device *radio)
202 int retval; 207 int retval;
203 int size; 208 int size;
204 209
210 /* safety check */
211 if (radio->removed)
212 return -EIO;
213
205 mutex_lock(&radio->lock); 214 mutex_lock(&radio->lock);
206 215
207 radio->buffer[0] = 0x00; 216 radio->buffer[0] = 0x00;
@@ -235,6 +244,10 @@ static int amradio_setfreq(struct amradio_device *radio, int freq)
235 int size; 244 int size;
236 unsigned short freq_send = 0x13 + (freq >> 3) / 25; 245 unsigned short freq_send = 0x13 + (freq >> 3) / 25;
237 246
247 /* safety check */
248 if (radio->removed)
249 return -EIO;
250
238 mutex_lock(&radio->lock); 251 mutex_lock(&radio->lock);
239 252
240 radio->buffer[0] = 0x00; 253 radio->buffer[0] = 0x00;
@@ -288,18 +301,12 @@ static void usb_amradio_disconnect(struct usb_interface *intf)
288{ 301{
289 struct amradio_device *radio = usb_get_intfdata(intf); 302 struct amradio_device *radio = usb_get_intfdata(intf);
290 303
291 usb_set_intfdata(intf, NULL); 304 mutex_lock(&radio->lock);
305 radio->removed = 1;
306 mutex_unlock(&radio->lock);
292 307
293 if (radio) { 308 usb_set_intfdata(intf, NULL);
294 video_unregister_device(radio->videodev); 309 video_unregister_device(radio->videodev);
295 radio->videodev = NULL;
296 if (radio->users) {
297 kfree(radio->buffer);
298 kfree(radio);
299 } else {
300 radio->removed = 1;
301 }
302 }
303} 310}
304 311
305/* vidioc_querycap - query device capabilities */ 312/* vidioc_querycap - query device capabilities */
@@ -320,6 +327,10 @@ static int vidioc_g_tuner(struct file *file, void *priv,
320{ 327{
321 struct amradio_device *radio = video_get_drvdata(video_devdata(file)); 328 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
322 329
330 /* safety check */
331 if (radio->removed)
332 return -EIO;
333
323 if (v->index > 0) 334 if (v->index > 0)
324 return -EINVAL; 335 return -EINVAL;
325 336
@@ -346,6 +357,12 @@ static int vidioc_g_tuner(struct file *file, void *priv,
346static int vidioc_s_tuner(struct file *file, void *priv, 357static int vidioc_s_tuner(struct file *file, void *priv,
347 struct v4l2_tuner *v) 358 struct v4l2_tuner *v)
348{ 359{
360 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
361
362 /* safety check */
363 if (radio->removed)
364 return -EIO;
365
349 if (v->index > 0) 366 if (v->index > 0)
350 return -EINVAL; 367 return -EINVAL;
351 return 0; 368 return 0;
@@ -357,9 +374,14 @@ static int vidioc_s_frequency(struct file *file, void *priv,
357{ 374{
358 struct amradio_device *radio = video_get_drvdata(video_devdata(file)); 375 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
359 376
377 /* safety check */
378 if (radio->removed)
379 return -EIO;
380
360 radio->curfreq = f->frequency; 381 radio->curfreq = f->frequency;
361 if (amradio_setfreq(radio, radio->curfreq) < 0) 382 if (amradio_setfreq(radio, radio->curfreq) < 0)
362 warn("Set frequency failed"); 383 amradio_dev_warn(&radio->videodev->dev,
384 "set frequency failed\n");
363 return 0; 385 return 0;
364} 386}
365 387
@@ -369,6 +391,10 @@ static int vidioc_g_frequency(struct file *file, void *priv,
369{ 391{
370 struct amradio_device *radio = video_get_drvdata(video_devdata(file)); 392 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
371 393
394 /* safety check */
395 if (radio->removed)
396 return -EIO;
397
372 f->type = V4L2_TUNER_RADIO; 398 f->type = V4L2_TUNER_RADIO;
373 f->frequency = radio->curfreq; 399 f->frequency = radio->curfreq;
374 return 0; 400 return 0;
@@ -382,8 +408,7 @@ static int vidioc_queryctrl(struct file *file, void *priv,
382 408
383 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 409 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
384 if (qc->id && qc->id == radio_qctrl[i].id) { 410 if (qc->id && qc->id == radio_qctrl[i].id) {
385 memcpy(qc, &(radio_qctrl[i]), 411 memcpy(qc, &(radio_qctrl[i]), sizeof(*qc));
386 sizeof(*qc));
387 return 0; 412 return 0;
388 } 413 }
389 } 414 }
@@ -396,6 +421,10 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
396{ 421{
397 struct amradio_device *radio = video_get_drvdata(video_devdata(file)); 422 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
398 423
424 /* safety check */
425 if (radio->removed)
426 return -EIO;
427
399 switch (ctrl->id) { 428 switch (ctrl->id) {
400 case V4L2_CID_AUDIO_MUTE: 429 case V4L2_CID_AUDIO_MUTE:
401 ctrl->value = radio->muted; 430 ctrl->value = radio->muted;
@@ -410,16 +439,22 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
410{ 439{
411 struct amradio_device *radio = video_get_drvdata(video_devdata(file)); 440 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
412 441
442 /* safety check */
443 if (radio->removed)
444 return -EIO;
445
413 switch (ctrl->id) { 446 switch (ctrl->id) {
414 case V4L2_CID_AUDIO_MUTE: 447 case V4L2_CID_AUDIO_MUTE:
415 if (ctrl->value) { 448 if (ctrl->value) {
416 if (amradio_stop(radio) < 0) { 449 if (amradio_stop(radio) < 0) {
417 warn("amradio_stop() failed"); 450 amradio_dev_warn(&radio->videodev->dev,
451 "amradio_stop failed\n");
418 return -1; 452 return -1;
419 } 453 }
420 } else { 454 } else {
421 if (amradio_start(radio) < 0) { 455 if (amradio_start(radio) < 0) {
422 warn("amradio_start() failed"); 456 amradio_dev_warn(&radio->videodev->dev,
457 "amradio_start failed\n");
423 return -1; 458 return -1;
424 } 459 }
425 } 460 }
@@ -475,30 +510,38 @@ static int usb_amradio_open(struct inode *inode, struct file *file)
475 radio->muted = 1; 510 radio->muted = 1;
476 511
477 if (amradio_start(radio) < 0) { 512 if (amradio_start(radio) < 0) {
478 warn("Radio did not start up properly"); 513 amradio_dev_warn(&radio->videodev->dev,
514 "radio did not start up properly\n");
479 radio->users = 0; 515 radio->users = 0;
480 unlock_kernel(); 516 unlock_kernel();
481 return -EIO; 517 return -EIO;
482 } 518 }
483 if (amradio_setfreq(radio, radio->curfreq) < 0) 519 if (amradio_setfreq(radio, radio->curfreq) < 0)
484 warn("Set frequency failed"); 520 amradio_dev_warn(&radio->videodev->dev,
521 "set frequency failed\n");
485 522
486 unlock_kernel(); 523 unlock_kernel();
487 return 0; 524 return 0;
488} 525}
489 526
490/*close device - free driver structures */ 527/*close device */
491static int usb_amradio_close(struct inode *inode, struct file *file) 528static int usb_amradio_close(struct inode *inode, struct file *file)
492{ 529{
493 struct amradio_device *radio = video_get_drvdata(video_devdata(file)); 530 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
531 int retval;
494 532
495 if (!radio) 533 if (!radio)
496 return -ENODEV; 534 return -ENODEV;
535
497 radio->users = 0; 536 radio->users = 0;
498 if (radio->removed) { 537
499 kfree(radio->buffer); 538 if (!radio->removed) {
500 kfree(radio); 539 retval = amradio_stop(radio);
540 if (retval < 0)
541 amradio_dev_warn(&radio->videodev->dev,
542 "amradio_stop failed\n");
501 } 543 }
544
502 return 0; 545 return 0;
503} 546}
504 547
@@ -508,9 +551,9 @@ static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
508 struct amradio_device *radio = usb_get_intfdata(intf); 551 struct amradio_device *radio = usb_get_intfdata(intf);
509 552
510 if (amradio_stop(radio) < 0) 553 if (amradio_stop(radio) < 0)
511 warn("amradio_stop() failed"); 554 dev_warn(&intf->dev, "amradio_stop failed\n");
512 555
513 info("radio-mr800: Going into suspend.."); 556 dev_info(&intf->dev, "going into suspend..\n");
514 557
515 return 0; 558 return 0;
516} 559}
@@ -521,9 +564,9 @@ static int usb_amradio_resume(struct usb_interface *intf)
521 struct amradio_device *radio = usb_get_intfdata(intf); 564 struct amradio_device *radio = usb_get_intfdata(intf);
522 565
523 if (amradio_start(radio) < 0) 566 if (amradio_start(radio) < 0)
524 warn("amradio_start() failed"); 567 dev_warn(&intf->dev, "amradio_start failed\n");
525 568
526 info("radio-mr800: Coming out of suspend.."); 569 dev_info(&intf->dev, "coming out of suspend..\n");
527 570
528 return 0; 571 return 0;
529} 572}
@@ -555,12 +598,24 @@ static const struct v4l2_ioctl_ops usb_amradio_ioctl_ops = {
555 .vidioc_s_input = vidioc_s_input, 598 .vidioc_s_input = vidioc_s_input,
556}; 599};
557 600
601static void usb_amradio_device_release(struct video_device *videodev)
602{
603 struct amradio_device *radio = video_get_drvdata(videodev);
604
605 /* we call v4l to free radio->videodev */
606 video_device_release(videodev);
607
608 /* free rest memory */
609 kfree(radio->buffer);
610 kfree(radio);
611}
612
558/* V4L2 interface */ 613/* V4L2 interface */
559static struct video_device amradio_videodev_template = { 614static struct video_device amradio_videodev_template = {
560 .name = "AverMedia MR 800 USB FM Radio", 615 .name = "AverMedia MR 800 USB FM Radio",
561 .fops = &usb_amradio_fops, 616 .fops = &usb_amradio_fops,
562 .ioctl_ops = &usb_amradio_ioctl_ops, 617 .ioctl_ops = &usb_amradio_ioctl_ops,
563 .release = video_device_release, 618 .release = usb_amradio_device_release,
564}; 619};
565 620
566/* check if the device is present and register with v4l and 621/* check if the device is present and register with v4l and
@@ -602,7 +657,7 @@ static int usb_amradio_probe(struct usb_interface *intf,
602 657
603 video_set_drvdata(radio->videodev, radio); 658 video_set_drvdata(radio->videodev, radio);
604 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr)) { 659 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr)) {
605 warn("Could not register video device"); 660 dev_warn(&intf->dev, "could not register video device\n");
606 video_device_release(radio->videodev); 661 video_device_release(radio->videodev);
607 kfree(radio->buffer); 662 kfree(radio->buffer);
608 kfree(radio); 663 kfree(radio);
@@ -617,9 +672,13 @@ static int __init amradio_init(void)
617{ 672{
618 int retval = usb_register(&usb_amradio_driver); 673 int retval = usb_register(&usb_amradio_driver);
619 674
620 info(DRIVER_VERSION " " DRIVER_DESC); 675 pr_info(KBUILD_MODNAME
676 ": version " DRIVER_VERSION " " DRIVER_DESC "\n");
677
621 if (retval) 678 if (retval)
622 err("usb_register failed. Error number %d", retval); 679 pr_err(KBUILD_MODNAME
680 ": usb_register failed. Error number %d\n", retval);
681
623 return retval; 682 return retval;
624} 683}
625 684
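The radio-mr800 hunks above replace the old warn()/info() helpers with device-prefixed messages and guard every handler with the removed flag that usb_amradio_disconnect() now sets under radio->lock. The handler below restates the new vidioc_s_frequency from those hunks in one piece to show the resulting shape; field and helper names come from the hunks, nothing new is introduced.

/* amradio_dev_warn() is dev_warn() with the driver name prepended, so
 * amradio_dev_warn(&radio->videodev->dev, "set frequency failed\n")
 * logs roughly: "... radio-mr800 - set frequency failed"
 */
static int vidioc_s_frequency(struct file *file, void *priv,
                              struct v4l2_frequency *f)
{
        struct amradio_device *radio = video_get_drvdata(video_devdata(file));

        /* bail out once disconnect() has marked the device as gone */
        if (radio->removed)
                return -EIO;

        radio->curfreq = f->frequency;
        if (amradio_setfreq(radio, radio->curfreq) < 0)
                amradio_dev_warn(&radio->videodev->dev,
                        "set frequency failed\n");
        return 0;
}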
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index a67079777419..7704f243b6f0 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -1,7 +1,7 @@
1/* RadioTrack II driver for Linux radio support (C) 1998 Ben Pfaff 1/* RadioTrack II driver for Linux radio support (C) 1998 Ben Pfaff
2 * 2 *
3 * Based on RadioTrack I/RadioReveal (C) 1997 M. Kirkwood 3 * Based on RadioTrack I/RadioReveal (C) 1997 M. Kirkwood
4 * Converted to new API by Alan Cox <Alan.Cox@linux.org> 4 * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> 5 * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
6 * 6 *
7 * TODO: Allow for more than one of these foolish entities :-) 7 * TODO: Allow for more than one of these foolish entities :-)
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 329c90bddadd..834d43651c70 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -3,7 +3,7 @@
3 * (c) 1997 M. Kirkwood 3 * (c) 1997 M. Kirkwood
4 * (c) 1998 Petr Vandrovec, vandrove@vc.cvut.cz 4 * (c) 1998 Petr Vandrovec, vandrove@vc.cvut.cz
5 * 5 *
6 * Fitted to new interface by Alan Cox <alan.cox@linux.org> 6 * Fitted to new interface by Alan Cox <alan@lxorguk.ukuu.org.uk>
7 * Made working and cleaned up functions <mikael.hedin@irf.se> 7 * Made working and cleaned up functions <mikael.hedin@irf.se>
8 * Support for ISAPnP by Ladislav Michl <ladis@psi.cz> 8 * Support for ISAPnP by Ladislav Michl <ladis@psi.cz>
9 * 9 *
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 057fd7e160c4..19cf3b8f67c4 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -184,7 +184,7 @@ config VIDEO_MSP3400
184 184
185config VIDEO_CS5345 185config VIDEO_CS5345
186 tristate "Cirrus Logic CS5345 audio ADC" 186 tristate "Cirrus Logic CS5345 audio ADC"
187 depends on VIDEO_V4L2 && I2C && EXPERIMENTAL 187 depends on VIDEO_V4L2 && I2C
188 ---help--- 188 ---help---
189 Support for the Cirrus Logic CS5345 24-bit, 192 kHz 189 Support for the Cirrus Logic CS5345 24-bit, 192 kHz
190 stereo A/D converter. 190 stereo A/D converter.
@@ -204,7 +204,7 @@ config VIDEO_CS53L32A
204 204
205config VIDEO_M52790 205config VIDEO_M52790
206 tristate "Mitsubishi M52790 A/V switch" 206 tristate "Mitsubishi M52790 A/V switch"
207 depends on VIDEO_V4L2 && I2C && EXPERIMENTAL 207 depends on VIDEO_V4L2 && I2C
208 ---help--- 208 ---help---
209 Support for the Mitsubishi M52790 A/V switch. 209 Support for the Mitsubishi M52790 A/V switch.
210 210
@@ -242,7 +242,7 @@ config VIDEO_WM8739
242 242
243config VIDEO_VP27SMPX 243config VIDEO_VP27SMPX
244 tristate "Panasonic VP27s internal MPX" 244 tristate "Panasonic VP27s internal MPX"
245 depends on VIDEO_V4L2 && I2C && EXPERIMENTAL 245 depends on VIDEO_V4L2 && I2C
246 ---help--- 246 ---help---
247 Support for the internal MPX of the Panasonic VP27s tuner. 247 Support for the internal MPX of the Panasonic VP27s tuner.
248 248
@@ -361,6 +361,17 @@ config VIDEO_SAA7191
361 To compile this driver as a module, choose M here: the 361 To compile this driver as a module, choose M here: the
362 module will be called saa7191. 362 module will be called saa7191.
363 363
364config VIDEO_TVP514X
365 tristate "Texas Instruments TVP514x video decoder"
366 depends on VIDEO_V4L2 && I2C
367 ---help---
368 This is a Video4Linux2 sensor-level driver for the TI TVP5146/47
369 decoder. It is currently working with the TI OMAP3 camera
370 controller.
371
372 To compile this driver as a module, choose M here: the
373 module will be called tvp514x.
374
364config VIDEO_TVP5150 375config VIDEO_TVP5150
365 tristate "Texas Instruments TVP5150 video decoder" 376 tristate "Texas Instruments TVP5150 video decoder"
366 depends on VIDEO_V4L2 && I2C 377 depends on VIDEO_V4L2 && I2C
@@ -387,7 +398,7 @@ comment "MPEG video encoders"
387 398
388config VIDEO_CX2341X 399config VIDEO_CX2341X
389 tristate "Conexant CX2341x MPEG encoders" 400 tristate "Conexant CX2341x MPEG encoders"
390 depends on VIDEO_V4L2 && EXPERIMENTAL && VIDEO_V4L2_COMMON 401 depends on VIDEO_V4L2 && VIDEO_V4L2_COMMON
391 ---help--- 402 ---help---
392 Support for the Conexant CX23416 MPEG encoders 403 Support for the Conexant CX23416 MPEG encoders
393 and CX23415 MPEG encoder/decoders. 404 and CX23415 MPEG encoder/decoders.
@@ -725,10 +736,16 @@ config MT9M001_PCA9536_SWITCH
725 extender to switch between 8 and 10 bit datawidth modes 736 extender to switch between 8 and 10 bit datawidth modes
726 737
727config SOC_CAMERA_MT9M111 738config SOC_CAMERA_MT9M111
728 tristate "mt9m111 support" 739 tristate "mt9m111 and mt9m112 support"
740 depends on SOC_CAMERA && I2C
741 help
742 This driver supports MT9M111 and MT9M112 cameras from Micron
743
744config SOC_CAMERA_MT9T031
745 tristate "mt9t031 support"
729 depends on SOC_CAMERA && I2C 746 depends on SOC_CAMERA && I2C
730 help 747 help
731 This driver supports MT9M111 cameras from Micron 748 This driver supports MT9T031 cameras from Micron.
732 749
733config SOC_CAMERA_MT9V022 750config SOC_CAMERA_MT9V022
734 tristate "mt9v022 support" 751 tristate "mt9v022 support"
@@ -744,12 +761,24 @@ config MT9V022_PCA9536_SWITCH
744 Select this if your MT9V022 camera uses a PCA9536 I2C GPIO 761 Select this if your MT9V022 camera uses a PCA9536 I2C GPIO
745 extender to switch between 8 and 10 bit datawidth modes 762 extender to switch between 8 and 10 bit datawidth modes
746 763
764config SOC_CAMERA_TW9910
765 tristate "tw9910 support"
766 depends on SOC_CAMERA && I2C
767 help
768 This is a tw9910 video driver
769
747config SOC_CAMERA_PLATFORM 770config SOC_CAMERA_PLATFORM
748 tristate "platform camera support" 771 tristate "platform camera support"
749 depends on SOC_CAMERA 772 depends on SOC_CAMERA
750 help 773 help
751 This is a generic SoC camera platform driver, useful for testing 774 This is a generic SoC camera platform driver, useful for testing
752 775
776config SOC_CAMERA_OV772X
777 tristate "ov772x camera support"
778 depends on SOC_CAMERA && I2C
779 help
780 This is a ov772x camera driver
781
753config VIDEO_PXA27x 782config VIDEO_PXA27x
754 tristate "PXA27x Quick Capture Interface driver" 783 tristate "PXA27x Quick Capture Interface driver"
755 depends on VIDEO_DEV && PXA27x && SOC_CAMERA 784 depends on VIDEO_DEV && PXA27x && SOC_CAMERA
@@ -764,6 +793,13 @@ config VIDEO_SH_MOBILE_CEU
764 ---help--- 793 ---help---
765 This is a v4l2 driver for the SuperH Mobile CEU Interface 794 This is a v4l2 driver for the SuperH Mobile CEU Interface
766 795
796config VIDEO_OMAP2
797 tristate "OMAP2 Camera Capture Interface driver"
798 depends on VIDEO_DEV && ARCH_OMAP2
799 select VIDEOBUF_DMA_SG
800 ---help---
801 This is a v4l2 driver for the TI OMAP2 camera capture interface
802
767# 803#
768# USB Multimedia device configuration 804# USB Multimedia device configuration
769# 805#
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 16962f3aa157..1611c33b1aee 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -8,9 +8,11 @@ msp3400-objs := msp3400-driver.o msp3400-kthreads.o
8 8
9stkwebcam-objs := stk-webcam.o stk-sensor.o 9stkwebcam-objs := stk-webcam.o stk-sensor.o
10 10
11videodev-objs := v4l2-dev.o v4l2-ioctl.o 11omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
12 12
13obj-$(CONFIG_VIDEO_DEV) += videodev.o compat_ioctl32.o v4l2-int-device.o 13videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-subdev.o
14
15obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-compat-ioctl32.o v4l2-int-device.o
14 16
15obj-$(CONFIG_VIDEO_V4L2_COMMON) += v4l2-common.o 17obj-$(CONFIG_VIDEO_V4L2_COMMON) += v4l2-common.o
16 18
@@ -25,6 +27,7 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
25obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o 27obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
26obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o 28obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
27obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o 29obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
30obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o
28 31
29obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o 32obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
30obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o 33obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o
@@ -66,6 +69,7 @@ obj-$(CONFIG_VIDEO_CX88) += cx88/
66obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ 69obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
67obj-$(CONFIG_VIDEO_USBVISION) += usbvision/ 70obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
68obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o 71obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
72obj-$(CONFIG_VIDEO_TVP514X) += tvp514x.o
69obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/ 73obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
70obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o 74obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
71obj-$(CONFIG_VIDEO_CS5345) += cs5345.o 75obj-$(CONFIG_VIDEO_CS5345) += cs5345.o
@@ -129,11 +133,15 @@ obj-$(CONFIG_VIDEO_CX23885) += cx23885/
129 133
130obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o 134obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
131obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o 135obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
136obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
132obj-$(CONFIG_SOC_CAMERA) += soc_camera.o 137obj-$(CONFIG_SOC_CAMERA) += soc_camera.o
133obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o 138obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
134obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o 139obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
140obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
135obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o 141obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
142obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
136obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o 143obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
144obj-$(CONFIG_SOC_CAMERA_TW9910) += tw9910.o
137 145
138obj-$(CONFIG_VIDEO_AU0828) += au0828/ 146obj-$(CONFIG_VIDEO_AU0828) += au0828/
139 147
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index e09b00693230..2ba6abd92b6f 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -396,8 +396,7 @@ out_up:
396 return ret; 396 return ret;
397} 397}
398 398
399static int ar_do_ioctl(struct inode *inode, struct file *file, 399static int ar_do_ioctl(struct file *file, unsigned int cmd, void *arg)
400 unsigned int cmd, void *arg)
401{ 400{
402 struct video_device *dev = video_devdata(file); 401 struct video_device *dev = video_devdata(file);
403 struct ar_device *ar = video_get_drvdata(dev); 402 struct ar_device *ar = video_get_drvdata(dev);
@@ -543,7 +542,7 @@ static int ar_do_ioctl(struct inode *inode, struct file *file,
543static int ar_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 542static int ar_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
544 unsigned long arg) 543 unsigned long arg)
545{ 544{
546 return video_usercopy(inode, file, cmd, arg, ar_do_ioctl); 545 return video_usercopy(file, cmd, arg, ar_do_ioctl);
547} 546}
548 547
549#if USE_INT 548#if USE_INT
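Several files in this patch (arv.c here, and bw-qcam.c, c-qcam.c, cpia.c and cpia2_v4l.c further down) drop the struct inode argument from their video_usercopy() callbacks. A sketch of the resulting ioctl path, assuming the headers shown; the foo_* names are placeholders, not code from the patch.

#include <linux/errno.h>
#include <linux/fs.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>   /* video_usercopy() */

static int foo_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
        /* cmd is the ioctl number; arg already points at a kernel-space copy */
        switch (cmd) {
        default:
                return -EINVAL;
        }
}

static int foo_ioctl(struct inode *inode, struct file *file,
                     unsigned int cmd, unsigned long arg)
{
        /* video_usercopy() copies arg in and out around foo_do_ioctl() */
        return video_usercopy(file, cmd, arg, foo_do_ioctl);
}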
diff --git a/drivers/media/video/bt8xx/bt832.c b/drivers/media/video/bt8xx/bt832.c
deleted file mode 100644
index 216fc9680e80..000000000000
--- a/drivers/media/video/bt8xx/bt832.c
+++ /dev/null
@@ -1,274 +0,0 @@
1/* Driver for Bt832 CMOS Camera Video Processor
2 i2c-addresses: 0x88 or 0x8a
3
4 The BT832 interfaces to a Quartzsight Digital Camera (352x288, 25 or 30 fps)
5 via a 9 pin connector ( 4-wire SDATA, 2-wire i2c, SCLK, VCC, GND).
6 It outputs an 8-bit 4:2:2 YUV or YCrCb video signal which can be directly
7 connected to bt848/bt878 GPIO pins on this purpose.
8 (see: VLSI Vision Ltd. www.vvl.co.uk for camera datasheets)
9
10 Supported Cards:
11 - Pixelview Rev.4E: 0x8a
12 GPIO 0x400000 toggles Bt832 RESET, and the chip changes to i2c 0x88 !
13
14 (c) Gunther Mayer, 2002
15
16 STATUS:
17 - detect chip and hexdump
18 - reset chip and leave low power mode
19 - detect camera present
20
21 TODO:
22 - make it work (find correct setup for Bt832 and Bt878)
23*/
24
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/i2c.h>
28#include <linux/types.h>
29#include <linux/videodev.h>
30#include <linux/init.h>
31#include <linux/errno.h>
32#include <linux/slab.h>
33#include <media/v4l2-common.h>
34
35#include "bttv.h"
36#include "bt832.h"
37
38MODULE_LICENSE("GPL");
39
40/* Addresses to scan */
41static unsigned short normal_i2c[] = { I2C_ADDR_BT832_ALT1>>1, I2C_ADDR_BT832_ALT2>>1,
42 I2C_CLIENT_END };
43I2C_CLIENT_INSMOD;
44
45int debug; /* debug output */
46module_param(debug, int, 0644);
47
48/* ---------------------------------------------------------------------- */
49
50static int bt832_detach(struct i2c_client *client);
51
52
53static struct i2c_driver driver;
54static struct i2c_client client_template;
55
56struct bt832 {
57 struct i2c_client client;
58};
59
60int bt832_hexdump(struct i2c_client *i2c_client_s, unsigned char *buf)
61{
62 int i,rc;
63 buf[0]=0x80; // start at register 0 with auto-increment
64 if (1 != (rc = i2c_master_send(i2c_client_s,buf,1)))
65 v4l_err(i2c_client_s,"i2c i/o error: rc == %d (should be 1)\n",rc);
66
67 for(i=0;i<65;i++)
68 buf[i]=0;
69 if (65 != (rc=i2c_master_recv(i2c_client_s,buf,65)))
70 v4l_err(i2c_client_s,"i2c i/o error: rc == %d (should be 65)\n",rc);
71
72 // Note: On READ the first byte is the current index
73 // (e.g. 0x80, what we just wrote)
74
75 if(debug>1) {
76 int i;
77 v4l_dbg(2, debug,i2c_client_s,"hexdump:");
78 for(i=1;i<65;i++) {
79 if(i!=1) {
80 if(((i-1)%8)==0) printk(" ");
81 if(((i-1)%16)==0) {
82 printk("\n");
83 v4l_dbg(2, debug,i2c_client_s,"hexdump:");
84 }
85 }
86 printk(" %02x",buf[i]);
87 }
88 printk("\n");
89 }
90 return 0;
91}
92
93// Return: 1 (is a bt832), 0 (No bt832 here)
94int bt832_init(struct i2c_client *i2c_client_s)
95{
96 unsigned char *buf;
97 int rc;
98
99 buf=kmalloc(65,GFP_KERNEL);
100 if (!buf) {
101 v4l_err(&t->client,
102 "Unable to allocate memory. Detaching.\n");
103 return 0;
104 }
105 bt832_hexdump(i2c_client_s,buf);
106
107 if(buf[0x40] != 0x31) {
108 v4l_err(i2c_client_s,"This i2c chip is no bt832 (id=%02x). Detaching.\n",buf[0x40]);
109 kfree(buf);
110 return 0;
111 }
112
113 v4l_err(i2c_client_s,"Write 0 tp VPSTATUS\n");
114 buf[0]=BT832_VP_STATUS; // Reg.52
115 buf[1]= 0x00;
116 if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
117 v4l_err(i2c_client_s,"i2c i/o error VPS: rc == %d (should be 2)\n",rc);
118
119 bt832_hexdump(i2c_client_s,buf);
120
121
122 // Leave low power mode:
123 v4l_err(i2c_client_s,"leave low power mode.\n");
124 buf[0]=BT832_CAM_SETUP0; //0x39 57
125 buf[1]=0x08;
126 if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
127 v4l_err(i2c_client_s,"i2c i/o error LLPM: rc == %d (should be 2)\n",rc);
128
129 bt832_hexdump(i2c_client_s,buf);
130
131 v4l_info(i2c_client_s,"Write 0 tp VPSTATUS\n");
132 buf[0]=BT832_VP_STATUS; // Reg.52
133 buf[1]= 0x00;
134 if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
135 v4l_err(i2c_client_s,"i2c i/o error VPS: rc == %d (should be 2)\n",rc);
136
137 bt832_hexdump(i2c_client_s,buf);
138
139
140 // Enable Output
141 v4l_info(i2c_client_s,"Enable Output\n");
142 buf[0]=BT832_VP_CONTROL1; // Reg.40
143 buf[1]= 0x27 & (~0x01); // Default | !skip
144 if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
145 v4l_err(i2c_client_s,"i2c i/o error EO: rc == %d (should be 2)\n",rc);
146
147 bt832_hexdump(i2c_client_s,buf);
148
149
150 // for testing (even works when no camera attached)
151 v4l_info(i2c_client_s,"*** Generate NTSC M Bars *****\n");
152 buf[0]=BT832_VP_TESTCONTROL0; // Reg. 42
153 buf[1]=3; // Generate NTSC System M bars, Generate Frame timing internally
154 if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
155 v4l_info(i2c_client_s,"i2c i/o error MBAR: rc == %d (should be 2)\n",rc);
156
157 v4l_info(i2c_client_s,"Camera Present: %s\n",
158 (buf[1+BT832_CAM_STATUS] & BT832_56_CAMERA_PRESENT) ? "yes":"no");
159
160 bt832_hexdump(i2c_client_s,buf);
161 kfree(buf);
162 return 1;
163}
164
165
166
167static int bt832_attach(struct i2c_adapter *adap, int addr, int kind)
168{
169 struct bt832 *t;
170
171 client_template.adapter = adap;
172 client_template.addr = addr;
173
174 if (NULL == (t = kzalloc(sizeof(*t), GFP_KERNEL)))
175 return -ENOMEM;
176 t->client = client_template;
177 i2c_set_clientdata(&t->client, t);
178 i2c_attach_client(&t->client);
179
180 v4l_info(&t->client,"chip found @ 0x%x\n", addr<<1);
181
182 if(! bt832_init(&t->client)) {
183 bt832_detach(&t->client);
184 return -1;
185 }
186
187 return 0;
188}
189
190static int bt832_probe(struct i2c_adapter *adap)
191{
192 if (adap->class & I2C_CLASS_TV_ANALOG)
193 return i2c_probe(adap, &addr_data, bt832_attach);
194 return 0;
195}
196
197static int bt832_detach(struct i2c_client *client)
198{
199 struct bt832 *t = i2c_get_clientdata(client);
200
201 v4l_info(&t->client,"dettach\n");
202 i2c_detach_client(client);
203 kfree(t);
204 return 0;
205}
206
207static int
208bt832_command(struct i2c_client *client, unsigned int cmd, void *arg)
209{
210 struct bt832 *t = i2c_get_clientdata(client);
211
212 if (debug>1)
213 v4l_i2c_print_ioctl(&t->client,cmd);
214
215 switch (cmd) {
216 case BT832_HEXDUMP: {
217 unsigned char *buf;
218 buf = kmalloc(65, GFP_KERNEL);
219 if (!buf) {
220 v4l_err(&t->client,
221 "Unable to allocate memory\n");
222 break;
223 }
224 bt832_hexdump(&t->client,buf);
225 kfree(buf);
226 }
227 break;
228 case BT832_REATTACH:
229 v4l_info(&t->client,"re-attach\n");
230 i2c_del_driver(&driver);
231 i2c_add_driver(&driver);
232 break;
233 }
234 return 0;
235}
236
237/* ----------------------------------------------------------------------- */
238
239static struct i2c_driver driver = {
240 .driver = {
241 .name = "bt832",
242 },
243 .id = 0, /* FIXME */
244 .attach_adapter = bt832_probe,
245 .detach_client = bt832_detach,
246 .command = bt832_command,
247};
248static struct i2c_client client_template =
249{
250 .name = "bt832",
251 .driver = &driver,
252};
253
254
255static int __init bt832_init_module(void)
256{
257 return i2c_add_driver(&driver);
258}
259
260static void __exit bt832_cleanup_module(void)
261{
262 i2c_del_driver(&driver);
263}
264
265module_init(bt832_init_module);
266module_exit(bt832_cleanup_module);
267
268/*
269 * Overrides for Emacs so that we follow Linus's tabbing style.
270 * ---------------------------------------------------------------------------
271 * Local variables:
272 * c-basic-offset: 8
273 * End:
274 */
diff --git a/drivers/media/video/bt8xx/bt832.h b/drivers/media/video/bt8xx/bt832.h
deleted file mode 100644
index 1ce8fa71f7db..000000000000
--- a/drivers/media/video/bt8xx/bt832.h
+++ /dev/null
@@ -1,305 +0,0 @@
1/* Bt832 CMOS Camera Video Processor (VP)
2
3 The Bt832 CMOS Camera Video Processor chip connects a Quartsight CMOS
4 color digital camera directly to video capture devices via an 8-bit,
5 4:2:2 YUV or YCrCb video interface.
6
7 i2c addresses: 0x88 or 0x8a
8 */
9
10/* The 64 registers: */
11
12// Input Processor
13#define BT832_OFFSET 0
14#define BT832_RCOMP 1
15#define BT832_G1COMP 2
16#define BT832_G2COMP 3
17#define BT832_BCOMP 4
18// Exposures:
19#define BT832_FINEH 5
20#define BT832_FINEL 6
21#define BT832_COARSEH 7
22#define BT832_COARSEL 8
23#define BT832_CAMGAIN 9
24// Main Processor:
25#define BT832_M00 10
26#define BT832_M01 11
27#define BT832_M02 12
28#define BT832_M10 13
29#define BT832_M11 14
30#define BT832_M12 15
31#define BT832_M20 16
32#define BT832_M21 17
33#define BT832_M22 18
34#define BT832_APCOR 19
35#define BT832_GAMCOR 20
36// Level Accumulator Inputs
37#define BT832_VPCONTROL2 21
38#define BT832_ZONECODE0 22
39#define BT832_ZONECODE1 23
40#define BT832_ZONECODE2 24
41#define BT832_ZONECODE3 25
42// Level Accumulator Outputs:
43#define BT832_RACC 26
44#define BT832_GACC 27
45#define BT832_BACC 28
46#define BT832_BLACKACC 29
47#define BT832_EXP_AGC 30
48#define BT832_LACC0 31
49#define BT832_LACC1 32
50#define BT832_LACC2 33
51#define BT832_LACC3 34
52#define BT832_LACC4 35
53#define BT832_LACC5 36
54#define BT832_LACC6 37
55#define BT832_LACC7 38
56// System:
57#define BT832_VP_CONTROL0 39
58#define BT832_VP_CONTROL1 40
59#define BT832_THRESH 41
60#define BT832_VP_TESTCONTROL0 42
61#define BT832_VP_DMCODE 43
62#define BT832_ACB_CONFIG 44
63#define BT832_ACB_GNBASE 45
64#define BT832_ACB_MU 46
65#define BT832_CAM_TEST0 47
66#define BT832_AEC_CONFIG 48
67#define BT832_AEC_TL 49
68#define BT832_AEC_TC 50
69#define BT832_AEC_TH 51
70// Status:
71#define BT832_VP_STATUS 52
72#define BT832_VP_LINECOUNT 53
73#define BT832_CAM_DEVICEL 54 // e.g. 0x19
74#define BT832_CAM_DEVICEH 55 // e.g. 0x40 == 0x194 Mask0, 0x194 = 404 decimal (VVL-404 camera)
75#define BT832_CAM_STATUS 56
76 #define BT832_56_CAMERA_PRESENT 0x20
77//Camera Setups:
78#define BT832_CAM_SETUP0 57
79#define BT832_CAM_SETUP1 58
80#define BT832_CAM_SETUP2 59
81#define BT832_CAM_SETUP3 60
82// System:
83#define BT832_DEFCOR 61
84#define BT832_VP_TESTCONTROL1 62
85#define BT832_DEVICE_ID 63
86# define BT832_DEVICE_ID__31 0x31 // Bt832 has ID 0x31
87
88/* STMicroelectronivcs VV5404 camera module
89 i2c: 0x20: sensor address
90 i2c: 0xa0: eeprom for ccd defect map
91 */
92#define VV5404_device_h 0x00 // 0x19
93#define VV5404_device_l 0x01 // 0x40
94#define VV5404_status0 0x02
95#define VV5404_linecountc 0x03 // current line counter
96#define VV5404_linecountl 0x04
97#define VV5404_setup0 0x10
98#define VV5404_setup1 0x11
99#define VV5404_setup2 0x12
100#define VV5404_setup4 0x14
101#define VV5404_setup5 0x15
102#define VV5404_fine_h 0x20 // fine exposure
103#define VV5404_fine_l 0x21
104#define VV5404_coarse_h 0x22 //coarse exposure
105#define VV5404_coarse_l 0x23
106#define VV5404_gain 0x24 // ADC pre-amp gain setting
107#define VV5404_clk_div 0x25
108#define VV5404_cr 0x76 // control register
109#define VV5404_as0 0x77 // ADC setup register
110
111
112// IOCTL
113#define BT832_HEXDUMP _IOR('b',1,int)
114#define BT832_REATTACH _IOR('b',2,int)
115
116/* from BT8x8VXD/capdrv/dialogs.cpp */
117
118/*
119typedef enum { SVI, Logitech, Rockwell } CAMERA;
120
121static COMBOBOX_ENTRY gwCameraOptions[] =
122{
123 { SVI, "Silicon Vision 512N" },
124 { Logitech, "Logitech VideoMan 1.3" },
125 { Rockwell, "Rockwell QuartzSight PCI 1.0" }
126};
127
128// SRAM table values
129//===========================================================================
130typedef enum { TGB_NTSC624, TGB_NTSC780, TGB_NTSC858, TGB_NTSC392 } TimeGenByte;
131
132BYTE SRAMTable[][ 60 ] =
133{
134 // TGB_NTSC624
135 {
136 0x33, // size of table = 51
137 0x0E, 0xC0, 0x00, 0x00, 0x90, 0x02, 0x03, 0x10, 0x03, 0x06,
138 0x10, 0x04, 0x12, 0x12, 0x05, 0x02, 0x13, 0x04, 0x19, 0x00,
139 0x04, 0x39, 0x00, 0x06, 0x59, 0x08, 0x03, 0x85, 0x08, 0x07,
140 0x03, 0x50, 0x00, 0x91, 0x40, 0x00, 0x11, 0x01, 0x01, 0x4D,
141 0x0D, 0x02, 0x03, 0x11, 0x01, 0x05, 0x37, 0x00, 0x37, 0x21, 0x00
142 },
143 // TGB_NTSC780
144 {
145 0x33, // size of table = 51
146 0x0e, 0xc0, 0x00, 0x00, 0x90, 0xe2, 0x03, 0x10, 0x03, 0x06,
147 0x10, 0x34, 0x12, 0x12, 0x65, 0x02, 0x13, 0x24, 0x19, 0x00,
148 0x24, 0x39, 0x00, 0x96, 0x59, 0x08, 0x93, 0x85, 0x08, 0x97,
149 0x03, 0x50, 0x50, 0xaf, 0x40, 0x30, 0x5f, 0x01, 0xf1, 0x7f,
150 0x0d, 0xf2, 0x03, 0x11, 0xf1, 0x05, 0x37, 0x30, 0x85, 0x21, 0x50
151 },
152 // TGB_NTSC858
153 {
154 0x33, // size of table = 51
155 0x0c, 0xc0, 0x00, 0x00, 0x90, 0xc2, 0x03, 0x10, 0x03, 0x06,
156 0x10, 0x34, 0x12, 0x12, 0x65, 0x02, 0x13, 0x24, 0x19, 0x00,
157 0x24, 0x39, 0x00, 0x96, 0x59, 0x08, 0x93, 0x83, 0x08, 0x97,
158 0x03, 0x50, 0x30, 0xc0, 0x40, 0x30, 0x86, 0x01, 0x01, 0xa6,
159 0x0d, 0x62, 0x03, 0x11, 0x61, 0x05, 0x37, 0x30, 0xac, 0x21, 0x50
160 },
161 // TGB_NTSC392
162 // This table has been modified to be used for Fusion Rev D
163 {
164 0x2A, // size of table = 42
165 0x06, 0x08, 0x04, 0x0a, 0xc0, 0x00, 0x18, 0x08, 0x03, 0x24,
166 0x08, 0x07, 0x02, 0x90, 0x02, 0x08, 0x10, 0x04, 0x0c, 0x10,
167 0x05, 0x2c, 0x11, 0x04, 0x55, 0x48, 0x00, 0x05, 0x50, 0x00,
168 0xbf, 0x0c, 0x02, 0x2f, 0x3d, 0x00, 0x2f, 0x3f, 0x00, 0xc3,
169 0x20, 0x00
170 }
171};
172
173//===========================================================================
174// This is the structure of the camera specifications
175//===========================================================================
176typedef struct tag_cameraSpec
177{
178 SignalFormat signal; // which digital signal format the camera has
179 VideoFormat vidFormat; // video standard
180 SyncVideoRef syncRef; // which sync video reference is used
181 State syncOutput; // enable sync output for sync video input?
182 DecInputClk iClk; // which input clock is used
183 TimeGenByte tgb; // which timing generator byte does the camera use
184 int HReset; // select 64, 48, 32, or 16 CLKx1 for HReset
185 PLLFreq pllFreq; // what synthesized frequency to set PLL to
186 VSIZEPARMS vSize; // video size the camera produces
187 int lineCount; // expected total number of half-line per frame - 1
188 BOOL interlace; // interlace signal?
189} CameraSpec;
190
191//===========================================================================
192// <UPDATE REQUIRED>
193// Camera specifications database. Update this table whenever camera spec
194// has been changed or added/deleted supported camera models
195//===========================================================================
196static CameraSpec dbCameraSpec[ N_CAMERAOPTIONS ] =
197{ // Silicon Vision 512N
198 { Signal_CCIR656, VFormat_NTSC, VRef_alignedCb, Off, DecClk_GPCLK, TGB_NTSC624, 64, KHz19636,
199 // Clkx1_HACTIVE, Clkx1_HDELAY, VActive, VDelay, linesPerField; lineCount, Interlace
200 { 512, 0x64, 480, 0x13, 240 }, 0, TRUE
201 },
202 // Logitech VideoMan 1.3
203 { Signal_CCIR656, VFormat_NTSC, VRef_alignedCb, Off, DecClk_GPCLK, TGB_NTSC780, 64, KHz24545,
204 // Clkx1_HACTIVE, Clkx1_HDELAY, VActive, VDelay, linesPerField; lineCount, Interlace
205 { 640, 0x80, 480, 0x1A, 240 }, 0, TRUE
206 },
207 // Rockwell QuartzSight
208 // Note: Fusion Rev D (rev ID 0x02) and later supports 16 pixels for HReset which is preferable.
209 // Use 32 for earlier version of hardware. Clkx1_HDELAY also changed from 0x27 to 0x20.
210 { Signal_CCIR656, VFormat_NTSC, VRef_alignedCb, Off, DecClk_GPCLK, TGB_NTSC392, 16, KHz28636,
211 // Clkx1_HACTIVE, Clkx1_HDELAY, VActive, VDelay, linesPerField; lineCount, Interlace
212 { 352, 0x20, 576, 0x08, 288 }, 607, FALSE
213 }
214};
215*/
216
217/*
218The corresponding APIs required to be invoked are:
219SetConnector( ConCamera, TRUE/FALSE );
220SetSignalFormat( spec.signal );
221SetVideoFormat( spec.vidFormat );
222SetSyncVideoRef( spec.syncRef );
223SetEnableSyncOutput( spec.syncOutput );
224SetTimGenByte( SRAMTable[ spec.tgb ], SRAMTableSize[ spec.tgb ] );
225SetHReset( spec.HReset );
226SetPLL( spec.pllFreq );
227SetDecInputClock( spec.iClk );
228SetVideoInfo( spec.vSize );
229SetTotalLineCount( spec.lineCount );
230SetInterlaceMode( spec.interlace );
231*/
232
233/* from web:
234 Video Sampling
235Digital video is a sampled form of analog video. The most common sampling schemes in use today are:
236 Pixel Clock Horiz Horiz Vert
237 Rate Total Active
238NTSC square pixel 12.27 MHz 780 640 525
239NTSC CCIR-601 13.5 MHz 858 720 525
240NTSC 4FSc 14.32 MHz 910 768 525
241PAL square pixel 14.75 MHz 944 768 625
242PAL CCIR-601 13.5 MHz 864 720 625
243PAL 4FSc 17.72 MHz 1135 948 625
244
245For the CCIR-601 standards, the sampling is based on a static orthogonal sampling grid. The luminance component (Y) is sampled at 13.5 MHz, while the two color difference signals, Cr and Cb are sampled at half that, or 6.75 MHz. The Cr and Cb samples are colocated with alternate Y samples, and they are taken at the same position on each line, such that one sample is coincident with the 50% point of the falling edge of analog sync. The samples are coded to either 8 or 10 bits per component.
246*/
247
248/* from DScaler:*/
249/*
250//===========================================================================
251// CCIR656 Digital Input Support: The tables were taken from DScaler proyect
252//
253// 13 Dec 2000 - Michael Eskin, Conexant Systems - Initial version
254//
255
256//===========================================================================
257// Timing generator SRAM table values for CCIR601 720x480 NTSC
258//===========================================================================
259// For NTSC CCIR656
260BYTE BtCard::SRAMTable_NTSC[] =
261{
262 // SRAM Timing Table for NTSC
263 0x0c, 0xc0, 0x00,
264 0x00, 0x90, 0xc2,
265 0x03, 0x10, 0x03,
266 0x06, 0x10, 0x34,
267 0x12, 0x12, 0x65,
268 0x02, 0x13, 0x24,
269 0x19, 0x00, 0x24,
270 0x39, 0x00, 0x96,
271 0x59, 0x08, 0x93,
272 0x83, 0x08, 0x97,
273 0x03, 0x50, 0x30,
274 0xc0, 0x40, 0x30,
275 0x86, 0x01, 0x01,
276 0xa6, 0x0d, 0x62,
277 0x03, 0x11, 0x61,
278 0x05, 0x37, 0x30,
279 0xac, 0x21, 0x50
280};
281
282//===========================================================================
283// Timing generator SRAM table values for CCIR601 720x576 NTSC
284//===========================================================================
285// For PAL CCIR656
286BYTE BtCard::SRAMTable_PAL[] =
287{
288 // SRAM Timing Table for PAL
289 0x36, 0x11, 0x01,
290 0x00, 0x90, 0x02,
291 0x05, 0x10, 0x04,
292 0x16, 0x14, 0x05,
293 0x11, 0x00, 0x04,
294 0x12, 0xc0, 0x00,
295 0x31, 0x00, 0x06,
296 0x51, 0x08, 0x03,
297 0x89, 0x08, 0x07,
298 0xc0, 0x44, 0x00,
299 0x81, 0x01, 0x01,
300 0xa9, 0x0d, 0x02,
301 0x02, 0x50, 0x03,
302 0x37, 0x3d, 0x00,
303 0xaf, 0x21, 0x00,
304};
305*/
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 13742b0bbe3e..d24dcc025e37 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -44,7 +44,6 @@
44 44
45/* fwd decl */ 45/* fwd decl */
46static void boot_msp34xx(struct bttv *btv, int pin); 46static void boot_msp34xx(struct bttv *btv, int pin);
47static void boot_bt832(struct bttv *btv);
48static void hauppauge_eeprom(struct bttv *btv); 47static void hauppauge_eeprom(struct bttv *btv);
49static void avermedia_eeprom(struct bttv *btv); 48static void avermedia_eeprom(struct bttv *btv);
50static void osprey_eeprom(struct bttv *btv, const u8 ee[256]); 49static void osprey_eeprom(struct bttv *btv, const u8 ee[256]);
@@ -2217,9 +2216,9 @@ struct tvcard bttv_tvcards[] = {
2217 .tuner_addr = ADDR_UNSET, 2216 .tuner_addr = ADDR_UNSET,
2218 .radio_addr = ADDR_UNSET, 2217 .radio_addr = ADDR_UNSET,
2219 }, 2218 },
2220 [BTTV_BOARD_VD009X1_MINIDIN] = { 2219 [BTTV_BOARD_VD009X1_VD011_MINIDIN] = {
2221 /* M.Klahr@phytec.de */ 2220 /* M.Klahr@phytec.de */
2222 .name = "PHYTEC VD-009-X1 MiniDIN (bt878)", 2221 .name = "PHYTEC VD-009-X1 VD-011 MiniDIN (bt878)",
2223 .video_inputs = 4, 2222 .video_inputs = 4,
2224 .audio_inputs = 0, 2223 .audio_inputs = 0,
2225 .tuner = UNSET, /* card has no tuner */ 2224 .tuner = UNSET, /* card has no tuner */
@@ -2227,14 +2226,14 @@ struct tvcard bttv_tvcards[] = {
2227 .gpiomask = 0x00, 2226 .gpiomask = 0x00,
2228 .muxsel = { 2, 3, 1, 0 }, 2227 .muxsel = { 2, 3, 1, 0 },
2229 .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ 2228 .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
2230 .needs_tvaudio = 1, 2229 .needs_tvaudio = 0,
2231 .pll = PLL_28, 2230 .pll = PLL_28,
2232 .tuner_type = UNSET, 2231 .tuner_type = UNSET,
2233 .tuner_addr = ADDR_UNSET, 2232 .tuner_addr = ADDR_UNSET,
2234 .radio_addr = ADDR_UNSET, 2233 .radio_addr = ADDR_UNSET,
2235 }, 2234 },
2236 [BTTV_BOARD_VD009X1_COMBI] = { 2235 [BTTV_BOARD_VD009X1_VD011_COMBI] = {
2237 .name = "PHYTEC VD-009-X1 Combi (bt878)", 2236 .name = "PHYTEC VD-009-X1 VD-011 Combi (bt878)",
2238 .video_inputs = 4, 2237 .video_inputs = 4,
2239 .audio_inputs = 0, 2238 .audio_inputs = 0,
2240 .tuner = UNSET, /* card has no tuner */ 2239 .tuner = UNSET, /* card has no tuner */
@@ -2242,7 +2241,7 @@ struct tvcard bttv_tvcards[] = {
2242 .gpiomask = 0x00, 2241 .gpiomask = 0x00,
2243 .muxsel = { 2, 3, 1, 1 }, 2242 .muxsel = { 2, 3, 1, 1 },
2244 .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ 2243 .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
2245 .needs_tvaudio = 1, 2244 .needs_tvaudio = 0,
2246 .pll = PLL_28, 2245 .pll = PLL_28,
2247 .tuner_type = UNSET, 2246 .tuner_type = UNSET,
2248 .tuner_addr = ADDR_UNSET, 2247 .tuner_addr = ADDR_UNSET,
@@ -3061,6 +3060,54 @@ struct tvcard bttv_tvcards[] = {
3061 .pll = PLL_28, 3060 .pll = PLL_28,
3062 .has_radio = 1, 3061 .has_radio = 1,
3063 .has_remote = 1, 3062 .has_remote = 1,
3063 },
3064 [BTTV_BOARD_VD012] = {
3065 /* D.Heer@Phytec.de */
3066 .name = "PHYTEC VD-012 (bt878)",
3067 .video_inputs = 4,
3068 .audio_inputs = 0,
3069 .tuner = UNSET, /* card has no tuner */
3070 .svhs = UNSET, /* card has no s-video */
3071 .gpiomask = 0x00,
3072 .muxsel = { 0, 2, 3, 1 },
3073 .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
3074 .needs_tvaudio = 0,
3075 .pll = PLL_28,
3076 .tuner_type = UNSET,
3077 .tuner_addr = ADDR_UNSET,
3078 .radio_addr = ADDR_UNSET,
3079 },
3080 [BTTV_BOARD_VD012_X1] = {
3081 /* D.Heer@Phytec.de */
3082 .name = "PHYTEC VD-012-X1 (bt878)",
3083 .video_inputs = 4,
3084 .audio_inputs = 0,
3085 .tuner = UNSET, /* card has no tuner */
3086 .svhs = 3,
3087 .gpiomask = 0x00,
3088 .muxsel = { 2, 3, 1 },
3089 .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
3090 .needs_tvaudio = 0,
3091 .pll = PLL_28,
3092 .tuner_type = UNSET,
3093 .tuner_addr = ADDR_UNSET,
3094 .radio_addr = ADDR_UNSET,
3095 },
3096 [BTTV_BOARD_VD012_X2] = {
3097 /* D.Heer@Phytec.de */
3098 .name = "PHYTEC VD-012-X2 (bt878)",
3099 .video_inputs = 4,
3100 .audio_inputs = 0,
3101 .tuner = UNSET, /* card has no tuner */
3102 .svhs = 3,
3103 .gpiomask = 0x00,
3104 .muxsel = { 3, 2, 1 },
3105 .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
3106 .needs_tvaudio = 0,
3107 .pll = PLL_28,
3108 .tuner_type = UNSET,
3109 .tuner_addr = ADDR_UNSET,
3110 .radio_addr = ADDR_UNSET,
3064 } 3111 }
3065}; 3112};
3066 3113
@@ -3673,13 +3720,6 @@ void __devinit bttv_init_card2(struct bttv *btv)
3673 if (bttv_tvcards[btv->c.type].audio_mode_gpio) 3720 if (bttv_tvcards[btv->c.type].audio_mode_gpio)
3674 btv->audio_mode_gpio=bttv_tvcards[btv->c.type].audio_mode_gpio; 3721 btv->audio_mode_gpio=bttv_tvcards[btv->c.type].audio_mode_gpio;
3675 3722
3676 if (bttv_tvcards[btv->c.type].digital_mode == DIGITAL_MODE_CAMERA) {
3677 /* detect Bt832 chip for quartzsight digital camera */
3678 if ((bttv_I2CRead(btv, I2C_ADDR_BT832_ALT1, "Bt832") >=0) ||
3679 (bttv_I2CRead(btv, I2C_ADDR_BT832_ALT2, "Bt832") >=0))
3680 boot_bt832(btv);
3681 }
3682
3683 if (!autoload) 3723 if (!autoload)
3684 return; 3724 return;
3685 3725
@@ -4075,10 +4115,6 @@ static void __devinit boot_msp34xx(struct bttv *btv, int pin)
4075 "init [%d]\n", btv->c.nr, pin); 4115 "init [%d]\n", btv->c.nr, pin);
4076} 4116}
4077 4117
4078static void __devinit boot_bt832(struct bttv *btv)
4079{
4080}
4081
4082/* ----------------------------------------------------------------------- */ 4118/* ----------------------------------------------------------------------- */
4083/* Imagenation L-Model PXC200 Framegrabber */ 4119/* Imagenation L-Model PXC200 Framegrabber */
4084/* This is basically the same procedure as 4120/* This is basically the same procedure as
diff --git a/drivers/media/video/bt8xx/bttv-gpio.c b/drivers/media/video/bt8xx/bttv-gpio.c
index dce6dae5740e..74c325e594a2 100644
--- a/drivers/media/video/bt8xx/bttv-gpio.c
+++ b/drivers/media/video/bt8xx/bttv-gpio.c
@@ -42,7 +42,7 @@ static int bttv_sub_bus_match(struct device *dev, struct device_driver *drv)
42 struct bttv_sub_driver *sub = to_bttv_sub_drv(drv); 42 struct bttv_sub_driver *sub = to_bttv_sub_drv(drv);
43 int len = strlen(sub->wanted); 43 int len = strlen(sub->wanted);
44 44
45 if (0 == strncmp(dev->bus_id, sub->wanted, len)) 45 if (0 == strncmp(dev_name(dev), sub->wanted, len))
46 return 1; 46 return 1;
47 return 0; 47 return 0;
48} 48}
@@ -91,15 +91,14 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
91 sub->dev.parent = &core->pci->dev; 91 sub->dev.parent = &core->pci->dev;
92 sub->dev.bus = &bttv_sub_bus_type; 92 sub->dev.bus = &bttv_sub_bus_type;
93 sub->dev.release = release_sub_device; 93 sub->dev.release = release_sub_device;
94 snprintf(sub->dev.bus_id,sizeof(sub->dev.bus_id),"%s%d", 94 dev_set_name(&sub->dev, "%s%d", name, core->nr);
95 name, core->nr);
96 95
97 err = device_register(&sub->dev); 96 err = device_register(&sub->dev);
98 if (0 != err) { 97 if (0 != err) {
99 kfree(sub); 98 kfree(sub);
100 return err; 99 return err;
101 } 100 }
102 printk("bttv%d: add subdevice \"%s\"\n", core->nr, sub->dev.bus_id); 101 printk("bttv%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
103 list_add_tail(&sub->list,&core->subs); 102 list_add_tail(&sub->list,&core->subs);
104 return 0; 103 return 0;
105} 104}
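The bttv-gpio.c hunk above tracks the kernel-wide retirement of the fixed-size dev->bus_id array: names are now written with dev_set_name() and read back with dev_name(). A compact restatement of the new bus-match callback under that convention; struct bttv_sub_driver and to_bttv_sub_drv() come from bttv.h as in the hunks above.

#include <linux/device.h>
#include <linux/string.h>

static int bttv_sub_bus_match(struct device *dev, struct device_driver *drv)
{
        struct bttv_sub_driver *sub = to_bttv_sub_drv(drv);

        /* dev_name(dev) replaces the removed dev->bus_id array */
        return strncmp(dev_name(dev), sub->wanted, strlen(sub->wanted)) == 0;
}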
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index 46cb90e0985b..529bf6cf634d 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -130,8 +130,8 @@
130#define BTTV_BOARD_XGUARD 0x67 130#define BTTV_BOARD_XGUARD 0x67
131#define BTTV_BOARD_NEBULA_DIGITV 0x68 131#define BTTV_BOARD_NEBULA_DIGITV 0x68
132#define BTTV_BOARD_PV143 0x69 132#define BTTV_BOARD_PV143 0x69
133#define BTTV_BOARD_VD009X1_MINIDIN 0x6a 133#define BTTV_BOARD_VD009X1_VD011_MINIDIN 0x6a
134#define BTTV_BOARD_VD009X1_COMBI 0x6b 134#define BTTV_BOARD_VD009X1_VD011_COMBI 0x6b
135#define BTTV_BOARD_VD009_MINIDIN 0x6c 135#define BTTV_BOARD_VD009_MINIDIN 0x6c
136#define BTTV_BOARD_VD009_COMBI 0x6d 136#define BTTV_BOARD_VD009_COMBI 0x6d
137#define BTTV_BOARD_IVC100 0x6e 137#define BTTV_BOARD_IVC100 0x6e
@@ -177,6 +177,10 @@
177#define BTTV_BOARD_GEOVISION_GV600 0x96 177#define BTTV_BOARD_GEOVISION_GV600 0x96
178#define BTTV_BOARD_KOZUMI_KTV_01C 0x97 178#define BTTV_BOARD_KOZUMI_KTV_01C 0x97
179#define BTTV_BOARD_ENLTV_FM_2 0x98 179#define BTTV_BOARD_ENLTV_FM_2 0x98
180#define BTTV_BOARD_VD012 0x99
181#define BTTV_BOARD_VD012_X1 0x9a
182#define BTTV_BOARD_VD012_X2 0x9b
183
180 184
181/* more card-specific defines */ 185/* more card-specific defines */
182#define PT2254_L_CHANNEL 0x10 186#define PT2254_L_CHANNEL 0x10
@@ -308,7 +312,7 @@ struct bttv_sub_device {
308 312
309struct bttv_sub_driver { 313struct bttv_sub_driver {
310 struct device_driver drv; 314 struct device_driver drv;
311 char wanted[BUS_ID_SIZE]; 315 char wanted[20];
312 int (*probe)(struct bttv_sub_device *sub); 316 int (*probe)(struct bttv_sub_device *sub);
313 void (*remove)(struct bttv_sub_device *sub); 317 void (*remove)(struct bttv_sub_device *sub);
314}; 318};
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index b4d940b2e447..199a4d225caf 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -459,7 +459,7 @@ struct bttv {
459}; 459};
460 460
461/* our devices */ 461/* our devices */
462#define BTTV_MAX 16 462#define BTTV_MAX 32
463extern unsigned int bttv_num; 463extern unsigned int bttv_num;
464extern struct bttv bttvs[BTTV_MAX]; 464extern struct bttv bttvs[BTTV_MAX];
465 465
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index ace4ff9ea023..17f80d03f38e 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -706,8 +706,7 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
706 * Video4linux interfacing 706 * Video4linux interfacing
707 */ 707 */
708 708
709static int qcam_do_ioctl(struct inode *inode, struct file *file, 709static int qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
710 unsigned int cmd, void *arg)
711{ 710{
712 struct video_device *dev = video_devdata(file); 711 struct video_device *dev = video_devdata(file);
713 struct qcam_device *qcam=(struct qcam_device *)dev; 712 struct qcam_device *qcam=(struct qcam_device *)dev;
@@ -867,7 +866,7 @@ static int qcam_do_ioctl(struct inode *inode, struct file *file,
867static int qcam_ioctl(struct inode *inode, struct file *file, 866static int qcam_ioctl(struct inode *inode, struct file *file,
868 unsigned int cmd, unsigned long arg) 867 unsigned int cmd, unsigned long arg)
869{ 868{
870 return video_usercopy(inode, file, cmd, arg, qcam_do_ioctl); 869 return video_usercopy(file, cmd, arg, qcam_do_ioctl);
871} 870}
872 871
873static ssize_t qcam_read(struct file *file, char __user *buf, 872static ssize_t qcam_read(struct file *file, char __user *buf,
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 0f930d351466..21c71eb085db 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -500,8 +500,7 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
500 * Video4linux interfacing 500 * Video4linux interfacing
501 */ 501 */
502 502
503static int qcam_do_ioctl(struct inode *inode, struct file *file, 503static int qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
504 unsigned int cmd, void *arg)
505{ 504{
506 struct video_device *dev = video_devdata(file); 505 struct video_device *dev = video_devdata(file);
507 struct qcam_device *qcam=(struct qcam_device *)dev; 506 struct qcam_device *qcam=(struct qcam_device *)dev;
@@ -667,9 +666,9 @@ static int qcam_do_ioctl(struct inode *inode, struct file *file,
667} 666}
668 667
669static int qcam_ioctl(struct inode *inode, struct file *file, 668static int qcam_ioctl(struct inode *inode, struct file *file,
670 unsigned int cmd, unsigned long arg) 669 unsigned int cmd, unsigned long arg)
671{ 670{
672 return video_usercopy(inode, file, cmd, arg, qcam_do_ioctl); 671 return video_usercopy(file, cmd, arg, qcam_do_ioctl);
673} 672}
674 673
675static ssize_t qcam_read(struct file *file, char __user *buf, 674static ssize_t qcam_read(struct file *file, char __user *buf,
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 16c094f77852..028a400d2453 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -3333,8 +3333,7 @@ static ssize_t cpia_read(struct file *file, char __user *buf,
3333 return cam->decompressed_frame.count; 3333 return cam->decompressed_frame.count;
3334} 3334}
3335 3335
3336static int cpia_do_ioctl(struct inode *inode, struct file *file, 3336static int cpia_do_ioctl(struct file *file, unsigned int cmd, void *arg)
3337 unsigned int ioctlnr, void *arg)
3338{ 3337{
3339 struct video_device *dev = file->private_data; 3338 struct video_device *dev = file->private_data;
3340 struct cam_data *cam = video_get_drvdata(dev); 3339 struct cam_data *cam = video_get_drvdata(dev);
@@ -3347,9 +3346,9 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3347 if (mutex_lock_interruptible(&cam->busy_lock)) 3346 if (mutex_lock_interruptible(&cam->busy_lock))
3348 return -EINTR; 3347 return -EINTR;
3349 3348
3350 //DBG("cpia_ioctl: %u\n", ioctlnr); 3349 /* DBG("cpia_ioctl: %u\n", cmd); */
3351 3350
3352 switch (ioctlnr) { 3351 switch (cmd) {
3353 /* query capabilities */ 3352 /* query capabilities */
3354 case VIDIOCGCAP: 3353 case VIDIOCGCAP:
3355 { 3354 {
@@ -3724,7 +3723,7 @@ static int cpia_do_ioctl(struct inode *inode, struct file *file,
3724static int cpia_ioctl(struct inode *inode, struct file *file, 3723static int cpia_ioctl(struct inode *inode, struct file *file,
3725 unsigned int cmd, unsigned long arg) 3724 unsigned int cmd, unsigned long arg)
3726{ 3725{
3727 return video_usercopy(inode, file, cmd, arg, cpia_do_ioctl); 3726 return video_usercopy(file, cmd, arg, cpia_do_ioctl);
3728} 3727}
3729 3728
3730 3729
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index 7e791b6923f9..1cc0df8befff 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -25,7 +25,7 @@
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 * 26 *
27 * Stripped of 2.4 stuff ready for main kernel submit by 27 * Stripped of 2.4 stuff ready for main kernel submit by
28 * Alan Cox <alan@redhat.com> 28 * Alan Cox <alan@lxorguk.ukuu.org.uk>
29 * 29 *
30 ****************************************************************************/ 30 ****************************************************************************/
31 31
diff --git a/drivers/media/video/cpia2/cpia2_usb.c b/drivers/media/video/cpia2/cpia2_usb.c
index 73511a542077..dc5b07a20f69 100644
--- a/drivers/media/video/cpia2/cpia2_usb.c
+++ b/drivers/media/video/cpia2/cpia2_usb.c
@@ -25,7 +25,7 @@
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 * 26 *
27 * Stripped of 2.4 stuff ready for main kernel submit by 27 * Stripped of 2.4 stuff ready for main kernel submit by
28 * Alan Cox <alan@redhat.com> 28 * Alan Cox <alan@lxorguk.ukuu.org.uk>
29 ****************************************************************************/ 29 ****************************************************************************/
30 30
31#include <linux/kernel.h> 31#include <linux/kernel.h>
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 1c6bd633f193..3c2d7eac1197 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -26,7 +26,7 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 * 27 *
28 * Stripped of 2.4 stuff ready for main kernel submit by 28 * Stripped of 2.4 stuff ready for main kernel submit by
29 * Alan Cox <alan@redhat.com> 29 * Alan Cox <alan@lxorguk.ukuu.org.uk>
30 ****************************************************************************/ 30 ****************************************************************************/
31 31
32#include <linux/version.h> 32#include <linux/version.h>
@@ -1572,8 +1572,7 @@ static int ioctl_dqbuf(void *arg,struct camera_data *cam, struct file *file)
1572 * cpia2_ioctl 1572 * cpia2_ioctl
1573 * 1573 *
1574 *****************************************************************************/ 1574 *****************************************************************************/
1575static int cpia2_do_ioctl(struct inode *inode, struct file *file, 1575static int cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1576 unsigned int ioctl_nr, void *arg)
1577{ 1576{
1578 struct camera_data *cam = video_drvdata(file); 1577 struct camera_data *cam = video_drvdata(file);
1579 int retval = 0; 1578 int retval = 0;
@@ -1591,7 +1590,7 @@ static int cpia2_do_ioctl(struct inode *inode, struct file *file,
1591 } 1590 }
1592 1591
1593 /* Priority check */ 1592 /* Priority check */
1594 switch (ioctl_nr) { 1593 switch (cmd) {
1595 case VIDIOCSWIN: 1594 case VIDIOCSWIN:
1596 case VIDIOCMCAPTURE: 1595 case VIDIOCMCAPTURE:
1597 case VIDIOC_S_FMT: 1596 case VIDIOC_S_FMT:
@@ -1618,7 +1617,7 @@ static int cpia2_do_ioctl(struct inode *inode, struct file *file,
1618 break; 1617 break;
1619 } 1618 }
1620 1619
1621 switch (ioctl_nr) { 1620 switch (cmd) {
1622 case VIDIOCGCAP: /* query capabilities */ 1621 case VIDIOCGCAP: /* query capabilities */
1623 retval = ioctl_cap_query(arg, cam); 1622 retval = ioctl_cap_query(arg, cam);
1624 break; 1623 break;
@@ -1683,7 +1682,7 @@ static int cpia2_do_ioctl(struct inode *inode, struct file *file,
1683 case VIDIOC_ENUMINPUT: 1682 case VIDIOC_ENUMINPUT:
1684 case VIDIOC_G_INPUT: 1683 case VIDIOC_G_INPUT:
1685 case VIDIOC_S_INPUT: 1684 case VIDIOC_S_INPUT:
1686 retval = ioctl_input(ioctl_nr, arg,cam); 1685 retval = ioctl_input(cmd, arg, cam);
1687 break; 1686 break;
1688 1687
1689 case VIDIOC_ENUM_FMT: 1688 case VIDIOC_ENUM_FMT:
@@ -1843,9 +1842,9 @@ static int cpia2_do_ioctl(struct inode *inode, struct file *file,
1843} 1842}
1844 1843
1845static int cpia2_ioctl(struct inode *inode, struct file *file, 1844static int cpia2_ioctl(struct inode *inode, struct file *file,
1846 unsigned int ioctl_nr, unsigned long iarg) 1845 unsigned int cmd, unsigned long arg)
1847{ 1846{
1848 return video_usercopy(inode, file, ioctl_nr, iarg, cpia2_do_ioctl); 1847 return video_usercopy(file, cmd, arg, cpia2_do_ioctl);
1849} 1848}
1850 1849
1851/****************************************************************************** 1850/******************************************************************************
diff --git a/drivers/media/video/cs5345.c b/drivers/media/video/cs5345.c
index a662b15d5b90..70fcd0d5de13 100644
--- a/drivers/media/video/cs5345.c
+++ b/drivers/media/video/cs5345.c
@@ -23,9 +23,9 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/i2c.h> 24#include <linux/i2c.h>
25#include <linux/videodev2.h> 25#include <linux/videodev2.h>
26#include <media/v4l2-i2c-drv.h> 26#include <media/v4l2-device.h>
27#include <media/v4l2-common.h>
28#include <media/v4l2-chip-ident.h> 27#include <media/v4l2-chip-ident.h>
28#include <media/v4l2-i2c-drv.h>
29 29
30MODULE_DESCRIPTION("i2c device driver for cs5345 Audio ADC"); 30MODULE_DESCRIPTION("i2c device driver for cs5345 Audio ADC");
31MODULE_AUTHOR("Hans Verkuil"); 31MODULE_AUTHOR("Hans Verkuil");
@@ -40,111 +40,143 @@ MODULE_PARM_DESC(debug, "Debugging messages, 0=Off (default), 1=On");
40 40
41/* ----------------------------------------------------------------------- */ 41/* ----------------------------------------------------------------------- */
42 42
43static inline int cs5345_write(struct i2c_client *client, u8 reg, u8 value) 43static inline int cs5345_write(struct v4l2_subdev *sd, u8 reg, u8 value)
44{ 44{
45 struct i2c_client *client = v4l2_get_subdevdata(sd);
46
45 return i2c_smbus_write_byte_data(client, reg, value); 47 return i2c_smbus_write_byte_data(client, reg, value);
46} 48}
47 49
48static inline int cs5345_read(struct i2c_client *client, u8 reg) 50static inline int cs5345_read(struct v4l2_subdev *sd, u8 reg)
49{ 51{
52 struct i2c_client *client = v4l2_get_subdevdata(sd);
53
50 return i2c_smbus_read_byte_data(client, reg); 54 return i2c_smbus_read_byte_data(client, reg);
51} 55}
52 56
53static int cs5345_command(struct i2c_client *client, unsigned cmd, void *arg) 57static int cs5345_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
54{ 58{
55 struct v4l2_routing *route = arg; 59 if ((route->input & 0xf) > 6) {
56 struct v4l2_control *ctrl = arg; 60 v4l2_err(sd, "Invalid input %d.\n", route->input);
57 61 return -EINVAL;
58 switch (cmd) {
59 case VIDIOC_INT_G_AUDIO_ROUTING:
60 route->input = cs5345_read(client, 0x09) & 7;
61 route->input |= cs5345_read(client, 0x05) & 0x70;
62 route->output = 0;
63 break;
64
65 case VIDIOC_INT_S_AUDIO_ROUTING:
66 if ((route->input & 0xf) > 6) {
67 v4l_err(client, "Invalid input %d.\n", route->input);
68 return -EINVAL;
69 }
70 cs5345_write(client, 0x09, route->input & 0xf);
71 cs5345_write(client, 0x05, route->input & 0xf0);
72 break;
73
74 case VIDIOC_G_CTRL:
75 if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
76 ctrl->value = (cs5345_read(client, 0x04) & 0x08) != 0;
77 break;
78 }
79 if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
80 return -EINVAL;
81 ctrl->value = cs5345_read(client, 0x07) & 0x3f;
82 if (ctrl->value >= 32)
83 ctrl->value = ctrl->value - 64;
84 break;
85
86 case VIDIOC_S_CTRL:
87 break;
88 if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
89 cs5345_write(client, 0x04, ctrl->value ? 0x80 : 0);
90 break;
91 }
92 if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
93 return -EINVAL;
94 if (ctrl->value > 24 || ctrl->value < -24)
95 return -EINVAL;
96 cs5345_write(client, 0x07, ((u8)ctrl->value) & 0x3f);
97 cs5345_write(client, 0x08, ((u8)ctrl->value) & 0x3f);
98 break;
99
100#ifdef CONFIG_VIDEO_ADV_DEBUG
101 case VIDIOC_DBG_G_REGISTER:
102 case VIDIOC_DBG_S_REGISTER:
103 {
104 struct v4l2_register *reg = arg;
105
106 if (!v4l2_chip_match_i2c_client(client,
107 reg->match_type, reg->match_chip))
108 return -EINVAL;
109 if (!capable(CAP_SYS_ADMIN))
110 return -EPERM;
111 if (cmd == VIDIOC_DBG_G_REGISTER)
112 reg->val = cs5345_read(client, reg->reg & 0x1f);
113 else
114 cs5345_write(client, reg->reg & 0x1f, reg->val & 0xff);
115 break;
116 } 62 }
117#endif 63 cs5345_write(sd, 0x09, route->input & 0xf);
64 cs5345_write(sd, 0x05, route->input & 0xf0);
65 return 0;
66}
118 67
119 case VIDIOC_G_CHIP_IDENT: 68static int cs5345_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
120 return v4l2_chip_ident_i2c_client(client, 69{
121 arg, V4L2_IDENT_CS5345, 0); 70 if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
122 71 ctrl->value = (cs5345_read(sd, 0x04) & 0x08) != 0;
123 case VIDIOC_LOG_STATUS: 72 return 0;
124 { 73 }
125 u8 v = cs5345_read(client, 0x09) & 7; 74 if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
126 u8 m = cs5345_read(client, 0x04);
127 int vol = cs5345_read(client, 0x08) & 0x3f;
128
129 v4l_info(client, "Input: %d%s\n", v,
130 (m & 0x80) ? " (muted)" : "");
131 if (vol >= 32)
132 vol = vol - 64;
133 v4l_info(client, "Volume: %d dB\n", vol);
134 break;
135 }
136
137 default:
138 return -EINVAL; 75 return -EINVAL;
76 ctrl->value = cs5345_read(sd, 0x07) & 0x3f;
77 if (ctrl->value >= 32)
78 ctrl->value = ctrl->value - 64;
79 return 0;
80}
81
82static int cs5345_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
83{
84 if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
85 cs5345_write(sd, 0x04, ctrl->value ? 0x80 : 0);
86 return 0;
139 } 87 }
88 if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
89 return -EINVAL;
90 if (ctrl->value > 24 || ctrl->value < -24)
91 return -EINVAL;
92 cs5345_write(sd, 0x07, ((u8)ctrl->value) & 0x3f);
93 cs5345_write(sd, 0x08, ((u8)ctrl->value) & 0x3f);
94 return 0;
95}
96
97#ifdef CONFIG_VIDEO_ADV_DEBUG
98static int cs5345_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
99{
100 struct i2c_client *client = v4l2_get_subdevdata(sd);
101
102 if (!v4l2_chip_match_i2c_client(client,
103 reg->match_type, reg->match_chip))
104 return -EINVAL;
105 if (!capable(CAP_SYS_ADMIN))
106 return -EPERM;
107 reg->val = cs5345_read(sd, reg->reg & 0x1f);
108 return 0;
109}
110
111static int cs5345_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
112{
113 struct i2c_client *client = v4l2_get_subdevdata(sd);
114
115 if (!v4l2_chip_match_i2c_client(client,
116 reg->match_type, reg->match_chip))
117 return -EINVAL;
118 if (!capable(CAP_SYS_ADMIN))
119 return -EPERM;
120 cs5345_write(sd, reg->reg & 0x1f, reg->val & 0xff);
140 return 0; 121 return 0;
141} 122}
123#endif
124
125static int cs5345_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
126{
127 struct i2c_client *client = v4l2_get_subdevdata(sd);
128
129 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_CS5345, 0);
130}
131
132static int cs5345_log_status(struct v4l2_subdev *sd)
133{
134 u8 v = cs5345_read(sd, 0x09) & 7;
135 u8 m = cs5345_read(sd, 0x04);
136 int vol = cs5345_read(sd, 0x08) & 0x3f;
137
138 v4l2_info(sd, "Input: %d%s\n", v,
139 (m & 0x80) ? " (muted)" : "");
140 if (vol >= 32)
141 vol = vol - 64;
142 v4l2_info(sd, "Volume: %d dB\n", vol);
143 return 0;
144}
145
146static int cs5345_command(struct i2c_client *client, unsigned cmd, void *arg)
147{
148 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
149}
150
151/* ----------------------------------------------------------------------- */
152
153static const struct v4l2_subdev_core_ops cs5345_core_ops = {
154 .log_status = cs5345_log_status,
155 .g_chip_ident = cs5345_g_chip_ident,
156 .g_ctrl = cs5345_g_ctrl,
157 .s_ctrl = cs5345_s_ctrl,
158#ifdef CONFIG_VIDEO_ADV_DEBUG
159 .g_register = cs5345_g_register,
160 .s_register = cs5345_s_register,
161#endif
162};
163
164static const struct v4l2_subdev_audio_ops cs5345_audio_ops = {
165 .s_routing = cs5345_s_routing,
166};
167
168static const struct v4l2_subdev_ops cs5345_ops = {
169 .core = &cs5345_core_ops,
170 .audio = &cs5345_audio_ops,
171};
142 172
143/* ----------------------------------------------------------------------- */ 173/* ----------------------------------------------------------------------- */
144 174
145static int cs5345_probe(struct i2c_client *client, 175static int cs5345_probe(struct i2c_client *client,
146 const struct i2c_device_id *id) 176 const struct i2c_device_id *id)
147{ 177{
178 struct v4l2_subdev *sd;
179
148 /* Check if the adapter supports the needed features */ 180 /* Check if the adapter supports the needed features */
149 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 181 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
150 return -EIO; 182 return -EIO;
@@ -152,9 +184,25 @@ static int cs5345_probe(struct i2c_client *client,
152 v4l_info(client, "chip found @ 0x%x (%s)\n", 184 v4l_info(client, "chip found @ 0x%x (%s)\n",
153 client->addr << 1, client->adapter->name); 185 client->addr << 1, client->adapter->name);
154 186
155 cs5345_write(client, 0x02, 0x00); 187 sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
156 cs5345_write(client, 0x04, 0x01); 188 if (sd == NULL)
157 cs5345_write(client, 0x09, 0x01); 189 return -ENOMEM;
190 v4l2_i2c_subdev_init(sd, client, &cs5345_ops);
191
192 cs5345_write(sd, 0x02, 0x00);
193 cs5345_write(sd, 0x04, 0x01);
194 cs5345_write(sd, 0x09, 0x01);
195 return 0;
196}
197
198/* ----------------------------------------------------------------------- */
199
200static int cs5345_remove(struct i2c_client *client)
201{
202 struct v4l2_subdev *sd = i2c_get_clientdata(client);
203
204 v4l2_device_unregister_subdev(sd);
205 kfree(sd);
158 return 0; 206 return 0;
159} 207}
160 208
@@ -171,5 +219,6 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data = {
171 .driverid = I2C_DRIVERID_CS5345, 219 .driverid = I2C_DRIVERID_CS5345,
172 .command = cs5345_command, 220 .command = cs5345_command,
173 .probe = cs5345_probe, 221 .probe = cs5345_probe,
222 .remove = cs5345_remove,
174 .id_table = cs5345_id, 223 .id_table = cs5345_id,
175}; 224};
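The cs5345 changes above are the first instance of a conversion pattern that repeats for cs53l32a below: the single i2c command() dispatcher is split into per-operation callbacks, the callbacks are collected into v4l2_subdev_core_ops/v4l2_subdev_audio_ops tables, and probe() now allocates and initializes a struct v4l2_subdev bound to the i2c client. The skeleton below is a hedged summary of that shape with placeholder foo_* names; the prototypes of v4l2_i2c_subdev_init() and v4l2_subdev_command() are assumed from these hunks rather than restated from the headers.

static int foo_log_status(struct v4l2_subdev *sd)
{
        v4l2_info(sd, "example subdev op\n");
        return 0;
}

static const struct v4l2_subdev_core_ops foo_core_ops = {
        .log_status = foo_log_status,   /* per-op callbacks replace one big switch */
};

static const struct v4l2_subdev_ops foo_ops = {
        .core = &foo_core_ops,
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
        struct v4l2_subdev *sd = kzalloc(sizeof(*sd), GFP_KERNEL);

        if (sd == NULL)
                return -ENOMEM;
        v4l2_i2c_subdev_init(sd, client, &foo_ops); /* ties sd to the client */
        return 0;
}

static int foo_remove(struct i2c_client *client)
{
        struct v4l2_subdev *sd = i2c_get_clientdata(client);

        v4l2_device_unregister_subdev(sd);
        kfree(sd);
        return 0;
}

/* legacy entry point kept so the old i2c command interface still works */
static int foo_command(struct i2c_client *client, unsigned cmd, void *arg)
{
        return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
}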
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index c4444500b330..cb65d519cf78 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -27,7 +27,7 @@
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/i2c-id.h> 28#include <linux/i2c-id.h>
29#include <linux/videodev2.h> 29#include <linux/videodev2.h>
30#include <media/v4l2-common.h> 30#include <media/v4l2-device.h>
31#include <media/v4l2-chip-ident.h> 31#include <media/v4l2-chip-ident.h>
32#include <media/v4l2-i2c-drv-legacy.h> 32#include <media/v4l2-i2c-drv-legacy.h>
33 33
@@ -47,84 +47,104 @@ I2C_CLIENT_INSMOD;
47 47
48/* ----------------------------------------------------------------------- */ 48/* ----------------------------------------------------------------------- */
49 49
50static int cs53l32a_write(struct i2c_client *client, u8 reg, u8 value) 50static int cs53l32a_write(struct v4l2_subdev *sd, u8 reg, u8 value)
51{ 51{
52 struct i2c_client *client = v4l2_get_subdevdata(sd);
53
52 return i2c_smbus_write_byte_data(client, reg, value); 54 return i2c_smbus_write_byte_data(client, reg, value);
53} 55}
54 56
55static int cs53l32a_read(struct i2c_client *client, u8 reg) 57static int cs53l32a_read(struct v4l2_subdev *sd, u8 reg)
56{ 58{
59 struct i2c_client *client = v4l2_get_subdevdata(sd);
60
57 return i2c_smbus_read_byte_data(client, reg); 61 return i2c_smbus_read_byte_data(client, reg);
58} 62}
59 63
60static int cs53l32a_command(struct i2c_client *client, unsigned cmd, void *arg) 64static int cs53l32a_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
61{ 65{
62 struct v4l2_routing *route = arg; 66 /* There are 2 physical inputs, but the second input can be
63 struct v4l2_control *ctrl = arg; 67 placed in two modes, the first mode bypasses the PGA (gain),
64 68 the second goes through the PGA. Hence there are three
65 switch (cmd) { 69 possible inputs to choose from. */
66 case VIDIOC_INT_G_AUDIO_ROUTING: 70 if (route->input > 2) {
67 route->input = (cs53l32a_read(client, 0x01) >> 4) & 3; 71 v4l2_err(sd, "Invalid input %d.\n", route->input);
68 route->output = 0;
69 break;
70
71 case VIDIOC_INT_S_AUDIO_ROUTING:
72 /* There are 2 physical inputs, but the second input can be
73 placed in two modes, the first mode bypasses the PGA (gain),
74 the second goes through the PGA. Hence there are three
75 possible inputs to choose from. */
76 if (route->input > 2) {
77 v4l_err(client, "Invalid input %d.\n", route->input);
78 return -EINVAL;
79 }
80 cs53l32a_write(client, 0x01, 0x01 + (route->input << 4));
81 break;
82
83 case VIDIOC_G_CTRL:
84 if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
85 ctrl->value = (cs53l32a_read(client, 0x03) & 0xc0) != 0;
86 break;
87 }
88 if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
89 return -EINVAL;
90 ctrl->value = (s8)cs53l32a_read(client, 0x04);
91 break;
92
93 case VIDIOC_S_CTRL:
94 if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
95 cs53l32a_write(client, 0x03, ctrl->value ? 0xf0 : 0x30);
96 break;
97 }
98 if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
99 return -EINVAL;
100 if (ctrl->value > 12 || ctrl->value < -96)
101 return -EINVAL;
102 cs53l32a_write(client, 0x04, (u8) ctrl->value);
103 cs53l32a_write(client, 0x05, (u8) ctrl->value);
104 break;
105
106 case VIDIOC_G_CHIP_IDENT:
107 return v4l2_chip_ident_i2c_client(client,
108 arg, V4L2_IDENT_CS53l32A, 0);
109
110 case VIDIOC_LOG_STATUS:
111 {
112 u8 v = cs53l32a_read(client, 0x01);
113 u8 m = cs53l32a_read(client, 0x03);
114 s8 vol = cs53l32a_read(client, 0x04);
115
116 v4l_info(client, "Input: %d%s\n", (v >> 4) & 3,
117 (m & 0xC0) ? " (muted)" : "");
118 v4l_info(client, "Volume: %d dB\n", vol);
119 break;
120 }
121
122 default:
123 return -EINVAL; 72 return -EINVAL;
124 } 73 }
74 cs53l32a_write(sd, 0x01, 0x01 + (route->input << 4));
75 return 0;
76}
77
78static int cs53l32a_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
79{
80 if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
81 ctrl->value = (cs53l32a_read(sd, 0x03) & 0xc0) != 0;
82 return 0;
83 }
84 if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
85 return -EINVAL;
86 ctrl->value = (s8)cs53l32a_read(sd, 0x04);
87 return 0;
88}
89
90static int cs53l32a_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
91{
92 if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
93 cs53l32a_write(sd, 0x03, ctrl->value ? 0xf0 : 0x30);
94 return 0;
95 }
96 if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
97 return -EINVAL;
98 if (ctrl->value > 12 || ctrl->value < -96)
99 return -EINVAL;
100 cs53l32a_write(sd, 0x04, (u8) ctrl->value);
101 cs53l32a_write(sd, 0x05, (u8) ctrl->value);
125 return 0; 102 return 0;
126} 103}
127 104
105static int cs53l32a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
106{
107 struct i2c_client *client = v4l2_get_subdevdata(sd);
108
109 return v4l2_chip_ident_i2c_client(client,
110 chip, V4L2_IDENT_CS53l32A, 0);
111}
112
113static int cs53l32a_log_status(struct v4l2_subdev *sd)
114{
115 u8 v = cs53l32a_read(sd, 0x01);
116 u8 m = cs53l32a_read(sd, 0x03);
117 s8 vol = cs53l32a_read(sd, 0x04);
118
119 v4l2_info(sd, "Input: %d%s\n", (v >> 4) & 3,
120 (m & 0xC0) ? " (muted)" : "");
121 v4l2_info(sd, "Volume: %d dB\n", vol);
122 return 0;
123}
124
125static int cs53l32a_command(struct i2c_client *client, unsigned cmd, void *arg)
126{
127 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
128}
129
130/* ----------------------------------------------------------------------- */
131
132static const struct v4l2_subdev_core_ops cs53l32a_core_ops = {
133 .log_status = cs53l32a_log_status,
134 .g_chip_ident = cs53l32a_g_chip_ident,
135 .g_ctrl = cs53l32a_g_ctrl,
136 .s_ctrl = cs53l32a_s_ctrl,
137};
138
139static const struct v4l2_subdev_audio_ops cs53l32a_audio_ops = {
140 .s_routing = cs53l32a_s_routing,
141};
142
143static const struct v4l2_subdev_ops cs53l32a_ops = {
144 .core = &cs53l32a_core_ops,
145 .audio = &cs53l32a_audio_ops,
146};
147
128/* ----------------------------------------------------------------------- */ 148/* ----------------------------------------------------------------------- */
129 149
130/* i2c implementation */ 150/* i2c implementation */
@@ -137,6 +157,7 @@ static int cs53l32a_command(struct i2c_client *client, unsigned cmd, void *arg)
137static int cs53l32a_probe(struct i2c_client *client, 157static int cs53l32a_probe(struct i2c_client *client,
138 const struct i2c_device_id *id) 158 const struct i2c_device_id *id)
139{ 159{
160 struct v4l2_subdev *sd;
140 int i; 161 int i;
141 162
142 /* Check if the adapter supports the needed features */ 163 /* Check if the adapter supports the needed features */
@@ -149,32 +170,46 @@ static int cs53l32a_probe(struct i2c_client *client,
149 v4l_info(client, "chip found @ 0x%x (%s)\n", 170 v4l_info(client, "chip found @ 0x%x (%s)\n",
150 client->addr << 1, client->adapter->name); 171 client->addr << 1, client->adapter->name);
151 172
173 sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
174 if (sd == NULL)
175 return -ENOMEM;
176 v4l2_i2c_subdev_init(sd, client, &cs53l32a_ops);
177
152 for (i = 1; i <= 7; i++) { 178 for (i = 1; i <= 7; i++) {
153 u8 v = cs53l32a_read(client, i); 179 u8 v = cs53l32a_read(sd, i);
154 180
155 v4l_dbg(1, debug, client, "Read Reg %d %02x\n", i, v); 181 v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v);
156 } 182 }
157 183
158 /* Set cs53l32a internal register for Adaptec 2010/2410 setup */ 184 /* Set cs53l32a internal register for Adaptec 2010/2410 setup */
159 185
160 cs53l32a_write(client, 0x01, (u8) 0x21); 186 cs53l32a_write(sd, 0x01, (u8) 0x21);
161 cs53l32a_write(client, 0x02, (u8) 0x29); 187 cs53l32a_write(sd, 0x02, (u8) 0x29);
162 cs53l32a_write(client, 0x03, (u8) 0x30); 188 cs53l32a_write(sd, 0x03, (u8) 0x30);
163 cs53l32a_write(client, 0x04, (u8) 0x00); 189 cs53l32a_write(sd, 0x04, (u8) 0x00);
164 cs53l32a_write(client, 0x05, (u8) 0x00); 190 cs53l32a_write(sd, 0x05, (u8) 0x00);
165 cs53l32a_write(client, 0x06, (u8) 0x00); 191 cs53l32a_write(sd, 0x06, (u8) 0x00);
166 cs53l32a_write(client, 0x07, (u8) 0x00); 192 cs53l32a_write(sd, 0x07, (u8) 0x00);
167 193
168 /* Display results, should be 0x21,0x29,0x30,0x00,0x00,0x00,0x00 */ 194 /* Display results, should be 0x21,0x29,0x30,0x00,0x00,0x00,0x00 */
169 195
170 for (i = 1; i <= 7; i++) { 196 for (i = 1; i <= 7; i++) {
171 u8 v = cs53l32a_read(client, i); 197 u8 v = cs53l32a_read(sd, i);
172 198
173 v4l_dbg(1, debug, client, "Read Reg %d %02x\n", i, v); 199 v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v);
174 } 200 }
175 return 0; 201 return 0;
176} 202}
177 203
204static int cs53l32a_remove(struct i2c_client *client)
205{
206 struct v4l2_subdev *sd = i2c_get_clientdata(client);
207
208 v4l2_device_unregister_subdev(sd);
209 kfree(sd);
210 return 0;
211}
212
178static const struct i2c_device_id cs53l32a_id[] = { 213static const struct i2c_device_id cs53l32a_id[] = {
179 { "cs53l32a", 0 }, 214 { "cs53l32a", 0 },
180 { } 215 { }
@@ -185,6 +220,7 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data = {
185 .name = "cs53l32a", 220 .name = "cs53l32a",
186 .driverid = I2C_DRIVERID_CS53L32A, 221 .driverid = I2C_DRIVERID_CS53L32A,
187 .command = cs53l32a_command, 222 .command = cs53l32a_command,
223 .remove = cs53l32a_remove,
188 .probe = cs53l32a_probe, 224 .probe = cs53l32a_probe,
189 .id_table = cs53l32a_id, 225 .id_table = cs53l32a_id,
190}; 226};
diff --git a/drivers/media/video/cx18/cx18-av-audio.c b/drivers/media/video/cx18/cx18-av-audio.c
index 0b55837880a7..a2f0ad570434 100644
--- a/drivers/media/video/cx18/cx18-av-audio.c
+++ b/drivers/media/video/cx18/cx18-av-audio.c
@@ -4,6 +4,7 @@
4 * Derived from cx25840-audio.c 4 * Derived from cx25840-audio.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -30,98 +31,165 @@ static int set_audclk_freq(struct cx18 *cx, u32 freq)
30 if (freq != 32000 && freq != 44100 && freq != 48000) 31 if (freq != 32000 && freq != 44100 && freq != 48000)
31 return -EINVAL; 32 return -EINVAL;
32 33
33 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x10 */ 34 /*
34 cx18_av_write(cx, 0x127, 0x50); 35 * The PLL parameters are based on the external crystal frequency that
36 * would ideally be:
37 *
38 * NTSC Color subcarrier freq * 8 =
39 * 4.5 MHz/286 * 455/2 * 8 = 28.63636363... MHz
40 *
41 * The accidents of history and rationale that explain from where this
42 * combination of magic numbers originate can be found in:
43 *
44 * [1] Abrahams, I. C., "Choice of Chrominance Subcarrier Frequency in
45 * the NTSC Standards", Proceedings of the I-R-E, January 1954, pp 79-80
46 *
47 * [2] Abrahams, I. C., "The 'Frequency Interleaving' Principle in the
48 * NTSC Standards", Proceedings of the I-R-E, January 1954, pp 81-83
49 *
50 * As Mike Bradley has rightly pointed out, it's not the exact crystal
51 * frequency that matters, only that all parts of the driver and
52 * firmware are using the same value (close to the ideal value).
53 *
54 * Since I have a strong suspicion that, if the firmware ever assumes a
55 * crystal value at all, it will assume 28.636360 MHz, the crystal
56 * freq used in calculations in this driver will be:
57 *
58 * xtal_freq = 28.636360 MHz
59 *
60 * an error of less than 0.13 ppm which is way, way better than any off
61 * the shelf crystal will have for accuracy anyway.
62 *
 63 * Below I aim to run the PLLs' VCOs near 400 MHz to minimize error.
64 *
65 * Many thanks to Jeff Campbell and Mike Bradley for their extensive
 66 * investigation, experimentation, testing, and suggested solutions
67 * of audio/video sync problems with SVideo and CVBS captures.
68 */
35 69
36 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { 70 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) {
37 switch (freq) { 71 switch (freq) {
38 case 32000: 72 case 32000:
39 /* VID_PLL and AUX_PLL */ 73 /*
40 cx18_av_write4(cx, 0x108, 0x1408040f); 74 * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
75 * AUX_PLL Integer = 0x0d, AUX PLL Post Divider = 0x20
76 */
77 cx18_av_write4(cx, 0x108, 0x200d040f);
41 78
42 /* AUX_PLL_FRAC */ 79 /* VID_PLL Fraction = 0x2be2fe */
43 /* 0x8.9504318a * 28,636,363.636 / 0x14 = 32000 * 384 */ 80 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/
44 cx18_av_write4(cx, 0x110, 0x012a0863); 81 cx18_av_write4(cx, 0x10c, 0x002be2fe);
82
83 /* AUX_PLL Fraction = 0x176740c */
84 /* xtal * 0xd.bb3a060/0x20 = 32000 * 384: 393 MHz p-pd*/
85 cx18_av_write4(cx, 0x110, 0x0176740c);
45 86
46 /* src3/4/6_ctl */ 87 /* src3/4/6_ctl */
47 /* 0x1.f77f = (4 * 15734.26) / 32000 */ 88 /* 0x1.f77f = (4 * xtal/8*2/455) / 32000 */
48 cx18_av_write4(cx, 0x900, 0x0801f77f); 89 cx18_av_write4(cx, 0x900, 0x0801f77f);
49 cx18_av_write4(cx, 0x904, 0x0801f77f); 90 cx18_av_write4(cx, 0x904, 0x0801f77f);
50 cx18_av_write4(cx, 0x90c, 0x0801f77f); 91 cx18_av_write4(cx, 0x90c, 0x0801f77f);
51 92
52 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x14 */ 93 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x20 */
53 cx18_av_write(cx, 0x127, 0x54); 94 cx18_av_write(cx, 0x127, 0x60);
54 95
55 /* AUD_COUNT = 0x2fff = 8 samples * 4 * 384 - 1 */ 96 /* AUD_COUNT = 0x2fff = 8 samples * 4 * 384 - 1 */
56 cx18_av_write4(cx, 0x12c, 0x11202fff); 97 cx18_av_write4(cx, 0x12c, 0x11202fff);
57 98
58 /* 99 /*
59 * EN_AV_LOCK = 1 100 * EN_AV_LOCK = 0
60 * VID_COUNT = 0x0d2ef8 = 107999.000 * 8 = 101 * VID_COUNT = 0x0d2ef8 = 107999.000 * 8 =
61 * ((8 samples/32,000) * (13,500,000 * 8) * 4 - 1) * 8 102 * ((8 samples/32,000) * (13,500,000 * 8) * 4 - 1) * 8
62 */ 103 */
63 cx18_av_write4(cx, 0x128, 0xa10d2ef8); 104 cx18_av_write4(cx, 0x128, 0xa00d2ef8);
64 break; 105 break;
65 106
66 case 44100: 107 case 44100:
67 /* VID_PLL and AUX_PLL */ 108 /*
68 cx18_av_write4(cx, 0x108, 0x1009040f); 109 * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
110 * AUX_PLL Integer = 0x0e, AUX PLL Post Divider = 0x18
111 */
112 cx18_av_write4(cx, 0x108, 0x180e040f);
113
114 /* VID_PLL Fraction = 0x2be2fe */
115 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/
116 cx18_av_write4(cx, 0x10c, 0x002be2fe);
69 117
70 /* AUX_PLL_FRAC */ 118 /* AUX_PLL Fraction = 0x062a1f2 */
71 /* 0x9.7635e7 * 28,636,363.63 / 0x10 = 44100 * 384 */ 119 /* xtal * 0xe.3150f90/0x18 = 44100 * 384: 406 MHz p-pd*/
72 cx18_av_write4(cx, 0x110, 0x00ec6bce); 120 cx18_av_write4(cx, 0x110, 0x0062a1f2);
73 121
74 /* src3/4/6_ctl */ 122 /* src3/4/6_ctl */
75 /* 0x1.6d59 = (4 * 15734.26) / 44100 */ 123 /* 0x1.6d59 = (4 * xtal/8*2/455) / 44100 */
76 cx18_av_write4(cx, 0x900, 0x08016d59); 124 cx18_av_write4(cx, 0x900, 0x08016d59);
77 cx18_av_write4(cx, 0x904, 0x08016d59); 125 cx18_av_write4(cx, 0x904, 0x08016d59);
78 cx18_av_write4(cx, 0x90c, 0x08016d59); 126 cx18_av_write4(cx, 0x90c, 0x08016d59);
79 127
128 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x18 */
129 cx18_av_write(cx, 0x127, 0x58);
130
80 /* AUD_COUNT = 0x92ff = 49 samples * 2 * 384 - 1 */ 131 /* AUD_COUNT = 0x92ff = 49 samples * 2 * 384 - 1 */
81 cx18_av_write4(cx, 0x12c, 0x112092ff); 132 cx18_av_write4(cx, 0x12c, 0x112092ff);
82 133
83 /* 134 /*
84 * EN_AV_LOCK = 1 135 * EN_AV_LOCK = 0
85 * VID_COUNT = 0x1d4bf8 = 239999.000 * 8 = 136 * VID_COUNT = 0x1d4bf8 = 239999.000 * 8 =
86 * ((49 samples/44,100) * (13,500,000 * 8) * 2 - 1) * 8 137 * ((49 samples/44,100) * (13,500,000 * 8) * 2 - 1) * 8
87 */ 138 */
88 cx18_av_write4(cx, 0x128, 0xa11d4bf8); 139 cx18_av_write4(cx, 0x128, 0xa01d4bf8);
89 break; 140 break;
90 141
91 case 48000: 142 case 48000:
92 /* VID_PLL and AUX_PLL */ 143 /*
93 cx18_av_write4(cx, 0x108, 0x100a040f); 144 * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
145 * AUX_PLL Integer = 0x0e, AUX PLL Post Divider = 0x16
146 */
147 cx18_av_write4(cx, 0x108, 0x160e040f);
94 148
95 /* AUX_PLL_FRAC */ 149 /* VID_PLL Fraction = 0x2be2fe */
96 /* 0xa.4c6b6ea * 28,636,363.63 / 0x10 = 48000 * 384 */ 150 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/
97 cx18_av_write4(cx, 0x110, 0x0098d6dd); 151 cx18_av_write4(cx, 0x10c, 0x002be2fe);
152
153 /* AUX_PLL Fraction = 0x05227ad */
154 /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz p-pd*/
155 cx18_av_write4(cx, 0x110, 0x005227ad);
98 156
99 /* src3/4/6_ctl */ 157 /* src3/4/6_ctl */
100 /* 0x1.4faa = (4 * 15734.26) / 48000 */ 158 /* 0x1.4faa = (4 * xtal/8*2/455) / 48000 */
101 cx18_av_write4(cx, 0x900, 0x08014faa); 159 cx18_av_write4(cx, 0x900, 0x08014faa);
102 cx18_av_write4(cx, 0x904, 0x08014faa); 160 cx18_av_write4(cx, 0x904, 0x08014faa);
103 cx18_av_write4(cx, 0x90c, 0x08014faa); 161 cx18_av_write4(cx, 0x90c, 0x08014faa);
104 162
163 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */
164 cx18_av_write(cx, 0x127, 0x56);
165
105 /* AUD_COUNT = 0x5fff = 4 samples * 16 * 384 - 1 */ 166 /* AUD_COUNT = 0x5fff = 4 samples * 16 * 384 - 1 */
106 cx18_av_write4(cx, 0x12c, 0x11205fff); 167 cx18_av_write4(cx, 0x12c, 0x11205fff);
107 168
108 /* 169 /*
109 * EN_AV_LOCK = 1 170 * EN_AV_LOCK = 0
110 * VID_COUNT = 0x1193f8 = 143999.000 * 8 = 171 * VID_COUNT = 0x1193f8 = 143999.000 * 8 =
111 * ((4 samples/48,000) * (13,500,000 * 8) * 16 - 1) * 8 172 * ((4 samples/48,000) * (13,500,000 * 8) * 16 - 1) * 8
112 */ 173 */
113 cx18_av_write4(cx, 0x128, 0xa11193f8); 174 cx18_av_write4(cx, 0x128, 0xa01193f8);
114 break; 175 break;
115 } 176 }
116 } else { 177 } else {
117 switch (freq) { 178 switch (freq) {
118 case 32000: 179 case 32000:
119 /* VID_PLL and AUX_PLL */ 180 /*
120 cx18_av_write4(cx, 0x108, 0x1e08040f); 181 * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
182 * AUX_PLL Integer = 0x0d, AUX PLL Post Divider = 0x30
183 */
184 cx18_av_write4(cx, 0x108, 0x300d040f);
185
186 /* VID_PLL Fraction = 0x2be2fe */
187 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/
188 cx18_av_write4(cx, 0x10c, 0x002be2fe);
121 189
122 /* AUX_PLL_FRAC */ 190 /* AUX_PLL Fraction = 0x176740c */
123 /* 0x8.9504318 * 28,636,363.63 / 0x1e = 32000 * 256 */ 191 /* xtal * 0xd.bb3a060/0x30 = 32000 * 256: 393 MHz p-pd*/
124 cx18_av_write4(cx, 0x110, 0x012a0863); 192 cx18_av_write4(cx, 0x110, 0x0176740c);
125 193
126 /* src1_ctl */ 194 /* src1_ctl */
127 /* 0x1.0000 = 32000/32000 */ 195 /* 0x1.0000 = 32000/32000 */
@@ -133,27 +201,34 @@ static int set_audclk_freq(struct cx18 *cx, u32 freq)
133 cx18_av_write4(cx, 0x904, 0x08020000); 201 cx18_av_write4(cx, 0x904, 0x08020000);
134 cx18_av_write4(cx, 0x90c, 0x08020000); 202 cx18_av_write4(cx, 0x90c, 0x08020000);
135 203
136 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x14 */ 204 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x30 */
137 cx18_av_write(cx, 0x127, 0x54); 205 cx18_av_write(cx, 0x127, 0x70);
138 206
139 /* AUD_COUNT = 0x1fff = 8 samples * 4 * 256 - 1 */ 207 /* AUD_COUNT = 0x1fff = 8 samples * 4 * 256 - 1 */
140 cx18_av_write4(cx, 0x12c, 0x11201fff); 208 cx18_av_write4(cx, 0x12c, 0x11201fff);
141 209
142 /* 210 /*
143 * EN_AV_LOCK = 1 211 * EN_AV_LOCK = 0
144 * VID_COUNT = 0x0d2ef8 = 107999.000 * 8 = 212 * VID_COUNT = 0x0d2ef8 = 107999.000 * 8 =
145 * ((8 samples/32,000) * (13,500,000 * 8) * 4 - 1) * 8 213 * ((8 samples/32,000) * (13,500,000 * 8) * 4 - 1) * 8
146 */ 214 */
147 cx18_av_write4(cx, 0x128, 0xa10d2ef8); 215 cx18_av_write4(cx, 0x128, 0xa00d2ef8);
148 break; 216 break;
149 217
150 case 44100: 218 case 44100:
151 /* VID_PLL and AUX_PLL */ 219 /*
152 cx18_av_write4(cx, 0x108, 0x1809040f); 220 * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
221 * AUX_PLL Integer = 0x0e, AUX PLL Post Divider = 0x24
222 */
223 cx18_av_write4(cx, 0x108, 0x240e040f);
153 224
154 /* AUX_PLL_FRAC */ 225 /* VID_PLL Fraction = 0x2be2fe */
155 /* 0x9.7635e74 * 28,636,363.63 / 0x18 = 44100 * 256 */ 226 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/
156 cx18_av_write4(cx, 0x110, 0x00ec6bce); 227 cx18_av_write4(cx, 0x10c, 0x002be2fe);
228
229 /* AUX_PLL Fraction = 0x062a1f2 */
230 /* xtal * 0xe.3150f90/0x24 = 44100 * 256: 406 MHz p-pd*/
231 cx18_av_write4(cx, 0x110, 0x0062a1f2);
157 232
158 /* src1_ctl */ 233 /* src1_ctl */
159 /* 0x1.60cd = 44100/32000 */ 234 /* 0x1.60cd = 44100/32000 */
@@ -165,24 +240,34 @@ static int set_audclk_freq(struct cx18 *cx, u32 freq)
165 cx18_av_write4(cx, 0x904, 0x08017385); 240 cx18_av_write4(cx, 0x904, 0x08017385);
166 cx18_av_write4(cx, 0x90c, 0x08017385); 241 cx18_av_write4(cx, 0x90c, 0x08017385);
167 242
243 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x24 */
244 cx18_av_write(cx, 0x127, 0x64);
245
168 /* AUD_COUNT = 0x61ff = 49 samples * 2 * 256 - 1 */ 246 /* AUD_COUNT = 0x61ff = 49 samples * 2 * 256 - 1 */
169 cx18_av_write4(cx, 0x12c, 0x112061ff); 247 cx18_av_write4(cx, 0x12c, 0x112061ff);
170 248
171 /* 249 /*
172 * EN_AV_LOCK = 1 250 * EN_AV_LOCK = 0
173 * VID_COUNT = 0x1d4bf8 = 239999.000 * 8 = 251 * VID_COUNT = 0x1d4bf8 = 239999.000 * 8 =
174 * ((49 samples/44,100) * (13,500,000 * 8) * 2 - 1) * 8 252 * ((49 samples/44,100) * (13,500,000 * 8) * 2 - 1) * 8
175 */ 253 */
176 cx18_av_write4(cx, 0x128, 0xa11d4bf8); 254 cx18_av_write4(cx, 0x128, 0xa01d4bf8);
177 break; 255 break;
178 256
179 case 48000: 257 case 48000:
180 /* VID_PLL and AUX_PLL */ 258 /*
181 cx18_av_write4(cx, 0x108, 0x180a040f); 259 * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
260 * AUX_PLL Integer = 0x0d, AUX PLL Post Divider = 0x20
261 */
262 cx18_av_write4(cx, 0x108, 0x200d040f);
263
264 /* VID_PLL Fraction = 0x2be2fe */
265 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/
266 cx18_av_write4(cx, 0x10c, 0x002be2fe);
182 267
183 /* AUX_PLL_FRAC */ 268 /* AUX_PLL Fraction = 0x176740c */
184 /* 0xa.4c6b6ea * 28,636,363.63 / 0x18 = 48000 * 256 */ 269 /* xtal * 0xd.bb3a060/0x20 = 48000 * 256: 393 MHz p-pd*/
185 cx18_av_write4(cx, 0x110, 0x0098d6dd); 270 cx18_av_write4(cx, 0x110, 0x0176740c);
186 271
187 /* src1_ctl */ 272 /* src1_ctl */
188 /* 0x1.8000 = 48000/32000 */ 273 /* 0x1.8000 = 48000/32000 */
@@ -194,15 +279,18 @@ static int set_audclk_freq(struct cx18 *cx, u32 freq)
194 cx18_av_write4(cx, 0x904, 0x08015555); 279 cx18_av_write4(cx, 0x904, 0x08015555);
195 cx18_av_write4(cx, 0x90c, 0x08015555); 280 cx18_av_write4(cx, 0x90c, 0x08015555);
196 281
282 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x20 */
283 cx18_av_write(cx, 0x127, 0x60);
284
197 /* AUD_COUNT = 0x3fff = 4 samples * 16 * 256 - 1 */ 285 /* AUD_COUNT = 0x3fff = 4 samples * 16 * 256 - 1 */
198 cx18_av_write4(cx, 0x12c, 0x11203fff); 286 cx18_av_write4(cx, 0x12c, 0x11203fff);
199 287
200 /* 288 /*
201 * EN_AV_LOCK = 1 289 * EN_AV_LOCK = 0
202 * VID_COUNT = 0x1193f8 = 143999.000 * 8 = 290 * VID_COUNT = 0x1193f8 = 143999.000 * 8 =
203 * ((4 samples/48,000) * (13,500,000 * 8) * 16 - 1) * 8 291 * ((4 samples/48,000) * (13,500,000 * 8) * 16 - 1) * 8
204 */ 292 */
205 cx18_av_write4(cx, 0x128, 0xa11193f8); 293 cx18_av_write4(cx, 0x128, 0xa01193f8);
206 break; 294 break;
207 } 295 }
208 } 296 }
@@ -215,12 +303,15 @@ static int set_audclk_freq(struct cx18 *cx, u32 freq)
215void cx18_av_audio_set_path(struct cx18 *cx) 303void cx18_av_audio_set_path(struct cx18 *cx)
216{ 304{
217 struct cx18_av_state *state = &cx->av_state; 305 struct cx18_av_state *state = &cx->av_state;
306 u8 v;
218 307
219 /* stop microcontroller */ 308 /* stop microcontroller */
220 cx18_av_and_or(cx, 0x803, ~0x10, 0); 309 v = cx18_av_read(cx, 0x803) & ~0x10;
310 cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
221 311
222 /* assert soft reset */ 312 /* assert soft reset */
223 cx18_av_and_or(cx, 0x810, ~0x1, 0x01); 313 v = cx18_av_read(cx, 0x810) | 0x01;
314 cx18_av_write_expect(cx, 0x810, v, v, 0x0f);
224 315
225 /* Mute everything to prevent the PFFT! */ 316 /* Mute everything to prevent the PFFT! */
226 cx18_av_write(cx, 0x8d3, 0x1f); 317 cx18_av_write(cx, 0x8d3, 0x1f);
@@ -240,12 +331,14 @@ void cx18_av_audio_set_path(struct cx18 *cx)
240 set_audclk_freq(cx, state->audclk_freq); 331 set_audclk_freq(cx, state->audclk_freq);
241 332
242 /* deassert soft reset */ 333 /* deassert soft reset */
243 cx18_av_and_or(cx, 0x810, ~0x1, 0x00); 334 v = cx18_av_read(cx, 0x810) & ~0x01;
335 cx18_av_write_expect(cx, 0x810, v, v, 0x0f);
244 336
245 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { 337 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) {
246 /* When the microcontroller detects the 338 /* When the microcontroller detects the
247 * audio format, it will unmute the lines */ 339 * audio format, it will unmute the lines */
248 cx18_av_and_or(cx, 0x803, ~0x10, 0x10); 340 v = cx18_av_read(cx, 0x803) | 0x10;
341 cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
249 } 342 }
250} 343}
251 344
@@ -347,19 +440,23 @@ static int get_mute(struct cx18 *cx)
347static void set_mute(struct cx18 *cx, int mute) 440static void set_mute(struct cx18 *cx, int mute)
348{ 441{
349 struct cx18_av_state *state = &cx->av_state; 442 struct cx18_av_state *state = &cx->av_state;
443 u8 v;
350 444
351 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { 445 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) {
352 /* Must turn off microcontroller in order to mute sound. 446 /* Must turn off microcontroller in order to mute sound.
353 * Not sure if this is the best method, but it does work. 447 * Not sure if this is the best method, but it does work.
354 * If the microcontroller is running, then it will undo any 448 * If the microcontroller is running, then it will undo any
355 * changes to the mute register. */ 449 * changes to the mute register. */
450 v = cx18_av_read(cx, 0x803);
356 if (mute) { 451 if (mute) {
357 /* disable microcontroller */ 452 /* disable microcontroller */
358 cx18_av_and_or(cx, 0x803, ~0x10, 0x00); 453 v &= ~0x10;
454 cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
359 cx18_av_write(cx, 0x8d3, 0x1f); 455 cx18_av_write(cx, 0x8d3, 0x1f);
360 } else { 456 } else {
361 /* enable microcontroller */ 457 /* enable microcontroller */
362 cx18_av_and_or(cx, 0x803, ~0x10, 0x10); 458 v |= 0x10;
459 cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
363 } 460 }
364 } else { 461 } else {
365 /* SRC1_MUTE_EN */ 462 /* SRC1_MUTE_EN */
@@ -375,16 +472,26 @@ int cx18_av_audio(struct cx18 *cx, unsigned int cmd, void *arg)
375 472
376 switch (cmd) { 473 switch (cmd) {
377 case VIDIOC_INT_AUDIO_CLOCK_FREQ: 474 case VIDIOC_INT_AUDIO_CLOCK_FREQ:
475 {
476 u8 v;
378 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { 477 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) {
379 cx18_av_and_or(cx, 0x803, ~0x10, 0); 478 v = cx18_av_read(cx, 0x803) & ~0x10;
479 cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
380 cx18_av_write(cx, 0x8d3, 0x1f); 480 cx18_av_write(cx, 0x8d3, 0x1f);
381 } 481 }
382 cx18_av_and_or(cx, 0x810, ~0x1, 1); 482 v = cx18_av_read(cx, 0x810) | 0x1;
483 cx18_av_write_expect(cx, 0x810, v, v, 0x0f);
484
383 retval = set_audclk_freq(cx, *(u32 *)arg); 485 retval = set_audclk_freq(cx, *(u32 *)arg);
384 cx18_av_and_or(cx, 0x810, ~0x1, 0); 486
385 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) 487 v = cx18_av_read(cx, 0x810) & ~0x1;
386 cx18_av_and_or(cx, 0x803, ~0x10, 0x10); 488 cx18_av_write_expect(cx, 0x810, v, v, 0x0f);
489 if (state->aud_input > CX18_AV_AUDIO_SERIAL2) {
490 v = cx18_av_read(cx, 0x803) | 0x10;
491 cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
492 }
387 return retval; 493 return retval;
494 }
388 495
389 case VIDIOC_G_CTRL: 496 case VIDIOC_G_CTRL:
390 switch (ctrl->id) { 497 switch (ctrl->id) {
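The long comment added at the top of set_audclk_freq() explains the arithmetic behind the new register values: the PLL multiplier is integer + fraction/2^25 (the same interpretation the cx18_av_std_setup() change below now uses for the video PLL), the VCO runs at xtal times that multiplier, and dividing by the post divider should land on fs * 384 (or fs * 256 for the serial audio inputs). The small userspace program below is only a back-of-the-envelope check of the 32 kHz I2S numbers quoted in the comments; it does not restate the actual register field layout, and the 25-bit fraction interpretation is taken from cx18_av_std_setup() rather than from a datasheet.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const double   xtal     = 28636360.0;  /* assumed crystal, Hz        */
        const unsigned pll_int  = 0x0d;        /* AUX_PLL integer (32 kHz)   */
        const uint32_t pll_frac = 0x176740c;   /* AUX_PLL 25-bit fraction    */
        const unsigned post_div = 0x20;        /* AUX PLL post divider       */

        double mult = pll_int + (double)pll_frac / (1 << 25);
        double vco  = xtal * mult;             /* ~393 MHz, as the comment says   */
        double mclk = vco / post_div;          /* should be close to 32000 * 384  */

        printf("VCO  = %.0f Hz\n", vco);
        printf("MCLK = %.1f Hz (target %d)\n", mclk, 32000 * 384);
        return 0;
}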
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
index 73f5141a42d1..0b1c84b4ddd6 100644
--- a/drivers/media/video/cx18/cx18-av-core.c
+++ b/drivers/media/video/cx18/cx18-av-core.c
@@ -4,6 +4,7 @@
4 * Derived from cx25840-core.c 4 * Derived from cx25840-core.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -36,12 +37,31 @@ int cx18_av_write(struct cx18 *cx, u16 addr, u8 value)
36 return 0; 37 return 0;
37} 38}
38 39
40int cx18_av_write_expect(struct cx18 *cx, u16 addr, u8 value, u8 eval, u8 mask)
41{
42 u32 reg = 0xc40000 + (addr & ~3);
43 int shift = (addr & 3) * 8;
44 u32 x = cx18_read_reg(cx, reg);
45
46 x = (x & ~((u32)0xff << shift)) | ((u32)value << shift);
47 cx18_write_reg_expect(cx, x, reg,
48 ((u32)eval << shift), ((u32)mask << shift));
49 return 0;
50}
51
39int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value) 52int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value)
40{ 53{
41 cx18_write_reg(cx, value, 0xc40000 + addr); 54 cx18_write_reg(cx, value, 0xc40000 + addr);
42 return 0; 55 return 0;
43} 56}
44 57
58int
59cx18_av_write4_expect(struct cx18 *cx, u16 addr, u32 value, u32 eval, u32 mask)
60{
61 cx18_write_reg_expect(cx, value, 0xc40000 + addr, eval, mask);
62 return 0;
63}
64
45int cx18_av_write4_noretry(struct cx18 *cx, u16 addr, u32 value) 65int cx18_av_write4_noretry(struct cx18 *cx, u16 addr, u32 value)
46{ 66{
47 cx18_write_reg_noretry(cx, value, 0xc40000 + addr); 67 cx18_write_reg_noretry(cx, value, 0xc40000 + addr);
@@ -61,11 +81,6 @@ u32 cx18_av_read4(struct cx18 *cx, u16 addr)
61 return cx18_read_reg(cx, 0xc40000 + addr); 81 return cx18_read_reg(cx, 0xc40000 + addr);
62} 82}
63 83
64u32 cx18_av_read4_noretry(struct cx18 *cx, u16 addr)
65{
66 return cx18_read_reg_noretry(cx, 0xc40000 + addr);
67}
68
69int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned and_mask, 84int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned and_mask,
70 u8 or_value) 85 u8 or_value)
71{ 86{
@@ -98,14 +113,16 @@ static void cx18_av_initialize(struct cx18 *cx)
98 113
99 cx18_av_loadfw(cx); 114 cx18_av_loadfw(cx);
100 /* Stop 8051 code execution */ 115 /* Stop 8051 code execution */
101 cx18_av_write4(cx, CXADEC_DL_CTL, 0x03000000); 116 cx18_av_write4_expect(cx, CXADEC_DL_CTL, 0x03000000,
117 0x03000000, 0x13000000);
102 118
 103 /* initialize the PLL by toggling sleep bit */ 119 /* initialize the PLL by toggling sleep bit */
104 v = cx18_av_read4(cx, CXADEC_HOST_REG1); 120 v = cx18_av_read4(cx, CXADEC_HOST_REG1);
105 /* enable sleep mode */ 121 /* enable sleep mode - register appears to be read only... */
106 cx18_av_write4(cx, CXADEC_HOST_REG1, v | 1); 122 cx18_av_write4_expect(cx, CXADEC_HOST_REG1, v | 1, v, 0xfffe);
107 /* disable sleep mode */ 123 /* disable sleep mode */
108 cx18_av_write4(cx, CXADEC_HOST_REG1, v & 0xfffe); 124 cx18_av_write4_expect(cx, CXADEC_HOST_REG1, v & 0xfffe,
125 v & 0xfffe, 0xffff);
109 126
110 /* initialize DLLs */ 127 /* initialize DLLs */
111 v = cx18_av_read4(cx, CXADEC_DLL1_DIAG_CTRL) & 0xE1FFFEFF; 128 v = cx18_av_read4(cx, CXADEC_DLL1_DIAG_CTRL) & 0xE1FFFEFF;
@@ -125,9 +142,10 @@ static void cx18_av_initialize(struct cx18 *cx)
125 142
126 v = cx18_av_read4(cx, CXADEC_AFE_DIAG_CTRL3) | 1; 143 v = cx18_av_read4(cx, CXADEC_AFE_DIAG_CTRL3) | 1;
127 /* enable TUNE_FIL_RST */ 144 /* enable TUNE_FIL_RST */
128 cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL3, v); 145 cx18_av_write4_expect(cx, CXADEC_AFE_DIAG_CTRL3, v, v, 0x03009F0F);
129 /* disable TUNE_FIL_RST */ 146 /* disable TUNE_FIL_RST */
130 cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL3, v & 0xFFFFFFFE); 147 cx18_av_write4_expect(cx, CXADEC_AFE_DIAG_CTRL3,
148 v & 0xFFFFFFFE, v & 0xFFFFFFFE, 0x03009F0F);
131 149
132 /* enable 656 output */ 150 /* enable 656 output */
133 cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x040C00); 151 cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x040C00);
@@ -251,10 +269,9 @@ void cx18_av_std_setup(struct cx18 *cx)
251 pll_int, pll_frac, pll_post); 269 pll_int, pll_frac, pll_post);
252 270
253 if (pll_post) { 271 if (pll_post) {
254 int fin, fsc; 272 int fin, fsc, pll;
255 int pll = 28636363L * ((((u64)pll_int) << 25) + pll_frac);
256 273
257 pll >>= 25; 274 pll = (28636360L * ((((u64)pll_int) << 25) + pll_frac)) >> 25;
258 pll /= pll_post; 275 pll /= pll_post;
259 CX18_DEBUG_INFO("PLL = %d.%06d MHz\n", 276 CX18_DEBUG_INFO("PLL = %d.%06d MHz\n",
260 pll / 1000000, pll % 1000000); 277 pll / 1000000, pll % 1000000);
@@ -324,6 +341,7 @@ static void input_change(struct cx18 *cx)
324{ 341{
325 struct cx18_av_state *state = &cx->av_state; 342 struct cx18_av_state *state = &cx->av_state;
326 v4l2_std_id std = state->std; 343 v4l2_std_id std = state->std;
344 u8 v;
327 345
328 /* Follow step 8c and 8d of section 3.16 in the cx18_av datasheet */ 346 /* Follow step 8c and 8d of section 3.16 in the cx18_av datasheet */
329 cx18_av_write(cx, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11); 347 cx18_av_write(cx, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11);
@@ -333,31 +351,34 @@ static void input_change(struct cx18 *cx)
333 if (std & V4L2_STD_525_60) { 351 if (std & V4L2_STD_525_60) {
334 if (std == V4L2_STD_NTSC_M_JP) { 352 if (std == V4L2_STD_NTSC_M_JP) {
335 /* Japan uses EIAJ audio standard */ 353 /* Japan uses EIAJ audio standard */
336 cx18_av_write(cx, 0x808, 0xf7); 354 cx18_av_write_expect(cx, 0x808, 0xf7, 0xf7, 0xff);
337 cx18_av_write(cx, 0x80b, 0x02); 355 cx18_av_write_expect(cx, 0x80b, 0x02, 0x02, 0x3f);
338 } else if (std == V4L2_STD_NTSC_M_KR) { 356 } else if (std == V4L2_STD_NTSC_M_KR) {
339 /* South Korea uses A2 audio standard */ 357 /* South Korea uses A2 audio standard */
340 cx18_av_write(cx, 0x808, 0xf8); 358 cx18_av_write_expect(cx, 0x808, 0xf8, 0xf8, 0xff);
341 cx18_av_write(cx, 0x80b, 0x03); 359 cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f);
342 } else { 360 } else {
343 /* Others use the BTSC audio standard */ 361 /* Others use the BTSC audio standard */
344 cx18_av_write(cx, 0x808, 0xf6); 362 cx18_av_write_expect(cx, 0x808, 0xf6, 0xf6, 0xff);
345 cx18_av_write(cx, 0x80b, 0x01); 363 cx18_av_write_expect(cx, 0x80b, 0x01, 0x01, 0x3f);
346 } 364 }
347 } else if (std & V4L2_STD_PAL) { 365 } else if (std & V4L2_STD_PAL) {
348 /* Follow tuner change procedure for PAL */ 366 /* Follow tuner change procedure for PAL */
349 cx18_av_write(cx, 0x808, 0xff); 367 cx18_av_write_expect(cx, 0x808, 0xff, 0xff, 0xff);
350 cx18_av_write(cx, 0x80b, 0x03); 368 cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f);
351 } else if (std & V4L2_STD_SECAM) { 369 } else if (std & V4L2_STD_SECAM) {
352 /* Select autodetect for SECAM */ 370 /* Select autodetect for SECAM */
353 cx18_av_write(cx, 0x808, 0xff); 371 cx18_av_write_expect(cx, 0x808, 0xff, 0xff, 0xff);
354 cx18_av_write(cx, 0x80b, 0x03); 372 cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f);
355 } 373 }
356 374
357 if (cx18_av_read(cx, 0x803) & 0x10) { 375 v = cx18_av_read(cx, 0x803);
376 if (v & 0x10) {
358 /* restart audio decoder microcontroller */ 377 /* restart audio decoder microcontroller */
359 cx18_av_and_or(cx, 0x803, ~0x10, 0x00); 378 v &= ~0x10;
360 cx18_av_and_or(cx, 0x803, ~0x10, 0x10); 379 cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
380 v |= 0x10;
381 cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
361 } 382 }
362} 383}
363 384
@@ -368,6 +389,7 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
368 u8 is_composite = (vid_input >= CX18_AV_COMPOSITE1 && 389 u8 is_composite = (vid_input >= CX18_AV_COMPOSITE1 &&
369 vid_input <= CX18_AV_COMPOSITE8); 390 vid_input <= CX18_AV_COMPOSITE8);
370 u8 reg; 391 u8 reg;
392 u8 v;
371 393
372 CX18_DEBUG_INFO("decoder set video input %d, audio input %d\n", 394 CX18_DEBUG_INFO("decoder set video input %d, audio input %d\n",
373 vid_input, aud_input); 395 vid_input, aud_input);
@@ -413,16 +435,23 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
413 return -EINVAL; 435 return -EINVAL;
414 } 436 }
415 437
416 cx18_av_write(cx, 0x103, reg); 438 cx18_av_write_expect(cx, 0x103, reg, reg, 0xf7);
417 /* Set INPUT_MODE to Composite (0) or S-Video (1) */ 439 /* Set INPUT_MODE to Composite (0) or S-Video (1) */
418 cx18_av_and_or(cx, 0x401, ~0x6, is_composite ? 0 : 0x02); 440 cx18_av_and_or(cx, 0x401, ~0x6, is_composite ? 0 : 0x02);
441
419 /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */ 442 /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
420 cx18_av_and_or(cx, 0x102, ~0x2, (reg & 0x80) == 0 ? 2 : 0); 443 v = cx18_av_read(cx, 0x102);
444 if (reg & 0x80)
445 v &= ~0x2;
446 else
447 v |= 0x2;
421 /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */ 448 /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */
422 if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30) 449 if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30)
423 cx18_av_and_or(cx, 0x102, ~0x4, 4); 450 v |= 0x4;
424 else 451 else
425 cx18_av_and_or(cx, 0x102, ~0x4, 0); 452 v &= ~0x4;
453 cx18_av_write_expect(cx, 0x102, v, v, 0x17);
454
426 /*cx18_av_and_or4(cx, 0x104, ~0x001b4180, 0x00004180);*/ 455 /*cx18_av_and_or4(cx, 0x104, ~0x001b4180, 0x00004180);*/
427 456
428 state->vid_input = vid_input; 457 state->vid_input = vid_input;
@@ -799,40 +828,47 @@ int cx18_av_cmd(struct cx18 *cx, unsigned int cmd, void *arg)
799 } 828 }
800 829
801 case VIDIOC_S_TUNER: 830 case VIDIOC_S_TUNER:
831 {
832 u8 v;
833
802 if (state->radio) 834 if (state->radio)
803 break; 835 break;
804 836
837 v = cx18_av_read(cx, 0x809);
838 v &= ~0xf;
839
805 switch (vt->audmode) { 840 switch (vt->audmode) {
806 case V4L2_TUNER_MODE_MONO: 841 case V4L2_TUNER_MODE_MONO:
807 /* mono -> mono 842 /* mono -> mono
808 stereo -> mono 843 stereo -> mono
809 bilingual -> lang1 */ 844 bilingual -> lang1 */
810 cx18_av_and_or(cx, 0x809, ~0xf, 0x00);
811 break; 845 break;
812 case V4L2_TUNER_MODE_STEREO: 846 case V4L2_TUNER_MODE_STEREO:
813 case V4L2_TUNER_MODE_LANG1: 847 case V4L2_TUNER_MODE_LANG1:
814 /* mono -> mono 848 /* mono -> mono
815 stereo -> stereo 849 stereo -> stereo
816 bilingual -> lang1 */ 850 bilingual -> lang1 */
817 cx18_av_and_or(cx, 0x809, ~0xf, 0x04); 851 v |= 0x4;
818 break; 852 break;
819 case V4L2_TUNER_MODE_LANG1_LANG2: 853 case V4L2_TUNER_MODE_LANG1_LANG2:
820 /* mono -> mono 854 /* mono -> mono
821 stereo -> stereo 855 stereo -> stereo
822 bilingual -> lang1/lang2 */ 856 bilingual -> lang1/lang2 */
823 cx18_av_and_or(cx, 0x809, ~0xf, 0x07); 857 v |= 0x7;
824 break; 858 break;
825 case V4L2_TUNER_MODE_LANG2: 859 case V4L2_TUNER_MODE_LANG2:
826 /* mono -> mono 860 /* mono -> mono
827 stereo -> stereo 861 stereo -> stereo
828 bilingual -> lang2 */ 862 bilingual -> lang2 */
829 cx18_av_and_or(cx, 0x809, ~0xf, 0x01); 863 v |= 0x1;
830 break; 864 break;
831 default: 865 default:
832 return -EINVAL; 866 return -EINVAL;
833 } 867 }
868 cx18_av_write_expect(cx, 0x809, v, v, 0xff);
834 state->audmode = vt->audmode; 869 state->audmode = vt->audmode;
835 break; 870 break;
871 }
836 872
837 case VIDIOC_G_FMT: 873 case VIDIOC_G_FMT:
838 return get_v4lfmt(cx, (struct v4l2_format *)arg); 874 return get_v4lfmt(cx, (struct v4l2_format *)arg);
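Most of the cx18-av-core.c hunks above replace blind cx18_av_and_or()/cx18_av_write() sequences with an explicit read-modify-write followed by cx18_av_write_expect(), the new helper that folds the byte into its 32-bit MMIO word and hands an expected value and mask to cx18_write_reg_expect() for verification (that underlying primitive is not shown in this diff, so its exact retry behaviour is assumed rather than quoted). A hedged sketch of the calling pattern, using the audio-microcontroller enable bit from the hunks above; the enable_mc_* names are illustrative only.

/* old style: one-shot, unverified bit set in register 0x803 */
static void enable_mc_old(struct cx18 *cx)
{
        cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
}

/* new style: compute the byte, write it, and require that the low five
 * bits (mask 0x1f) read back as the value just written */
static void enable_mc_new(struct cx18 *cx)
{
        u8 v = cx18_av_read(cx, 0x803) | 0x10;

        cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
}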
diff --git a/drivers/media/video/cx18/cx18-av-core.h b/drivers/media/video/cx18/cx18-av-core.h
index b67d8df20cc6..cf68a6039091 100644
--- a/drivers/media/video/cx18/cx18-av-core.h
+++ b/drivers/media/video/cx18/cx18-av-core.h
@@ -4,6 +4,7 @@
4 * Derived from cx25840-core.h 4 * Derived from cx25840-core.h
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -302,9 +303,11 @@ struct cx18_av_state {
302int cx18_av_write(struct cx18 *cx, u16 addr, u8 value); 303int cx18_av_write(struct cx18 *cx, u16 addr, u8 value);
303int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value); 304int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value);
304int cx18_av_write4_noretry(struct cx18 *cx, u16 addr, u32 value); 305int cx18_av_write4_noretry(struct cx18 *cx, u16 addr, u32 value);
306int cx18_av_write_expect(struct cx18 *cx, u16 addr, u8 value, u8 eval, u8 mask);
307int cx18_av_write4_expect(struct cx18 *cx, u16 addr, u32 value, u32 eval,
308 u32 mask);
305u8 cx18_av_read(struct cx18 *cx, u16 addr); 309u8 cx18_av_read(struct cx18 *cx, u16 addr);
306u32 cx18_av_read4(struct cx18 *cx, u16 addr); 310u32 cx18_av_read4(struct cx18 *cx, u16 addr);
307u32 cx18_av_read4_noretry(struct cx18 *cx, u16 addr);
308int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned mask, u8 value); 311int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned mask, u8 value);
309int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 mask, u32 value); 312int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 mask, u32 value);
310int cx18_av_cmd(struct cx18 *cx, unsigned int cmd, void *arg); 313int cx18_av_cmd(struct cx18 *cx, unsigned int cmd, void *arg);
diff --git a/drivers/media/video/cx18/cx18-av-firmware.c b/drivers/media/video/cx18/cx18-av-firmware.c
index 522a035b2e8f..c64fd0a05a97 100644
--- a/drivers/media/video/cx18/cx18-av-firmware.c
+++ b/drivers/media/video/cx18/cx18-av-firmware.c
@@ -2,6 +2,7 @@
2 * cx18 ADEC firmware functions 2 * cx18 ADEC firmware functions
3 * 3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -43,11 +44,13 @@ int cx18_av_loadfw(struct cx18 *cx)
43 /* The firmware load often has byte errors, so allow for several 44 /* The firmware load often has byte errors, so allow for several
44 retries, both at byte level and at the firmware load level. */ 45 retries, both at byte level and at the firmware load level. */
45 while (retries1 < 5) { 46 while (retries1 < 5) {
46 cx18_av_write4(cx, CXADEC_CHIP_CTRL, 0x00010000); 47 cx18_av_write4_expect(cx, CXADEC_CHIP_CTRL, 0x00010000,
47 cx18_av_write(cx, CXADEC_STD_DET_CTL, 0xf6); 48 0x00008430, 0xffffffff); /* cx25843 */
49 cx18_av_write_expect(cx, CXADEC_STD_DET_CTL, 0xf6, 0xf6, 0xff);
48 50
 49 /* Reset the Mako core (Register is undocumented.) */ 51 /* Reset the Mako core; this register is an alias of CXADEC_CHIP_CTRL */
50 cx18_av_write4(cx, 0x8100, 0x00010000); 52 cx18_av_write4_expect(cx, 0x8100, 0x00010000,
53 0x00008430, 0xffffffff); /* cx25843 */
51 54
52 /* Put the 8051 in reset and enable firmware upload */ 55 /* Put the 8051 in reset and enable firmware upload */
53 cx18_av_write4_noretry(cx, CXADEC_DL_CTL, 0x0F000000); 56 cx18_av_write4_noretry(cx, CXADEC_DL_CTL, 0x0F000000);
@@ -61,13 +64,12 @@ int cx18_av_loadfw(struct cx18 *cx)
61 int retries2; 64 int retries2;
62 int unrec_err = 0; 65 int unrec_err = 0;
63 66
64 for (retries2 = 0; retries2 < CX18_MAX_MMIO_RETRIES; 67 for (retries2 = 0; retries2 < CX18_MAX_MMIO_WR_RETRIES;
65 retries2++) { 68 retries2++) {
66 cx18_av_write4_noretry(cx, CXADEC_DL_CTL, 69 cx18_av_write4_noretry(cx, CXADEC_DL_CTL,
67 dl_control); 70 dl_control);
68 udelay(10); 71 udelay(10);
69 value = cx18_av_read4_noretry(cx, 72 value = cx18_av_read4(cx, CXADEC_DL_CTL);
70 CXADEC_DL_CTL);
71 if (value == dl_control) 73 if (value == dl_control)
72 break; 74 break;
73 /* Check if we can correct the byte by changing 75 /* Check if we can correct the byte by changing
@@ -78,9 +80,7 @@ int cx18_av_loadfw(struct cx18 *cx)
78 break; 80 break;
79 } 81 }
80 } 82 }
81 cx18_log_write_retries(cx, retries2, 83 if (unrec_err || retries2 >= CX18_MAX_MMIO_WR_RETRIES)
82 cx->reg_mem + 0xc40000 + CXADEC_DL_CTL);
83 if (unrec_err || retries2 >= CX18_MAX_MMIO_RETRIES)
84 break; 84 break;
85 } 85 }
86 if (i == size) 86 if (i == size)
@@ -93,7 +93,8 @@ int cx18_av_loadfw(struct cx18 *cx)
93 return -EIO; 93 return -EIO;
94 } 94 }
95 95
96 cx18_av_write4(cx, CXADEC_DL_CTL, 0x13000000 | fw->size); 96 cx18_av_write4_expect(cx, CXADEC_DL_CTL,
97 0x13000000 | fw->size, 0x13000000, 0x13000000);
97 98
98 /* Output to the 416 */ 99 /* Output to the 416 */
99 cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x78000); 100 cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x78000);
@@ -118,7 +119,8 @@ int cx18_av_loadfw(struct cx18 *cx)
118 passthrough */ 119 passthrough */
119 cx18_av_write4(cx, CXADEC_PIN_CFG3, 0x5000B687); 120 cx18_av_write4(cx, CXADEC_PIN_CFG3, 0x5000B687);
120 121
121 cx18_av_write4(cx, CXADEC_STD_DET_CTL, 0x000000F6); 122 cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, 0x000000F6, 0x000000F6,
123 0x3F00FFFF);
122 /* CxDevWrReg(CXADEC_STD_DET_CTL, 0x000000FF); */ 124 /* CxDevWrReg(CXADEC_STD_DET_CTL, 0x000000FF); */
123 125
124 /* Set bit 0 in register 0x9CC to signify that this is MiniMe. */ 126 /* Set bit 0 in register 0x9CC to signify that this is MiniMe. */
@@ -136,7 +138,7 @@ int cx18_av_loadfw(struct cx18 *cx)
136 v |= 0xFF; /* Auto by default */ 138 v |= 0xFF; /* Auto by default */
137 v |= 0x400; /* Stereo by default */ 139 v |= 0x400; /* Stereo by default */
138 v |= 0x14000000; 140 v |= 0x14000000;
139 cx18_av_write4(cx, CXADEC_STD_DET_CTL, v); 141 cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, v, v, 0x3F00FFFF);
140 142
141 release_firmware(fw); 143 release_firmware(fw);
142 144
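The *_expect() calls introduced in the two firmware hunks above all follow one pattern: write a register, read it back, and accept the write only when the readback matches an expected value under a mask, retrying a bounded number of times. A minimal standalone sketch of that pattern follows; wr()/rd() are hypothetical stand-ins for the driver's MMIO accessors, not real cx18 helpers.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg;                          /* stands in for one MMIO register */

static void     wr(uint32_t val) { fake_reg = val; }  /* hypothetical MMIO write */
static uint32_t rd(void)         { return fake_reg; } /* hypothetical MMIO read  */

/* Write 'value', read back, and accept when (readback & mask) == (eval & mask);
 * retry a bounded number of times, as the *_expect() helpers above do. */
static int write_expect(uint32_t value, uint32_t eval, uint32_t mask, int retries)
{
        while (retries--) {
                wr(value);
                if ((rd() & mask) == (eval & mask))
                        return 0;
        }
        return -1;              /* the device never took the value */
}

int main(void)
{
        /* e.g. CXADEC_STD_DET_CTL style: write 0xf6 and expect to read 0xf6 back */
        printf("%d\n", write_expect(0xf6, 0xf6, 0xff, 5));     /* prints 0 */
        return 0;
}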
diff --git a/drivers/media/video/cx18/cx18-av-vbi.c b/drivers/media/video/cx18/cx18-av-vbi.c
index 02fdf57bb678..1527ea4f6b06 100644
--- a/drivers/media/video/cx18/cx18-av-vbi.c
+++ b/drivers/media/video/cx18/cx18-av-vbi.c
@@ -141,10 +141,11 @@ int cx18_av_vbi(struct cx18 *cx, unsigned int cmd, void *arg)
141 u8 lcr[24]; 141 u8 lcr[24];
142 142
143 fmt = arg; 143 fmt = arg;
144 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) 144 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE &&
145 fmt->type != V4L2_BUF_TYPE_VBI_CAPTURE)
145 return -EINVAL; 146 return -EINVAL;
146 svbi = &fmt->fmt.sliced; 147 svbi = &fmt->fmt.sliced;
147 if (svbi->service_set == 0) { 148 if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
148 /* raw VBI */ 149 /* raw VBI */
149 memset(svbi, 0, sizeof(*svbi)); 150 memset(svbi, 0, sizeof(*svbi));
150 151
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 5efe01ebe9db..e274043657dd 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -4,6 +4,7 @@
4 * Derived from ivtv-cards.c 4 * Derived from ivtv-cards.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -50,7 +51,7 @@ static struct cx18_card_tuner_i2c cx18_i2c_std = {
50static const struct cx18_card cx18_card_hvr1600_esmt = { 51static const struct cx18_card cx18_card_hvr1600_esmt = {
51 .type = CX18_CARD_HVR_1600_ESMT, 52 .type = CX18_CARD_HVR_1600_ESMT,
52 .name = "Hauppauge HVR-1600", 53 .name = "Hauppauge HVR-1600",
53 .comment = "VBI is not yet supported\n", 54 .comment = "Raw VBI supported; Sliced VBI is not yet supported\n",
54 .v4l2_capabilities = CX18_CAP_ENCODER, 55 .v4l2_capabilities = CX18_CAP_ENCODER,
55 .hw_audio_ctrl = CX18_HW_CX23418, 56 .hw_audio_ctrl = CX18_HW_CX23418,
56 .hw_muxer = CX18_HW_CS5345, 57 .hw_muxer = CX18_HW_CS5345,
@@ -96,7 +97,7 @@ static const struct cx18_card cx18_card_hvr1600_esmt = {
96static const struct cx18_card cx18_card_hvr1600_samsung = { 97static const struct cx18_card cx18_card_hvr1600_samsung = {
97 .type = CX18_CARD_HVR_1600_SAMSUNG, 98 .type = CX18_CARD_HVR_1600_SAMSUNG,
98 .name = "Hauppauge HVR-1600 (Preproduction)", 99 .name = "Hauppauge HVR-1600 (Preproduction)",
99 .comment = "VBI is not yet supported\n", 100 .comment = "Raw VBI supported; Sliced VBI is not yet supported\n",
100 .v4l2_capabilities = CX18_CAP_ENCODER, 101 .v4l2_capabilities = CX18_CAP_ENCODER,
101 .hw_audio_ctrl = CX18_HW_CX23418, 102 .hw_audio_ctrl = CX18_HW_CX23418,
102 .hw_muxer = CX18_HW_CS5345, 103 .hw_muxer = CX18_HW_CS5345,
@@ -151,7 +152,7 @@ static const struct cx18_card_pci_info cx18_pci_h900[] = {
151static const struct cx18_card cx18_card_h900 = { 152static const struct cx18_card cx18_card_h900 = {
152 .type = CX18_CARD_COMPRO_H900, 153 .type = CX18_CARD_COMPRO_H900,
153 .name = "Compro VideoMate H900", 154 .name = "Compro VideoMate H900",
154 .comment = "VBI is not yet supported\n", 155 .comment = "Raw VBI supported; Sliced VBI is not yet supported\n",
155 .v4l2_capabilities = CX18_CAP_ENCODER, 156 .v4l2_capabilities = CX18_CAP_ENCODER,
156 .hw_audio_ctrl = CX18_HW_CX23418, 157 .hw_audio_ctrl = CX18_HW_CX23418,
157 .hw_all = CX18_HW_TUNER, 158 .hw_all = CX18_HW_TUNER,
@@ -248,7 +249,7 @@ static const struct cx18_card_pci_info cx18_pci_cnxt_raptor_pal[] = {
248static const struct cx18_card cx18_card_cnxt_raptor_pal = { 249static const struct cx18_card cx18_card_cnxt_raptor_pal = {
249 .type = CX18_CARD_CNXT_RAPTOR_PAL, 250 .type = CX18_CARD_CNXT_RAPTOR_PAL,
250 .name = "Conexant Raptor PAL/SECAM", 251 .name = "Conexant Raptor PAL/SECAM",
251 .comment = "VBI is not yet supported\n", 252 .comment = "Raw VBI supported; Sliced VBI is not yet supported\n",
252 .v4l2_capabilities = CX18_CAP_ENCODER, 253 .v4l2_capabilities = CX18_CAP_ENCODER,
253 .hw_audio_ctrl = CX18_HW_CX23418, 254 .hw_audio_ctrl = CX18_HW_CX23418,
254 .hw_muxer = CX18_HW_GPIO, 255 .hw_muxer = CX18_HW_GPIO,
diff --git a/drivers/media/video/cx18/cx18-cards.h b/drivers/media/video/cx18/cx18-cards.h
index 32155f6e6fe4..6fa7bcb42dde 100644
--- a/drivers/media/video/cx18/cx18-cards.h
+++ b/drivers/media/video/cx18/cx18-cards.h
@@ -4,6 +4,7 @@
4 * Derived from ivtv-cards.c 4 * Derived from ivtv-cards.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -47,8 +48,9 @@
47 48
48/* V4L2 capability aliases */ 49/* V4L2 capability aliases */
49#define CX18_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \ 50#define CX18_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \
50 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE) 51 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | \
51/* | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE) not yet */ 52 V4L2_CAP_VBI_CAPTURE)
53/* | V4L2_CAP_SLICED_VBI_CAPTURE) not yet */
52 54
53struct cx18_card_video_input { 55struct cx18_card_video_input {
54 u8 video_type; /* video input type */ 56 u8 video_type; /* video input type */
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index f46c7e5ed747..17edf305d649 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -259,6 +259,7 @@ int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
259 return err; 259 return err;
260 } 260 }
261 if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) { 261 if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
262 struct cx18_api_func_private priv;
262 struct cx2341x_mpeg_params p = cx->params; 263 struct cx2341x_mpeg_params p = cx->params;
263 int err = cx2341x_ext_ctrls(&p, atomic_read(&cx->ana_capturing), 264 int err = cx2341x_ext_ctrls(&p, atomic_read(&cx->ana_capturing),
264 c, VIDIOC_S_EXT_CTRLS); 265 c, VIDIOC_S_EXT_CTRLS);
@@ -278,7 +279,9 @@ int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
278 fmt.fmt.pix.height = cx->params.height; 279 fmt.fmt.pix.height = cx->params.height;
279 cx18_av_cmd(cx, VIDIOC_S_FMT, &fmt); 280 cx18_av_cmd(cx, VIDIOC_S_FMT, &fmt);
280 } 281 }
281 err = cx2341x_update(cx, cx18_api_func, &cx->params, &p); 282 priv.cx = cx;
283 priv.s = &cx->streams[id->type];
284 err = cx2341x_update(&priv, cx18_api_func, &cx->params, &p);
282 if (!err && cx->params.stream_vbi_fmt != p.stream_vbi_fmt) 285 if (!err && cx->params.stream_vbi_fmt != p.stream_vbi_fmt)
283 err = cx18_setup_vbi_fmt(cx, p.stream_vbi_fmt); 286 err = cx18_setup_vbi_fmt(cx, p.stream_vbi_fmt);
284 cx->params = p; 287 cx->params = p;
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 7874d9790a51..f50cf2167adc 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -75,48 +75,76 @@ static int radio[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
75 -1, -1, -1, -1, -1, -1, -1, -1, 75 -1, -1, -1, -1, -1, -1, -1, -1,
76 -1, -1, -1, -1, -1, -1, -1, -1, 76 -1, -1, -1, -1, -1, -1, -1, -1,
77 -1, -1, -1, -1, -1, -1, -1, -1 }; 77 -1, -1, -1, -1, -1, -1, -1, -1 };
78static int mmio_ndelay[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
79 -1, -1, -1, -1, -1, -1, -1, -1,
80 -1, -1, -1, -1, -1, -1, -1, -1,
81 -1, -1, -1, -1, -1, -1, -1, -1 };
82static unsigned cardtype_c = 1; 78static unsigned cardtype_c = 1;
83static unsigned tuner_c = 1; 79static unsigned tuner_c = 1;
84static unsigned radio_c = 1; 80static unsigned radio_c = 1;
85static unsigned mmio_ndelay_c = 1;
86static char pal[] = "--"; 81static char pal[] = "--";
87static char secam[] = "--"; 82static char secam[] = "--";
88static char ntsc[] = "-"; 83static char ntsc[] = "-";
89 84
90/* Buffers */ 85/* Buffers */
91static int enc_mpg_buffers = CX18_DEFAULT_ENC_MPG_BUFFERS;
92static int enc_ts_buffers = CX18_DEFAULT_ENC_TS_BUFFERS; 86static int enc_ts_buffers = CX18_DEFAULT_ENC_TS_BUFFERS;
87static int enc_mpg_buffers = CX18_DEFAULT_ENC_MPG_BUFFERS;
88static int enc_idx_buffers = CX18_DEFAULT_ENC_IDX_BUFFERS;
93static int enc_yuv_buffers = CX18_DEFAULT_ENC_YUV_BUFFERS; 89static int enc_yuv_buffers = CX18_DEFAULT_ENC_YUV_BUFFERS;
94static int enc_vbi_buffers = CX18_DEFAULT_ENC_VBI_BUFFERS; 90static int enc_vbi_buffers = CX18_DEFAULT_ENC_VBI_BUFFERS;
95static int enc_pcm_buffers = CX18_DEFAULT_ENC_PCM_BUFFERS; 91static int enc_pcm_buffers = CX18_DEFAULT_ENC_PCM_BUFFERS;
96 92
93static int enc_ts_bufsize = CX18_DEFAULT_ENC_TS_BUFSIZE;
94static int enc_mpg_bufsize = CX18_DEFAULT_ENC_MPG_BUFSIZE;
95static int enc_idx_bufsize = CX18_DEFAULT_ENC_IDX_BUFSIZE;
96static int enc_yuv_bufsize = CX18_DEFAULT_ENC_YUV_BUFSIZE;
97/* VBI bufsize based on standards supported by card tuner for now */
98static int enc_pcm_bufsize = CX18_DEFAULT_ENC_PCM_BUFSIZE;
99
100static int enc_ts_bufs = -1;
101static int enc_mpg_bufs = -1;
102static int enc_idx_bufs = -1;
103static int enc_yuv_bufs = -1;
104static int enc_vbi_bufs = -1;
105static int enc_pcm_bufs = -1;
106
107
97static int cx18_pci_latency = 1; 108static int cx18_pci_latency = 1;
98 109
99int cx18_retry_mmio = 1; 110static int mmio_ndelay;
111static int retry_mmio = 1;
112
100int cx18_debug; 113int cx18_debug;
101 114
102module_param_array(tuner, int, &tuner_c, 0644); 115module_param_array(tuner, int, &tuner_c, 0644);
103module_param_array(radio, bool, &radio_c, 0644); 116module_param_array(radio, bool, &radio_c, 0644);
104module_param_array(cardtype, int, &cardtype_c, 0644); 117module_param_array(cardtype, int, &cardtype_c, 0644);
105module_param_array(mmio_ndelay, int, &mmio_ndelay_c, 0644);
106module_param_string(pal, pal, sizeof(pal), 0644); 118module_param_string(pal, pal, sizeof(pal), 0644);
107module_param_string(secam, secam, sizeof(secam), 0644); 119module_param_string(secam, secam, sizeof(secam), 0644);
108module_param_string(ntsc, ntsc, sizeof(ntsc), 0644); 120module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
109module_param_named(debug, cx18_debug, int, 0644); 121module_param_named(debug, cx18_debug, int, 0644);
110module_param_named(retry_mmio, cx18_retry_mmio, int, 0644); 122module_param(mmio_ndelay, int, 0644);
123module_param(retry_mmio, int, 0644);
111module_param(cx18_pci_latency, int, 0644); 124module_param(cx18_pci_latency, int, 0644);
112module_param(cx18_first_minor, int, 0644); 125module_param(cx18_first_minor, int, 0644);
113 126
114module_param(enc_mpg_buffers, int, 0644);
115module_param(enc_ts_buffers, int, 0644); 127module_param(enc_ts_buffers, int, 0644);
128module_param(enc_mpg_buffers, int, 0644);
129module_param(enc_idx_buffers, int, 0644);
116module_param(enc_yuv_buffers, int, 0644); 130module_param(enc_yuv_buffers, int, 0644);
117module_param(enc_vbi_buffers, int, 0644); 131module_param(enc_vbi_buffers, int, 0644);
118module_param(enc_pcm_buffers, int, 0644); 132module_param(enc_pcm_buffers, int, 0644);
119 133
134module_param(enc_ts_bufsize, int, 0644);
135module_param(enc_mpg_bufsize, int, 0644);
136module_param(enc_idx_bufsize, int, 0644);
137module_param(enc_yuv_bufsize, int, 0644);
138/* VBI bufsize based on standards supported by card tuner for now */
139module_param(enc_pcm_bufsize, int, 0644);
140
141module_param(enc_ts_bufs, int, 0644);
142module_param(enc_mpg_bufs, int, 0644);
143module_param(enc_idx_bufs, int, 0644);
144module_param(enc_yuv_bufs, int, 0644);
145module_param(enc_vbi_bufs, int, 0644);
146module_param(enc_pcm_bufs, int, 0644);
147
120MODULE_PARM_DESC(tuner, "Tuner type selection,\n" 148MODULE_PARM_DESC(tuner, "Tuner type selection,\n"
121 "\t\t\tsee tuner.h for values"); 149 "\t\t\tsee tuner.h for values");
122MODULE_PARM_DESC(radio, 150MODULE_PARM_DESC(radio,
@@ -152,28 +180,62 @@ MODULE_PARM_DESC(cx18_pci_latency,
152 "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n" 180 "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n"
153 "\t\t\tDefault: Yes"); 181 "\t\t\tDefault: Yes");
154MODULE_PARM_DESC(retry_mmio, 182MODULE_PARM_DESC(retry_mmio,
155 "Check and retry memory mapped IO accesses\n" 183 "(Deprecated) MMIO writes are now always checked and retried\n"
156 "\t\t\tDefault: 1 [Yes]"); 184 "\t\t\tEffectively: 1 [Yes]");
157MODULE_PARM_DESC(mmio_ndelay, 185MODULE_PARM_DESC(mmio_ndelay,
158 "Delay (ns) for each CX23418 memory mapped IO access.\n" 186 "(Deprecated) MMIO accesses are now never purposely delayed\n"
159 "\t\t\tTry larger values that are close to a multiple of the\n" 187 "\t\t\tEffectively: 0 ns");
160 "\t\t\tPCI clock period, 30.3 ns, if your card doesn't work.\n"
161 "\t\t\tDefault: " __stringify(CX18_DEFAULT_MMIO_NDELAY));
162MODULE_PARM_DESC(enc_mpg_buffers,
163 "Encoder MPG Buffers (in MB)\n"
164 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_MPG_BUFFERS));
165MODULE_PARM_DESC(enc_ts_buffers, 188MODULE_PARM_DESC(enc_ts_buffers,
166 "Encoder TS Buffers (in MB)\n" 189 "Encoder TS buffer memory (MB). (enc_ts_bufs can override)\n"
167 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_TS_BUFFERS)); 190 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_TS_BUFFERS));
191MODULE_PARM_DESC(enc_ts_bufsize,
192 "Size of an encoder TS buffer (kB)\n"
193 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_TS_BUFSIZE));
194MODULE_PARM_DESC(enc_ts_bufs,
195 "Number of encoder TS buffers\n"
196 "\t\t\tDefault is computed from other enc_ts_* parameters");
197MODULE_PARM_DESC(enc_mpg_buffers,
198 "Encoder MPG buffer memory (MB). (enc_mpg_bufs can override)\n"
199 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_MPG_BUFFERS));
200MODULE_PARM_DESC(enc_mpg_bufsize,
201 "Size of an encoder MPG buffer (kB)\n"
202 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_MPG_BUFSIZE));
203MODULE_PARM_DESC(enc_mpg_bufs,
204 "Number of encoder MPG buffers\n"
205 "\t\t\tDefault is computed from other enc_mpg_* parameters");
206MODULE_PARM_DESC(enc_idx_buffers,
207 "Encoder IDX buffer memory (MB). (enc_idx_bufs can override)\n"
208 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_IDX_BUFFERS));
209MODULE_PARM_DESC(enc_idx_bufsize,
210 "Size of an encoder IDX buffer (kB)\n"
211 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_IDX_BUFSIZE));
212MODULE_PARM_DESC(enc_idx_bufs,
213 "Number of encoder IDX buffers\n"
214 "\t\t\tDefault is computed from other enc_idx_* parameters");
168MODULE_PARM_DESC(enc_yuv_buffers, 215MODULE_PARM_DESC(enc_yuv_buffers,
169 "Encoder YUV Buffers (in MB)\n" 216 "Encoder YUV buffer memory (MB). (enc_yuv_bufs can override)\n"
170 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_YUV_BUFFERS)); 217 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_YUV_BUFFERS));
218MODULE_PARM_DESC(enc_yuv_bufsize,
219 "Size of an encoder YUV buffer (kB)\n"
220 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_YUV_BUFSIZE));
221MODULE_PARM_DESC(enc_yuv_bufs,
222 "Number of encoder YUV buffers\n"
223 "\t\t\tDefault is computed from other enc_yuv_* parameters");
171MODULE_PARM_DESC(enc_vbi_buffers, 224MODULE_PARM_DESC(enc_vbi_buffers,
172 "Encoder VBI Buffers (in MB)\n" 225 "Encoder VBI buffer memory (MB). (enc_vbi_bufs can override)\n"
173 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_VBI_BUFFERS)); 226 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_VBI_BUFFERS));
227MODULE_PARM_DESC(enc_vbi_bufs,
228 "Number of encoder VBI buffers\n"
229 "\t\t\tDefault is computed from enc_vbi_buffers & tuner std");
174MODULE_PARM_DESC(enc_pcm_buffers, 230MODULE_PARM_DESC(enc_pcm_buffers,
175 "Encoder PCM buffers (in MB)\n" 231 "Encoder PCM buffer memory (MB). (enc_pcm_bufs can override)\n"
176 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_PCM_BUFFERS)); 232 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_PCM_BUFFERS));
233MODULE_PARM_DESC(enc_pcm_bufsize,
234 "Size of an encoder PCM buffer (kB)\n"
235 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_PCM_BUFSIZE));
236MODULE_PARM_DESC(enc_pcm_bufs,
237 "Number of encoder PCM buffers\n"
238 "\t\t\tDefault is computed from other enc_pcm_* parameters");
177 239
178MODULE_PARM_DESC(cx18_first_minor, "Set kernel number assigned to first card"); 240MODULE_PARM_DESC(cx18_first_minor, "Set kernel number assigned to first card");
179 241
@@ -187,7 +249,7 @@ MODULE_VERSION(CX18_VERSION);
187/* Generic utility functions */ 249/* Generic utility functions */
188int cx18_msleep_timeout(unsigned int msecs, int intr) 250int cx18_msleep_timeout(unsigned int msecs, int intr)
189{ 251{
190 int timeout = msecs_to_jiffies(msecs); 252 long int timeout = msecs_to_jiffies(msecs);
191 int sig; 253 int sig;
192 254
193 do { 255 do {
@@ -366,20 +428,69 @@ static void cx18_process_options(struct cx18 *cx)
366{ 428{
367 int i, j; 429 int i, j;
368 430
369 cx->options.megabytes[CX18_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers;
370 cx->options.megabytes[CX18_ENC_STREAM_TYPE_TS] = enc_ts_buffers; 431 cx->options.megabytes[CX18_ENC_STREAM_TYPE_TS] = enc_ts_buffers;
432 cx->options.megabytes[CX18_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers;
433 cx->options.megabytes[CX18_ENC_STREAM_TYPE_IDX] = enc_idx_buffers;
371 cx->options.megabytes[CX18_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers; 434 cx->options.megabytes[CX18_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers;
372 cx->options.megabytes[CX18_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers; 435 cx->options.megabytes[CX18_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers;
373 cx->options.megabytes[CX18_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers; 436 cx->options.megabytes[CX18_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers;
437 cx->options.megabytes[CX18_ENC_STREAM_TYPE_RAD] = 0; /* control only */
438
439 cx->stream_buffers[CX18_ENC_STREAM_TYPE_TS] = enc_ts_bufs;
440 cx->stream_buffers[CX18_ENC_STREAM_TYPE_MPG] = enc_mpg_bufs;
441 cx->stream_buffers[CX18_ENC_STREAM_TYPE_IDX] = enc_idx_bufs;
442 cx->stream_buffers[CX18_ENC_STREAM_TYPE_YUV] = enc_yuv_bufs;
443 cx->stream_buffers[CX18_ENC_STREAM_TYPE_VBI] = enc_vbi_bufs;
444 cx->stream_buffers[CX18_ENC_STREAM_TYPE_PCM] = enc_pcm_bufs;
445 cx->stream_buffers[CX18_ENC_STREAM_TYPE_RAD] = 0; /* control, no data */
446
447 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_TS] = enc_ts_bufsize;
448 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_MPG] = enc_mpg_bufsize;
449 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_IDX] = enc_idx_bufsize;
450 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_YUV] = enc_yuv_bufsize;
451 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_VBI] = 0; /* computed later */
452 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_PCM] = enc_pcm_bufsize;
 453 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_RAD] = 0; /* control, no data */
454
455 /* Except for VBI ensure stream_buffers & stream_buf_size are valid */
456 for (i = 0; i < CX18_MAX_STREAMS; i++) {
457 /* User said to use 0 buffers */
458 if (cx->stream_buffers[i] == 0) {
459 cx->options.megabytes[i] = 0;
460 cx->stream_buf_size[i] = 0;
461 continue;
462 }
463 /* User said to use 0 MB total */
464 if (cx->options.megabytes[i] <= 0) {
465 cx->options.megabytes[i] = 0;
466 cx->stream_buffers[i] = 0;
467 cx->stream_buf_size[i] = 0;
468 continue;
469 }
470 /* VBI is computed later or user said buffer has size 0 */
471 if (cx->stream_buf_size[i] <= 0) {
472 if (i != CX18_ENC_STREAM_TYPE_VBI) {
473 cx->options.megabytes[i] = 0;
474 cx->stream_buffers[i] = 0;
475 cx->stream_buf_size[i] = 0;
476 }
477 continue;
478 }
479 if (cx->stream_buffers[i] < 0) {
480 cx->stream_buffers[i] = cx->options.megabytes[i] * 1024
481 / cx->stream_buf_size[i];
482 } else {
483 /* N.B. This might round down to 0 */
484 cx->options.megabytes[i] =
485 cx->stream_buffers[i] * cx->stream_buf_size[i] / 1024;
486 }
487 cx->stream_buf_size[i] *= 1024; /* convert from kB to bytes */
488 }
489
374 cx->options.cardtype = cardtype[cx->num]; 490 cx->options.cardtype = cardtype[cx->num];
375 cx->options.tuner = tuner[cx->num]; 491 cx->options.tuner = tuner[cx->num];
376 cx->options.radio = radio[cx->num]; 492 cx->options.radio = radio[cx->num];
377 493
378 if (mmio_ndelay[cx->num] < 0)
379 cx->options.mmio_ndelay = CX18_DEFAULT_MMIO_NDELAY;
380 else
381 cx->options.mmio_ndelay = mmio_ndelay[cx->num];
382
383 cx->std = cx18_parse_std(cx); 494 cx->std = cx18_parse_std(cx);
384 if (cx->options.cardtype == -1) { 495 if (cx->options.cardtype == -1) {
385 CX18_INFO("Ignore card\n"); 496 CX18_INFO("Ignore card\n");
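The option handling added in the hunk above ties three knobs together per stream: a memory budget in MB (enc_*_buffers), a per-buffer size in kB (enc_*_bufsize), and an explicit count (enc_*_bufs) that overrides the budget when set. A small standalone sketch of that arithmetic, with illustrative values rather than the driver's defaults:

#include <stdio.h>

int main(void)
{
        int megabytes  = 4;     /* illustrative MB budget for one stream        */
        int bufsize_kb = 32;    /* per-buffer size in kB (e.g. the MPG value)   */
        int bufs       = -1;    /* -1 means "not forced by the user"            */

        if (bufs < 0)
                bufs = megabytes * 1024 / bufsize_kb;    /* 4096 kB / 32 kB = 128     */
        else
                megabytes = bufs * bufsize_kb / 1024;    /* N.B. may round down to 0  */

        printf("%d buffers x %d bytes (%d MB)\n", bufs, bufsize_kb * 1024, megabytes);
        return 0;
}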
@@ -440,22 +551,30 @@ done:
440 */ 551 */
441static int __devinit cx18_init_struct1(struct cx18 *cx) 552static int __devinit cx18_init_struct1(struct cx18 *cx)
442{ 553{
554 int i;
555
443 cx->base_addr = pci_resource_start(cx->dev, 0); 556 cx->base_addr = pci_resource_start(cx->dev, 0);
444 557
445 mutex_init(&cx->serialize_lock); 558 mutex_init(&cx->serialize_lock);
446 mutex_init(&cx->i2c_bus_lock[0]); 559 mutex_init(&cx->i2c_bus_lock[0]);
447 mutex_init(&cx->i2c_bus_lock[1]); 560 mutex_init(&cx->i2c_bus_lock[1]);
448 mutex_init(&cx->gpio_lock); 561 mutex_init(&cx->gpio_lock);
562 mutex_init(&cx->epu2apu_mb_lock);
563 mutex_init(&cx->epu2cpu_mb_lock);
449 564
450 spin_lock_init(&cx->lock); 565 spin_lock_init(&cx->lock);
451 566
452 cx->work_queue = create_singlethread_workqueue(cx->name); 567 cx->work_queue = create_singlethread_workqueue(cx->name);
453 if (cx->work_queue == NULL) { 568 if (cx->work_queue == NULL) {
 454 CX18_ERR("Could not create work queue\n"); 569 CX18_ERR("Unable to create work handler thread\n");
455 return -1; 570 return -ENOMEM;
456 } 571 }
457 572
458 INIT_WORK(&cx->work, cx18_work_handler); 573 for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
574 cx->epu_work_order[i].cx = cx;
575 cx->epu_work_order[i].str = cx->epu_debug_str;
576 INIT_WORK(&cx->epu_work_order[i].work, cx18_epu_work_handler);
577 }
459 578
460 /* start counting open_id at 1 */ 579 /* start counting open_id at 1 */
461 cx->open_id = 1; 580 cx->open_id = 1;
@@ -472,20 +591,55 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
472 init_waitqueue_head(&cx->cap_w); 591 init_waitqueue_head(&cx->cap_w);
473 init_waitqueue_head(&cx->mb_apu_waitq); 592 init_waitqueue_head(&cx->mb_apu_waitq);
474 init_waitqueue_head(&cx->mb_cpu_waitq); 593 init_waitqueue_head(&cx->mb_cpu_waitq);
475 init_waitqueue_head(&cx->mb_epu_waitq);
476 init_waitqueue_head(&cx->mb_hpu_waitq);
477 init_waitqueue_head(&cx->dma_waitq); 594 init_waitqueue_head(&cx->dma_waitq);
478 595
479 /* VBI */ 596 /* VBI */
480 cx->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; 597 cx->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
481 cx->vbi.sliced_in = &cx->vbi.in.fmt.sliced; 598 cx->vbi.sliced_in = &cx->vbi.in.fmt.sliced;
482 cx->vbi.raw_size = 1456; 599
483 cx->vbi.raw_decoder_line_size = 1456; 600 /*
484 cx->vbi.raw_decoder_sav_odd_field = 0x20; 601 * The VBI line sizes depend on the pixel clock and the horiz rate
485 cx->vbi.raw_decoder_sav_even_field = 0x60; 602 *
486 cx->vbi.sliced_decoder_line_size = 272; 603 * (1/Fh)*(2*Fp) = Samples/line
487 cx->vbi.sliced_decoder_sav_odd_field = 0xB0; 604 * = 4 bytes EAV + Anc data in hblank + 4 bytes SAV + active samples
488 cx->vbi.sliced_decoder_sav_even_field = 0xF0; 605 *
606 * Sliced VBI is sent as ancillary data during horizontal blanking
 607 * Raw VBI is sent as active video samples during vertical blanking
 608 *
 609 * We use a BT.656 pixel clock of 13.5 MHz and a BT.656 active line
610 * length of 720 pixels @ 4:2:2 sampling. Thus...
611 *
612 * For systems that use a 15.734 kHz horizontal rate, such as
613 * NTSC-M, PAL-M, PAL-60, and other 60 Hz/525 line systems, we have:
614 *
615 * (1/15.734 kHz) * 2 * 13.5 MHz = 1716 samples/line =
616 * 4 bytes SAV + 268 bytes anc data + 4 bytes SAV + 1440 active samples
617 *
618 * For systems that use a 15.625 kHz horizontal rate, such as
619 * PAL-B/G/H, PAL-I, SECAM-L and other 50 Hz/625 line systems, we have:
620 *
621 * (1/15.625 kHz) * 2 * 13.5 MHz = 1728 samples/line =
622 * 4 bytes SAV + 280 bytes anc data + 4 bytes SAV + 1440 active samples
623 *
624 */
625
626 /* FIXME: init these based on tuner std & modify when std changes */
627 /* CX18-AV-Core number of VBI samples output per horizontal line */
628 cx->vbi.raw_decoder_line_size = 1444; /* 4 byte SAV + 2 * 720 */
629 cx->vbi.sliced_decoder_line_size = 272; /* 60 Hz: 268+4, 50 Hz: 280+4 */
630
631 /* CX18-AV-Core VBI samples/line possibly rounded up */
632 cx->vbi.raw_size = 1444; /* Real max size is 1444 */
633 cx->vbi.sliced_size = 284; /* Real max size is 284 */
634
635 /*
636 * CX18-AV-Core SAV/EAV RP codes in VIP 1.x mode
637 * Task Field VerticalBlank HorizontalBlank 0 0 0 0
638 */
639 cx->vbi.raw_decoder_sav_odd_field = 0x20; /* V */
640 cx->vbi.raw_decoder_sav_even_field = 0x60; /* FV */
641 cx->vbi.sliced_decoder_sav_odd_field = 0xB0; /* T VH - actually EAV */
642 cx->vbi.sliced_decoder_sav_even_field = 0xF0; /* TFVH - actually EAV */
489 return 0; 643 return 0;
490} 644}
491 645
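The samples-per-line figures quoted in the comment block above can be checked directly from (1/Fh) * 2 * Fp with Fp = 13.5 MHz; a quick standalone sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
        double fp     = 13.5e6;         /* BT.656 pixel clock, Hz           */
        double fh_525 = 15734.0;        /* 60 Hz/525-line horizontal rate   */
        double fh_625 = 15625.0;        /* 50 Hz/625-line horizontal rate   */

        printf("525-line: %.0f samples/line\n", 2.0 * fp / fh_525);   /* ~1716 */
        printf("625-line: %.0f samples/line\n", 2.0 * fp / fh_625);   /* 1728  */
        return 0;
}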
@@ -518,6 +672,7 @@ static void __devinit cx18_init_struct2(struct cx18 *cx)
518 cx->av_state.aud_input = CX18_AV_AUDIO8; 672 cx->av_state.aud_input = CX18_AV_AUDIO8;
519 cx->av_state.audclk_freq = 48000; 673 cx->av_state.audclk_freq = 48000;
520 cx->av_state.audmode = V4L2_TUNER_MODE_LANG1; 674 cx->av_state.audmode = V4L2_TUNER_MODE_LANG1;
675 /* FIXME - 8 is NTSC value, investigate */
521 cx->av_state.vbi_line_offset = 8; 676 cx->av_state.vbi_line_offset = 8;
522} 677}
523 678
@@ -662,12 +817,9 @@ static int __devinit cx18_probe(struct pci_dev *dev,
662 817
663 /* PCI Device Setup */ 818 /* PCI Device Setup */
664 retval = cx18_setup_pci(cx, dev, pci_id); 819 retval = cx18_setup_pci(cx, dev, pci_id);
665 if (retval != 0) { 820 if (retval != 0)
666 if (retval == -EIO) 821 goto free_workqueue;
667 goto free_workqueue; 822
668 else if (retval == -ENXIO)
669 goto free_mem;
670 }
671 /* save cx in the pci struct for later use */ 823 /* save cx in the pci struct for later use */
672 pci_set_drvdata(dev, cx); 824 pci_set_drvdata(dev, cx);
673 825
@@ -726,6 +878,7 @@ static int __devinit cx18_probe(struct pci_dev *dev,
726 goto free_i2c; 878 goto free_i2c;
727 } 879 }
728 cx18_init_memory(cx); 880 cx18_init_memory(cx);
881 cx18_init_scb(cx);
729 882
730 /* Register IRQ */ 883 /* Register IRQ */
731 retval = request_irq(cx->dev->irq, cx18_irq_handler, 884 retval = request_irq(cx->dev->irq, cx18_irq_handler,
@@ -739,8 +892,6 @@ static int __devinit cx18_probe(struct pci_dev *dev,
739 cx->std = V4L2_STD_NTSC_M; 892 cx->std = V4L2_STD_NTSC_M;
740 893
741 if (cx->options.tuner == -1) { 894 if (cx->options.tuner == -1) {
742 int i;
743
744 for (i = 0; i < CX18_CARD_MAX_TUNERS; i++) { 895 for (i = 0; i < CX18_CARD_MAX_TUNERS; i++) {
745 if ((cx->std & cx->card->tuners[i].std) == 0) 896 if ((cx->std & cx->card->tuners[i].std) == 0)
746 continue; 897 continue;
@@ -777,13 +928,23 @@ static int __devinit cx18_probe(struct pci_dev *dev,
777 } 928 }
778 cx->params.video_gop_size = cx->is_60hz ? 15 : 12; 929 cx->params.video_gop_size = cx->is_60hz ? 15 : 12;
779 930
780 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_MPG] = 0x08000; 931 /*
781 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_TS] = 0x08000; 932 * FIXME: setting the buffer size based on the tuner standard is
782 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_PCM] = 0x01200; 933 * suboptimal, as the CVBS and SVideo inputs could use a different std
783 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_YUV] = 0x20000; 934 * and the buffer could end up being too small in that case.
935 */
784 vbi_buf_size = cx->vbi.raw_size * (cx->is_60hz ? 24 : 36) / 2; 936 vbi_buf_size = cx->vbi.raw_size * (cx->is_60hz ? 24 : 36) / 2;
785 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_VBI] = vbi_buf_size; 937 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_VBI] = vbi_buf_size;
786 938
939 if (cx->stream_buffers[CX18_ENC_STREAM_TYPE_VBI] < 0)
940 cx->stream_buffers[CX18_ENC_STREAM_TYPE_VBI] =
941 cx->options.megabytes[CX18_ENC_STREAM_TYPE_VBI] * 1024 * 1024
942 / vbi_buf_size;
943 else
944 cx->options.megabytes[CX18_ENC_STREAM_TYPE_VBI] =
945 cx->stream_buffers[CX18_ENC_STREAM_TYPE_VBI] * vbi_buf_size
946 / (1024 * 1024);
947
787 if (cx->options.radio > 0) 948 if (cx->options.radio > 0)
788 cx->v4l2_cap |= V4L2_CAP_RADIO; 949 cx->v4l2_cap |= V4L2_CAP_RADIO;
789 950
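With raw_size = 1444 from cx18_init_struct1() above, the per-frame VBI buffer size computed in this hunk works out to 1444 * 24 / 2 = 17328 bytes at 60 Hz and 1444 * 36 / 2 = 25992 bytes at 50 Hz. A sketch of that calculation and of deriving the buffer count from the default 1 MB VBI budget (the 1 MB figure is CX18_DEFAULT_ENC_VBI_BUFFERS; the rest is illustrative):

#include <stdio.h>

int main(void)
{
        int raw_size = 1444;    /* samples per raw VBI line, set earlier        */
        int is_60hz  = 1;

        int vbi_buf_size = raw_size * (is_60hz ? 24 : 36) / 2;
        printf("60 Hz buffer: %d bytes\n", vbi_buf_size);             /* 17328 */
        printf("50 Hz buffer: %d bytes\n", raw_size * 36 / 2);        /* 25992 */

        /* Derive the buffer count from a 1 MB VBI budget */
        int megabytes = 1;
        int bufs = megabytes * 1024 * 1024 / vbi_buf_size;
        printf("%d buffers from %d MB\n", bufs, megabytes);           /* 60    */
        return 0;
}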
@@ -844,7 +1005,6 @@ err:
844 if (retval == 0) 1005 if (retval == 0)
845 retval = -ENODEV; 1006 retval = -ENODEV;
846 CX18_ERR("Error %d on initialization\n", retval); 1007 CX18_ERR("Error %d on initialization\n", retval);
847 cx18_log_statistics(cx);
848 1008
849 i = cx->num; 1009 i = cx->num;
850 spin_lock(&cx18_cards_lock); 1010 spin_lock(&cx18_cards_lock);
@@ -923,6 +1083,13 @@ int cx18_init_on_first_open(struct cx18 *cx)
923 return 0; 1083 return 0;
924} 1084}
925 1085
1086static void cx18_cancel_epu_work_orders(struct cx18 *cx)
1087{
1088 int i;
1089 for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++)
1090 cancel_work_sync(&cx->epu_work_order[i].work);
1091}
1092
926static void cx18_remove(struct pci_dev *pci_dev) 1093static void cx18_remove(struct pci_dev *pci_dev)
927{ 1094{
928 struct cx18 *cx = pci_get_drvdata(pci_dev); 1095 struct cx18 *cx = pci_get_drvdata(pci_dev);
@@ -940,7 +1107,8 @@ static void cx18_remove(struct pci_dev *pci_dev)
940 1107
941 cx18_halt_firmware(cx); 1108 cx18_halt_firmware(cx);
942 1109
943 flush_workqueue(cx->work_queue); 1110 cx18_cancel_epu_work_orders(cx);
1111
944 destroy_workqueue(cx->work_queue); 1112 destroy_workqueue(cx->work_queue);
945 1113
946 cx18_streams_cleanup(cx, 1); 1114 cx18_streams_cleanup(cx, 1);
@@ -955,7 +1123,6 @@ static void cx18_remove(struct pci_dev *pci_dev)
955 1123
956 pci_disable_device(cx->dev); 1124 pci_disable_device(cx->dev);
957 1125
958 cx18_log_statistics(cx);
959 CX18_INFO("Removed %s, card #%d\n", cx->card_name, cx->num); 1126 CX18_INFO("Removed %s, card #%d\n", cx->card_name, cx->num);
960} 1127}
961 1128
@@ -1004,6 +1171,7 @@ static void module_cleanup(void)
1004 continue; 1171 continue;
1005 kfree(cx18_cards[i]); 1172 kfree(cx18_cards[i]);
1006 } 1173 }
1174
1007} 1175}
1008 1176
1009module_init(module_start); 1177module_init(module_start);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index bbdd5f25041d..0d2edebc39b4 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -4,6 +4,7 @@
4 * Derived from ivtv-driver.h 4 * Derived from ivtv-driver.h
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -64,9 +65,6 @@
64# error "This driver requires kernel PCI support." 65# error "This driver requires kernel PCI support."
65#endif 66#endif
66 67
67/* Default delay to throttle mmio access to the CX23418 */
68#define CX18_DEFAULT_MMIO_NDELAY 0 /* 0 ns = 0 PCI clock(s) / 33 MHz */
69
70#define CX18_MEM_OFFSET 0x00000000 68#define CX18_MEM_OFFSET 0x00000000
71#define CX18_MEM_SIZE 0x04000000 69#define CX18_MEM_SIZE 0x04000000
72#define CX18_REG_OFFSET 0x02000000 70#define CX18_REG_OFFSET 0x02000000
@@ -117,6 +115,17 @@
117#define CX18_DEFAULT_ENC_VBI_BUFFERS 1 115#define CX18_DEFAULT_ENC_VBI_BUFFERS 1
118#define CX18_DEFAULT_ENC_PCM_BUFFERS 1 116#define CX18_DEFAULT_ENC_PCM_BUFFERS 1
119 117
118/* Maximum firmware DMA buffers per stream */
119#define CX18_MAX_FW_MDLS_PER_STREAM 63
120
121/* DMA buffer, default size in kB allocated */
122#define CX18_DEFAULT_ENC_TS_BUFSIZE 32
123#define CX18_DEFAULT_ENC_MPG_BUFSIZE 32
124#define CX18_DEFAULT_ENC_IDX_BUFSIZE 32
125#define CX18_DEFAULT_ENC_YUV_BUFSIZE 128
126/* Default VBI bufsize based on standards supported by card tuner for now */
127#define CX18_DEFAULT_ENC_PCM_BUFSIZE 4
128
120/* i2c stuff */ 129/* i2c stuff */
121#define I2C_CLIENTS_MAX 16 130#define I2C_CLIENTS_MAX 16
122 131
@@ -176,7 +185,6 @@
176 185
177#define CX18_MAX_PGM_INDEX (400) 186#define CX18_MAX_PGM_INDEX (400)
178 187
179extern int cx18_retry_mmio; /* enable check & retry of mmio accesses */
180extern int cx18_debug; 188extern int cx18_debug;
181 189
182 190
@@ -185,7 +193,6 @@ struct cx18_options {
185 int cardtype; /* force card type on load */ 193 int cardtype; /* force card type on load */
186 int tuner; /* set tuner on load */ 194 int tuner; /* set tuner on load */
187 int radio; /* enable/disable radio */ 195 int radio; /* enable/disable radio */
188 unsigned long mmio_ndelay; /* delay in ns after every PCI mmio access */
189}; 196};
190 197
191/* per-buffer bit flags */ 198/* per-buffer bit flags */
@@ -203,11 +210,8 @@ struct cx18_options {
203#define CX18_F_I_EOS 4 /* End of encoder stream */ 210#define CX18_F_I_EOS 4 /* End of encoder stream */
204#define CX18_F_I_RADIO_USER 5 /* radio tuner is selected */ 211#define CX18_F_I_RADIO_USER 5 /* radio tuner is selected */
205#define CX18_F_I_ENC_PAUSED 13 /* the encoder is paused */ 212#define CX18_F_I_ENC_PAUSED 13 /* the encoder is paused */
206#define CX18_F_I_HAVE_WORK 15 /* there is work to be done */
207#define CX18_F_I_WORK_HANDLER_DVB 18 /* work to be done for DVB */
208#define CX18_F_I_INITED 21 /* set after first open */ 213#define CX18_F_I_INITED 21 /* set after first open */
209#define CX18_F_I_FAILED 22 /* set if first open failed */ 214#define CX18_F_I_FAILED 22 /* set if first open failed */
210#define CX18_F_I_WORK_INITED 23 /* worker thread initialized */
211 215
212/* These are the VBI types as they appear in the embedded VBI private packets. */ 216/* These are the VBI types as they appear in the embedded VBI private packets. */
213#define CX18_SLICED_TYPE_TELETEXT_B (1) 217#define CX18_SLICED_TYPE_TELETEXT_B (1)
@@ -220,6 +224,7 @@ struct cx18_buffer {
220 dma_addr_t dma_handle; 224 dma_addr_t dma_handle;
221 u32 id; 225 u32 id;
222 unsigned long b_flags; 226 unsigned long b_flags;
227 unsigned skipped;
223 char *buf; 228 char *buf;
224 229
225 u32 bytesused; 230 u32 bytesused;
@@ -248,6 +253,27 @@ struct cx18_dvb {
248struct cx18; /* forward reference */ 253struct cx18; /* forward reference */
249struct cx18_scb; /* forward reference */ 254struct cx18_scb; /* forward reference */
250 255
256
257#define CX18_MAX_MDL_ACKS 2
258#define CX18_MAX_EPU_WORK_ORDERS (CX18_MAX_FW_MDLS_PER_STREAM + 7)
259/* CPU_DE_RELEASE_MDL can burst CX18_MAX_FW_MDLS_PER_STREAM orders in a group */
260
261#define CX18_F_EWO_MB_STALE_UPON_RECEIPT 0x1
262#define CX18_F_EWO_MB_STALE_WHILE_PROC 0x2
263#define CX18_F_EWO_MB_STALE \
264 (CX18_F_EWO_MB_STALE_UPON_RECEIPT | CX18_F_EWO_MB_STALE_WHILE_PROC)
265
266struct cx18_epu_work_order {
267 struct work_struct work;
268 atomic_t pending;
269 struct cx18 *cx;
270 unsigned long flags;
271 int rpu;
272 struct cx18_mailbox mb;
273 struct cx18_mdl_ack mdl_ack[CX18_MAX_MDL_ACKS];
274 char *str;
275};
276
251#define CX18_INVALID_TASK_HANDLE 0xffffffff 277#define CX18_INVALID_TASK_HANDLE 0xffffffff
252 278
253struct cx18_stream { 279struct cx18_stream {
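The cx18_epu_work_order struct above gives the driver a pool of independent work items (up to CX18_MAX_FW_MDLS_PER_STREAM + 7 of them), so several firmware mailbox messages can be queued for deferred handling at once instead of sharing the single work_struct used before. A stripped-down, hypothetical module sketch of that pool-of-work-orders idea, not the driver's actual handler:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#define N_ORDERS 4                      /* tiny pool for illustration */

struct demo_order {
        struct work_struct work;
        int seq;                        /* payload copied from a mailbox */
};

static struct workqueue_struct *wq;
static struct demo_order orders[N_ORDERS];

static void demo_handler(struct work_struct *work)
{
        struct demo_order *o = container_of(work, struct demo_order, work);

        pr_info("processed order %d\n", o->seq);
}

static int __init demo_init(void)
{
        int i;

        wq = create_singlethread_workqueue("demo");
        if (!wq)
                return -ENOMEM;
        for (i = 0; i < N_ORDERS; i++) {
                orders[i].seq = i;
                INIT_WORK(&orders[i].work, demo_handler);
                queue_work(wq, &orders[i].work);    /* as the IRQ path would */
        }
        return 0;
}

static void __exit demo_exit(void)
{
        int i;

        for (i = 0; i < N_ORDERS; i++)
                cancel_work_sync(&orders[i].work);  /* mirrors cx18_cancel_epu_work_orders() */
        destroy_workqueue(wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");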
@@ -261,7 +287,7 @@ struct cx18_stream {
261 unsigned mdl_offset; 287 unsigned mdl_offset;
262 288
263 u32 id; 289 u32 id;
264 spinlock_t qlock; /* locks access to the queues */ 290 struct mutex qlock; /* locks access to the queues */
265 unsigned long s_flags; /* status flags, see above */ 291 unsigned long s_flags; /* status flags, see above */
266 int dma; /* can be PCI_DMA_TODEVICE, 292 int dma; /* can be PCI_DMA_TODEVICE,
267 PCI_DMA_FROMDEVICE or 293 PCI_DMA_FROMDEVICE or
@@ -275,8 +301,8 @@ struct cx18_stream {
275 301
276 /* Buffer Queues */ 302 /* Buffer Queues */
277 struct cx18_queue q_free; /* free buffers */ 303 struct cx18_queue q_free; /* free buffers */
278 struct cx18_queue q_full; /* full buffers */ 304 struct cx18_queue q_busy; /* busy buffers - in use by firmware */
279 struct cx18_queue q_io; /* waiting for I/O */ 305 struct cx18_queue q_full; /* full buffers - data for user apps */
280 306
281 /* DVB / Digital Transport */ 307 /* DVB / Digital Transport */
282 struct cx18_dvb dvb; 308 struct cx18_dvb dvb;
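The queue rename above reflects a simple buffer life cycle: a free buffer is handed to the firmware (q_free to q_busy), the firmware returns it with captured data (q_busy to q_full), and once a user application has drained it the driver recycles it (q_full to q_free). A trivial standalone sketch of that cycle, purely illustrative of the comments above and much simpler than the real cx18_enqueue()/cx18_dequeue() traffic:

#include <stdio.h>

enum q { Q_FREE, Q_BUSY, Q_FULL };
static const char *qname[] = { "q_free", "q_busy", "q_full" };

static enum q pos = Q_FREE;     /* one buffer's current queue */

static void move(enum q to, const char *why)
{
        printf("%s -> %s (%s)\n", qname[pos], qname[to], why);
        pos = to;
}

int main(void)
{
        move(Q_BUSY, "MDL handed to firmware");
        move(Q_FULL, "firmware ack: data captured");
        move(Q_FREE, "user read() drained the buffer");
        return 0;
}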
@@ -353,12 +379,7 @@ struct cx18_i2c_algo_callback_data {
353 int bus_index; /* 0 or 1 for the cx23418's 1st or 2nd I2C bus */ 379 int bus_index; /* 0 or 1 for the cx23418's 1st or 2nd I2C bus */
354}; 380};
355 381
356#define CX18_MAX_MMIO_RETRIES 10 382#define CX18_MAX_MMIO_WR_RETRIES 10
357
358struct cx18_mmio_stats {
359 atomic_t retried_write[CX18_MAX_MMIO_RETRIES+1];
360 atomic_t retried_read[CX18_MAX_MMIO_RETRIES+1];
361};
362 383
363/* Struct to hold info about cx18 cards */ 384/* Struct to hold info about cx18 cards */
364struct cx18 { 385struct cx18 {
@@ -378,7 +399,9 @@ struct cx18 {
378 u32 v4l2_cap; /* V4L2 capabilities of card */ 399 u32 v4l2_cap; /* V4L2 capabilities of card */
379 u32 hw_flags; /* Hardware description of the board */ 400 u32 hw_flags; /* Hardware description of the board */
380 unsigned mdl_offset; 401 unsigned mdl_offset;
381 struct cx18_scb __iomem *scb; /* pointer to SCB */ 402 struct cx18_scb __iomem *scb; /* pointer to SCB */
403 struct mutex epu2apu_mb_lock; /* protect driver to chip mailbox in SCB*/
404 struct mutex epu2cpu_mb_lock; /* protect driver to chip mailbox in SCB*/
382 405
383 struct cx18_av_state av_state; 406 struct cx18_av_state av_state;
384 407
@@ -397,6 +420,7 @@ struct cx18 {
397 420
398 struct mutex serialize_lock; /* mutex used to serialize open/close/start/stop/ioctl operations */ 421 struct mutex serialize_lock; /* mutex used to serialize open/close/start/stop/ioctl operations */
399 struct cx18_options options; /* User options */ 422 struct cx18_options options; /* User options */
423 int stream_buffers[CX18_MAX_STREAMS]; /* # of buffers for each stream */
400 int stream_buf_size[CX18_MAX_STREAMS]; /* Stream buffer size */ 424 int stream_buf_size[CX18_MAX_STREAMS]; /* Stream buffer size */
401 struct cx18_stream streams[CX18_MAX_STREAMS]; /* Stream data */ 425 struct cx18_stream streams[CX18_MAX_STREAMS]; /* Stream data */
402 unsigned long i_flags; /* global cx18 flags */ 426 unsigned long i_flags; /* global cx18 flags */
@@ -428,14 +452,17 @@ struct cx18 {
428 452
429 wait_queue_head_t mb_apu_waitq; 453 wait_queue_head_t mb_apu_waitq;
430 wait_queue_head_t mb_cpu_waitq; 454 wait_queue_head_t mb_cpu_waitq;
431 wait_queue_head_t mb_epu_waitq;
432 wait_queue_head_t mb_hpu_waitq;
433 wait_queue_head_t cap_w; 455 wait_queue_head_t cap_w;
434 /* when the current DMA is finished this queue is woken up */ 456 /* when the current DMA is finished this queue is woken up */
435 wait_queue_head_t dma_waitq; 457 wait_queue_head_t dma_waitq;
436 458
459 u32 sw1_irq_mask;
460 u32 sw2_irq_mask;
461 u32 hw2_irq_mask;
462
437 struct workqueue_struct *work_queue; 463 struct workqueue_struct *work_queue;
438 struct work_struct work; 464 struct cx18_epu_work_order epu_work_order[CX18_MAX_EPU_WORK_ORDERS];
465 char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
439 466
440 /* i2c */ 467 /* i2c */
441 struct i2c_adapter i2c_adap[2]; 468 struct i2c_adapter i2c_adap[2];
@@ -450,9 +477,6 @@ struct cx18 {
450 u32 gpio_val; 477 u32 gpio_val;
451 struct mutex gpio_lock; 478 struct mutex gpio_lock;
452 479
453 /* Statistics */
454 struct cx18_mmio_stats mmio_stats;
455
456 /* v4l2 and User settings */ 480 /* v4l2 and User settings */
457 481
458 /* codec settings */ 482 /* codec settings */
@@ -481,4 +505,10 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv);
481/* First-open initialization: load firmware, etc. */ 505/* First-open initialization: load firmware, etc. */
482int cx18_init_on_first_open(struct cx18 *cx); 506int cx18_init_on_first_open(struct cx18 *cx);
483 507
508/* Test if the current VBI mode is raw (1) or sliced (0) */
509static inline int cx18_raw_vbi(const struct cx18 *cx)
510{
511 return cx->vbi.in.type == V4L2_BUF_TYPE_VBI_CAPTURE;
512}
513
484#endif /* CX18_DRIVER_H */ 514#endif /* CX18_DRIVER_H */
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index 4542e2e5e3d7..bd5e6f3fd4d0 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -2,6 +2,7 @@
2 * cx18 functions for DVB support 2 * cx18 functions for DVB support
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -23,8 +24,6 @@
23#include "cx18-dvb.h" 24#include "cx18-dvb.h"
24#include "cx18-io.h" 25#include "cx18-io.h"
25#include "cx18-streams.h" 26#include "cx18-streams.h"
26#include "cx18-queue.h"
27#include "cx18-scb.h"
28#include "cx18-cards.h" 27#include "cx18-cards.h"
29#include "s5h1409.h" 28#include "s5h1409.h"
30#include "mxl5005s.h" 29#include "mxl5005s.h"
@@ -109,20 +108,23 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
109 if (!demux->dmx.frontend) 108 if (!demux->dmx.frontend)
110 return -EINVAL; 109 return -EINVAL;
111 110
112 if (stream) { 111 if (!stream)
113 mutex_lock(&stream->dvb.feedlock); 112 return -EINVAL;
114 if (stream->dvb.feeding++ == 0) { 113
115 CX18_DEBUG_INFO("Starting Transport DMA\n"); 114 mutex_lock(&stream->dvb.feedlock);
116 ret = cx18_start_v4l2_encode_stream(stream); 115 if (stream->dvb.feeding++ == 0) {
117 if (ret < 0) { 116 CX18_DEBUG_INFO("Starting Transport DMA\n");
118 CX18_DEBUG_INFO( 117 set_bit(CX18_F_S_STREAMING, &stream->s_flags);
119 "Failed to start Transport DMA\n"); 118 ret = cx18_start_v4l2_encode_stream(stream);
120 stream->dvb.feeding--; 119 if (ret < 0) {
121 } 120 CX18_DEBUG_INFO("Failed to start Transport DMA\n");
122 } else 121 stream->dvb.feeding--;
123 ret = 0; 122 if (stream->dvb.feeding == 0)
124 mutex_unlock(&stream->dvb.feedlock); 123 clear_bit(CX18_F_S_STREAMING, &stream->s_flags);
125 } 124 }
125 } else
126 ret = 0;
127 mutex_unlock(&stream->dvb.feedlock);
126 128
127 return ret; 129 return ret;
128} 130}
@@ -215,6 +217,10 @@ int cx18_dvb_register(struct cx18_stream *stream)
215 dvb_net_init(dvb_adapter, &dvb->dvbnet, dmx); 217 dvb_net_init(dvb_adapter, &dvb->dvbnet, dmx);
216 218
217 CX18_INFO("DVB Frontend registered\n"); 219 CX18_INFO("DVB Frontend registered\n");
220 CX18_INFO("Registered DVB adapter%d for %s (%d x %d kB)\n",
221 stream->dvb.dvb_adapter.num, stream->name,
222 stream->buffers, stream->buf_size/1024);
223
218 mutex_init(&dvb->feedlock); 224 mutex_init(&dvb->feedlock);
219 dvb->enabled = 1; 225 dvb->enabled = 1;
220 return ret; 226 return ret;
@@ -302,24 +308,3 @@ static int dvb_register(struct cx18_stream *stream)
302 308
303 return ret; 309 return ret;
304} 310}
305
306void cx18_dvb_work_handler(struct cx18 *cx)
307{
308 struct cx18_buffer *buf;
309 struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_TS];
310
311 while ((buf = cx18_dequeue(s, &s->q_full)) != NULL) {
312 if (s->dvb.enabled)
313 dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
314 buf->bytesused);
315
316 cx18_enqueue(s, buf, &s->q_free);
317 cx18_buf_sync_for_device(s, buf);
318 if (s->handle == CX18_INVALID_TASK_HANDLE) /* FIXME: improve */
319 continue;
320
321 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
322 (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
323 1, buf->id, s->buf_size);
324 }
325}
diff --git a/drivers/media/video/cx18/cx18-dvb.h b/drivers/media/video/cx18/cx18-dvb.h
index bbdcefc87f28..bf8d8f6f5455 100644
--- a/drivers/media/video/cx18/cx18-dvb.h
+++ b/drivers/media/video/cx18/cx18-dvb.h
@@ -23,4 +23,3 @@
23 23
24int cx18_dvb_register(struct cx18_stream *stream); 24int cx18_dvb_register(struct cx18_stream *stream);
25void cx18_dvb_unregister(struct cx18_stream *stream); 25void cx18_dvb_unregister(struct cx18_stream *stream);
26void cx18_dvb_work_handler(struct cx18 *cx);
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 5f9089907544..425271a29517 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -4,6 +4,7 @@
4 * Derived from ivtv-fileops.c 4 * Derived from ivtv-fileops.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -66,12 +67,11 @@ static int cx18_claim_stream(struct cx18_open_id *id, int type)
66 } 67 }
67 s->id = id->open_id; 68 s->id = id->open_id;
68 69
69 /* CX18_DEC_STREAM_TYPE_MPG needs to claim CX18_DEC_STREAM_TYPE_VBI, 70 /* CX18_ENC_STREAM_TYPE_MPG needs to claim CX18_ENC_STREAM_TYPE_VBI
70 CX18_ENC_STREAM_TYPE_MPG needs to claim CX18_ENC_STREAM_TYPE_VBI
71 (provided VBI insertion is on and sliced VBI is selected), for all 71 (provided VBI insertion is on and sliced VBI is selected), for all
72 other streams we're done */ 72 other streams we're done */
73 if (type == CX18_ENC_STREAM_TYPE_MPG && 73 if (type == CX18_ENC_STREAM_TYPE_MPG &&
74 cx->vbi.insert_mpeg && cx->vbi.sliced_in->service_set) { 74 cx->vbi.insert_mpeg && !cx18_raw_vbi(cx)) {
75 vbi_type = CX18_ENC_STREAM_TYPE_VBI; 75 vbi_type = CX18_ENC_STREAM_TYPE_VBI;
76 } else { 76 } else {
77 return 0; 77 return 0;
@@ -185,8 +185,10 @@ static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block,
185 !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) { 185 !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
186 while ((buf = cx18_dequeue(s_vbi, &s_vbi->q_full))) { 186 while ((buf = cx18_dequeue(s_vbi, &s_vbi->q_full))) {
187 /* byteswap and process VBI data */ 187 /* byteswap and process VBI data */
188/* cx18_process_vbi_data(cx, buf, s_vbi->dma_pts, s_vbi->type); */ 188 cx18_process_vbi_data(cx, buf,
189 cx18_enqueue(s_vbi, buf, &s_vbi->q_free); 189 s_vbi->dma_pts,
190 s_vbi->type);
191 cx18_stream_put_buf_fw(s_vbi, buf);
190 } 192 }
191 } 193 }
192 buf = &cx->vbi.sliced_mpeg_buf; 194 buf = &cx->vbi.sliced_mpeg_buf;
@@ -194,11 +196,6 @@ static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block,
194 return buf; 196 return buf;
195 } 197 }
196 198
197 /* do we have leftover data? */
198 buf = cx18_dequeue(s, &s->q_io);
199 if (buf)
200 return buf;
201
202 /* do we have new data? */ 199 /* do we have new data? */
203 buf = cx18_dequeue(s, &s->q_full); 200 buf = cx18_dequeue(s, &s->q_full);
204 if (buf) { 201 if (buf) {
@@ -262,7 +259,7 @@ static size_t cx18_copy_buf_to_user(struct cx18_stream *s,
262 if (len > ucount) 259 if (len > ucount)
263 len = ucount; 260 len = ucount;
264 if (cx->vbi.insert_mpeg && s->type == CX18_ENC_STREAM_TYPE_MPG && 261 if (cx->vbi.insert_mpeg && s->type == CX18_ENC_STREAM_TYPE_MPG &&
265 cx->vbi.sliced_in->service_set && buf != &cx->vbi.sliced_mpeg_buf) { 262 !cx18_raw_vbi(cx) && buf != &cx->vbi.sliced_mpeg_buf) {
266 const char *start = buf->buf + buf->readpos; 263 const char *start = buf->buf + buf->readpos;
267 const char *p = start + 1; 264 const char *p = start + 1;
268 const u8 *q; 265 const u8 *q;
@@ -337,8 +334,7 @@ static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
337 /* Each VBI buffer is one frame, the v4l2 API says that for VBI the 334 /* Each VBI buffer is one frame, the v4l2 API says that for VBI the
338 frames should arrive one-by-one, so make sure we never output more 335 frames should arrive one-by-one, so make sure we never output more
339 than one VBI frame at a time */ 336 than one VBI frame at a time */
340 if (s->type == CX18_ENC_STREAM_TYPE_VBI && 337 if (s->type == CX18_ENC_STREAM_TYPE_VBI && !cx18_raw_vbi(cx))
341 cx->vbi.sliced_in->service_set)
342 single_frame = 1; 338 single_frame = 1;
343 339
344 for (;;) { 340 for (;;) {
@@ -365,16 +361,10 @@ static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
365 tot_count - tot_written); 361 tot_count - tot_written);
366 362
367 if (buf != &cx->vbi.sliced_mpeg_buf) { 363 if (buf != &cx->vbi.sliced_mpeg_buf) {
368 if (buf->readpos == buf->bytesused) { 364 if (buf->readpos == buf->bytesused)
369 cx18_buf_sync_for_device(s, buf); 365 cx18_stream_put_buf_fw(s, buf);
370 cx18_enqueue(s, buf, &s->q_free); 366 else
371 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, 367 cx18_push(s, buf, &s->q_full);
372 s->handle,
373 (void __iomem *)&cx->scb->cpu_mdl[buf->id] -
374 cx->enc_mem,
375 1, buf->id, s->buf_size);
376 } else
377 cx18_enqueue(s, buf, &s->q_io);
378 } else if (buf->readpos == buf->bytesused) { 368 } else if (buf->readpos == buf->bytesused) {
379 int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES; 369 int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;
380 370
@@ -518,7 +508,7 @@ unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
518 CX18_DEBUG_HI_FILE("Encoder poll\n"); 508 CX18_DEBUG_HI_FILE("Encoder poll\n");
519 poll_wait(filp, &s->waitq, wait); 509 poll_wait(filp, &s->waitq, wait);
520 510
521 if (atomic_read(&s->q_full.buffers) || atomic_read(&s->q_io.buffers)) 511 if (atomic_read(&s->q_full.buffers))
522 return POLLIN | POLLRDNORM; 512 return POLLIN | POLLRDNORM;
523 if (eof) 513 if (eof)
524 return POLLHUP; 514 return POLLHUP;
diff --git a/drivers/media/video/cx18/cx18-firmware.c b/drivers/media/video/cx18/cx18-firmware.c
index 51534428cd00..1fa95da1575e 100644
--- a/drivers/media/video/cx18/cx18-firmware.c
+++ b/drivers/media/video/cx18/cx18-firmware.c
@@ -2,6 +2,7 @@
2 * cx18 firmware functions 2 * cx18 firmware functions
3 * 3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -25,6 +26,7 @@
25#include "cx18-irq.h" 26#include "cx18-irq.h"
26#include "cx18-firmware.h" 27#include "cx18-firmware.h"
27#include "cx18-cards.h" 28#include "cx18-cards.h"
29#include "cx18-av-core.h"
28#include <linux/firmware.h> 30#include <linux/firmware.h>
29 31
30#define CX18_PROC_SOFT_RESET 0xc70010 32#define CX18_PROC_SOFT_RESET 0xc70010
@@ -121,6 +123,7 @@ static int load_cpu_fw_direct(const char *fn, u8 __iomem *mem, struct cx18 *cx)
121 if (cx18_raw_readl(cx, dst) != *src) { 123 if (cx18_raw_readl(cx, dst) != *src) {
122 CX18_ERR("Mismatch at offset %x\n", i); 124 CX18_ERR("Mismatch at offset %x\n", i);
123 release_firmware(fw); 125 release_firmware(fw);
126 cx18_setup_page(cx, 0);
124 return -EIO; 127 return -EIO;
125 } 128 }
126 dst++; 129 dst++;
@@ -131,10 +134,12 @@ static int load_cpu_fw_direct(const char *fn, u8 __iomem *mem, struct cx18 *cx)
131 CX18_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size); 134 CX18_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size);
132 size = fw->size; 135 size = fw->size;
133 release_firmware(fw); 136 release_firmware(fw);
137 cx18_setup_page(cx, SCB_OFFSET);
134 return size; 138 return size;
135} 139}
136 140
137static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx) 141static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
142 u32 *entry_addr)
138{ 143{
139 const struct firmware *fw = NULL; 144 const struct firmware *fw = NULL;
140 int i, j; 145 int i, j;
@@ -149,9 +154,11 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx)
149 if (request_firmware(&fw, fn, &cx->dev->dev)) { 154 if (request_firmware(&fw, fn, &cx->dev->dev)) {
150 CX18_ERR("unable to open firmware %s\n", fn); 155 CX18_ERR("unable to open firmware %s\n", fn);
151 CX18_ERR("did you put the firmware in the hotplug firmware directory?\n"); 156 CX18_ERR("did you put the firmware in the hotplug firmware directory?\n");
157 cx18_setup_page(cx, 0);
152 return -ENOMEM; 158 return -ENOMEM;
153 } 159 }
154 160
161 *entry_addr = 0;
155 src = (const u32 *)fw->data; 162 src = (const u32 *)fw->data;
156 vers = fw->data + sizeof(seghdr); 163 vers = fw->data + sizeof(seghdr);
157 sz = fw->size; 164 sz = fw->size;
@@ -168,10 +175,12 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx)
168 } 175 }
169 CX18_DEBUG_INFO("load segment %x-%x\n", seghdr.addr, 176 CX18_DEBUG_INFO("load segment %x-%x\n", seghdr.addr,
170 seghdr.addr + seghdr.size - 1); 177 seghdr.addr + seghdr.size - 1);
178 if (*entry_addr == 0)
179 *entry_addr = seghdr.addr;
171 if (offset + seghdr.size > sz) 180 if (offset + seghdr.size > sz)
172 break; 181 break;
173 for (i = 0; i < seghdr.size; i += 4096) { 182 for (i = 0; i < seghdr.size; i += 4096) {
174 cx18_setup_page(cx, offset + i); 183 cx18_setup_page(cx, seghdr.addr + i);
175 for (j = i; j < seghdr.size && j < i + 4096; j += 4) { 184 for (j = i; j < seghdr.size && j < i + 4096; j += 4) {
176 /* no need for endianness conversion on the ppc */ 185 /* no need for endianness conversion on the ppc */
177 cx18_raw_writel(cx, src[(offset + j) / 4], 186 cx18_raw_writel(cx, src[(offset + j) / 4],
@@ -181,6 +190,7 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx)
181 CX18_ERR("Mismatch at offset %x\n", 190 CX18_ERR("Mismatch at offset %x\n",
182 offset + j); 191 offset + j);
183 release_firmware(fw); 192 release_firmware(fw);
193 cx18_setup_page(cx, 0);
184 return -EIO; 194 return -EIO;
185 } 195 }
186 } 196 }
@@ -192,16 +202,17 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx)
192 fn, apu_version, fw->size); 202 fn, apu_version, fw->size);
193 size = fw->size; 203 size = fw->size;
194 release_firmware(fw); 204 release_firmware(fw);
195 /* Clear bit0 for APU to start from 0 */ 205 cx18_setup_page(cx, 0);
196 cx18_write_reg(cx, cx18_read_reg(cx, 0xc72030) & ~1, 0xc72030);
197 return size; 206 return size;
198} 207}
199 208
200void cx18_halt_firmware(struct cx18 *cx) 209void cx18_halt_firmware(struct cx18 *cx)
201{ 210{
202 CX18_DEBUG_INFO("Preparing for firmware halt.\n"); 211 CX18_DEBUG_INFO("Preparing for firmware halt.\n");
203 cx18_write_reg(cx, 0x000F000F, CX18_PROC_SOFT_RESET); /* stop the fw */ 212 cx18_write_reg_expect(cx, 0x000F000F, CX18_PROC_SOFT_RESET,
204 cx18_write_reg(cx, 0x00020002, CX18_ADEC_CONTROL); 213 0x0000000F, 0x000F000F);
214 cx18_write_reg_expect(cx, 0x00020002, CX18_ADEC_CONTROL,
215 0x00000002, 0x00020002);
205} 216}
206 217
207void cx18_init_power(struct cx18 *cx, int lowpwr) 218void cx18_init_power(struct cx18 *cx, int lowpwr)
@@ -211,9 +222,48 @@ void cx18_init_power(struct cx18 *cx, int lowpwr)
211 cx18_write_reg(cx, 0x00000008, CX18_PLL_POWER_DOWN); 222 cx18_write_reg(cx, 0x00000008, CX18_PLL_POWER_DOWN);
212 223
213 /* ADEC out of sleep */ 224 /* ADEC out of sleep */
214 cx18_write_reg(cx, 0x00020000, CX18_ADEC_CONTROL); 225 cx18_write_reg_expect(cx, 0x00020000, CX18_ADEC_CONTROL,
215 226 0x00000000, 0x00020002);
216 /* The fast clock is at 200/245 MHz */ 227
228 /*
229 * The PLL parameters are based on the external crystal frequency that
230 * would ideally be:
231 *
232 * NTSC Color subcarrier freq * 8 =
233 * 4.5 MHz/286 * 455/2 * 8 = 28.63636363... MHz
234 *
235 * The accidents of history and the rationale that explain where this
236 * combination of magic numbers originates can be found in:
237 *
238 * [1] Abrahams, I. C., "Choice of Chrominance Subcarrier Frequency in
239 * the NTSC Standards", Proceedings of the I-R-E, January 1954, pp 79-80
240 *
241 * [2] Abrahams, I. C., "The 'Frequency Interleaving' Principle in the
242 * NTSC Standards", Proceedings of the I-R-E, January 1954, pp 81-83
243 *
244 * As Mike Bradley has rightly pointed out, it's not the exact crystal
245 * frequency that matters, only that all parts of the driver and
246 * firmware are using the same value (close to the ideal value).
247 *
248 * Since I have a strong suspicion that, if the firmware ever assumes a
249 * crystal value at all, it will assume 28.636360 MHz, the crystal
250 * freq used in calculations in this driver will be:
251 *
252 * xtal_freq = 28.636360 MHz
253 *
254 * an error of less than 0.13 ppm, which is way, way better than any
255 * off-the-shelf crystal will have for accuracy anyway.
256 *
257 * Below I aim to run the PLLs' VCOs near 400 MHz to minimize errors.
258 *
259 * Many thanks to Jeff Campbell and Mike Bradley for their extensive
260 * investigation, experimentation, testing, and suggested solutions of
261 * audio/video sync problems with SVideo and CVBS captures.
262 */
263
264 /* the fast clock is at 200/245 MHz */
265 /* 1 * xtal_freq * 0x0d.f7df9b8 / 2 = 200 MHz: 400 MHz before post-divide */
266 /* 1 * xtal_freq * 0x11.1c71eb8 / 2 = 245 MHz: 490 MHz before post-divide */
217 cx18_write_reg(cx, lowpwr ? 0xD : 0x11, CX18_FAST_CLOCK_PLL_INT); 267 cx18_write_reg(cx, lowpwr ? 0xD : 0x11, CX18_FAST_CLOCK_PLL_INT);
218 cx18_write_reg(cx, lowpwr ? 0x1EFBF37 : 0x038E3D7, 268 cx18_write_reg(cx, lowpwr ? 0x1EFBF37 : 0x038E3D7,
219 CX18_FAST_CLOCK_PLL_FRAC); 269 CX18_FAST_CLOCK_PLL_FRAC);
@@ -223,16 +273,36 @@ void cx18_init_power(struct cx18 *cx, int lowpwr)
223 cx18_write_reg(cx, 4, CX18_FAST_CLOCK_PLL_ADJUST_BANDWIDTH); 273 cx18_write_reg(cx, 4, CX18_FAST_CLOCK_PLL_ADJUST_BANDWIDTH);
224 274
225 /* set slow clock to 125/120 MHz */ 275 /* set slow clock to 125/120 MHz */
226 cx18_write_reg(cx, lowpwr ? 0x11 : 0x10, CX18_SLOW_CLOCK_PLL_INT); 276 /* xtal_freq * 0x0d.1861a20 / 3 = 125 MHz: 375 MHz before post-divide */
227 cx18_write_reg(cx, lowpwr ? 0xEBAF05 : 0x18618A8, 277 /* xtal_freq * 0x0c.92493f8 / 3 = 120 MHz: 360 MHz before post-divide */
278 cx18_write_reg(cx, lowpwr ? 0xD : 0xC, CX18_SLOW_CLOCK_PLL_INT);
279 cx18_write_reg(cx, lowpwr ? 0x30C344 : 0x124927F,
228 CX18_SLOW_CLOCK_PLL_FRAC); 280 CX18_SLOW_CLOCK_PLL_FRAC);
229 cx18_write_reg(cx, 4, CX18_SLOW_CLOCK_PLL_POST); 281 cx18_write_reg(cx, 3, CX18_SLOW_CLOCK_PLL_POST);
230 282
231 /* mpeg clock pll 54MHz */ 283 /* mpeg clock pll 54MHz */
284 /* xtal_freq * 0xf.15f17f0 / 8 = 54 MHz: 432 MHz before post-divide */
232 cx18_write_reg(cx, 0xF, CX18_MPEG_CLOCK_PLL_INT); 285 cx18_write_reg(cx, 0xF, CX18_MPEG_CLOCK_PLL_INT);
233 cx18_write_reg(cx, 0x2BCFEF, CX18_MPEG_CLOCK_PLL_FRAC); 286 cx18_write_reg(cx, 0x2BE2FE, CX18_MPEG_CLOCK_PLL_FRAC);
234 cx18_write_reg(cx, 8, CX18_MPEG_CLOCK_PLL_POST); 287 cx18_write_reg(cx, 8, CX18_MPEG_CLOCK_PLL_POST);
235 288
289 /*
290 * VDCLK Integer = 0x0f, Post Divider = 0x04
291 * AIMCLK Integer = 0x0e, Post Divider = 0x16
292 */
293 cx18_av_write4(cx, CXADEC_PLL_CTRL1, 0x160e040f);
294
295 /* VDCLK Fraction = 0x2be2fe */
296 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz before post-divide */
297 cx18_av_write4(cx, CXADEC_VID_PLL_FRAC, 0x002be2fe);
298
299 /* AIMCLK Fraction = 0x05227ad */
300 /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz before post-divide */
301 cx18_av_write4(cx, CXADEC_AUX_PLL_FRAC, 0x005227ad);
302
303 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */
304 cx18_av_write(cx, CXADEC_I2S_MCLK, 0x56);
305
236 /* Defaults */ 306 /* Defaults */
237 /* APU = SC or SC/2 = 125/62.5 */ 307 /* APU = SC or SC/2 = 125/62.5 */
238 /* EPU = SC = 125 */ 308 /* EPU = SC = 125 */
@@ -248,22 +318,34 @@ void cx18_init_power(struct cx18 *cx, int lowpwr)
248 /* VFC = disabled */ 318 /* VFC = disabled */
249 /* USB = disabled */ 319 /* USB = disabled */
250 320
251 cx18_write_reg(cx, lowpwr ? 0xFFFF0020 : 0x00060004, 321 if (lowpwr) {
252 CX18_CLOCK_SELECT1); 322 cx18_write_reg_expect(cx, 0xFFFF0020, CX18_CLOCK_SELECT1,
253 cx18_write_reg(cx, lowpwr ? 0xFFFF0004 : 0x00060006, 323 0x00000020, 0xFFFFFFFF);
254 CX18_CLOCK_SELECT2); 324 cx18_write_reg_expect(cx, 0xFFFF0004, CX18_CLOCK_SELECT2,
255 325 0x00000004, 0xFFFFFFFF);
256 cx18_write_reg(cx, 0xFFFF0002, CX18_HALF_CLOCK_SELECT1); 326 } else {
257 cx18_write_reg(cx, 0xFFFF0104, CX18_HALF_CLOCK_SELECT2); 327 /* This doesn't explicitly set every clock select */
328 cx18_write_reg_expect(cx, 0x00060004, CX18_CLOCK_SELECT1,
329 0x00000004, 0x00060006);
330 cx18_write_reg_expect(cx, 0x00060006, CX18_CLOCK_SELECT2,
331 0x00000006, 0x00060006);
332 }
258 333
259 cx18_write_reg(cx, 0xFFFF9026, CX18_CLOCK_ENABLE1); 334 cx18_write_reg_expect(cx, 0xFFFF0002, CX18_HALF_CLOCK_SELECT1,
260 cx18_write_reg(cx, 0xFFFF3105, CX18_CLOCK_ENABLE2); 335 0x00000002, 0xFFFFFFFF);
336 cx18_write_reg_expect(cx, 0xFFFF0104, CX18_HALF_CLOCK_SELECT2,
337 0x00000104, 0xFFFFFFFF);
338 cx18_write_reg_expect(cx, 0xFFFF9026, CX18_CLOCK_ENABLE1,
339 0x00009026, 0xFFFFFFFF);
340 cx18_write_reg_expect(cx, 0xFFFF3105, CX18_CLOCK_ENABLE2,
341 0x00003105, 0xFFFFFFFF);
261} 342}
262 343
263void cx18_init_memory(struct cx18 *cx) 344void cx18_init_memory(struct cx18 *cx)
264{ 345{
265 cx18_msleep_timeout(10, 0); 346 cx18_msleep_timeout(10, 0);
266 cx18_write_reg(cx, 0x10000, CX18_DDR_SOFT_RESET); 347 cx18_write_reg_expect(cx, 0x00010000, CX18_DDR_SOFT_RESET,
348 0x00000000, 0x00010001);
267 cx18_msleep_timeout(10, 0); 349 cx18_msleep_timeout(10, 0);
268 350
269 cx18_write_reg(cx, cx->card->ddr.chip_config, CX18_DDR_CHIP_CONFIG); 351 cx18_write_reg(cx, cx->card->ddr.chip_config, CX18_DDR_CHIP_CONFIG);
@@ -282,13 +364,15 @@ void cx18_init_memory(struct cx18 *cx)
282 364
283 cx18_msleep_timeout(10, 0); 365 cx18_msleep_timeout(10, 0);
284 366
285 cx18_write_reg(cx, 0x20000, CX18_DDR_SOFT_RESET); 367 cx18_write_reg_expect(cx, 0x00020000, CX18_DDR_SOFT_RESET,
368 0x00000000, 0x00020002);
286 cx18_msleep_timeout(10, 0); 369 cx18_msleep_timeout(10, 0);
287 370
288 /* use power-down mode when idle */ 371 /* use power-down mode when idle */
289 cx18_write_reg(cx, 0x00000010, CX18_DDR_POWER_REG); 372 cx18_write_reg(cx, 0x00000010, CX18_DDR_POWER_REG);
290 373
291 cx18_write_reg(cx, 0x10001, CX18_REG_BUS_TIMEOUT_EN); 374 cx18_write_reg_expect(cx, 0x00010001, CX18_REG_BUS_TIMEOUT_EN,
375 0x00000001, 0x00010001);
292 376
293 cx18_write_reg(cx, 0x48, CX18_DDR_MB_PER_ROW_7); 377 cx18_write_reg(cx, 0x48, CX18_DDR_MB_PER_ROW_7);
294 cx18_write_reg(cx, 0xE0000, CX18_DDR_BASE_63_ADDR); 378 cx18_write_reg(cx, 0xE0000, CX18_DDR_BASE_63_ADDR);
@@ -307,51 +391,76 @@ void cx18_init_memory(struct cx18 *cx)
307 391
308int cx18_firmware_init(struct cx18 *cx) 392int cx18_firmware_init(struct cx18 *cx)
309{ 393{
394 u32 fw_entry_addr;
395 int sz, retries;
396 u32 api_args[MAX_MB_ARGUMENTS];
397
310 /* Allow chip to control CLKRUN */ 398 /* Allow chip to control CLKRUN */
311 cx18_write_reg(cx, 0x5, CX18_DSP0_INTERRUPT_MASK); 399 cx18_write_reg(cx, 0x5, CX18_DSP0_INTERRUPT_MASK);
312 400
313 cx18_write_reg(cx, 0x000F000F, CX18_PROC_SOFT_RESET); /* stop the fw */ 401 /* Stop the firmware */
402 cx18_write_reg_expect(cx, 0x000F000F, CX18_PROC_SOFT_RESET,
403 0x0000000F, 0x000F000F);
314 404
315 cx18_msleep_timeout(1, 0); 405 cx18_msleep_timeout(1, 0);
316 406
407 /* If the CPU is still running */
408 if ((cx18_read_reg(cx, CX18_PROC_SOFT_RESET) & 8) == 0) {
409 CX18_ERR("%s: couldn't stop CPU to load firmware\n", __func__);
410 return -EIO;
411 }
412
317 cx18_sw1_irq_enable(cx, IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU); 413 cx18_sw1_irq_enable(cx, IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
318 cx18_sw2_irq_enable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK); 414 cx18_sw2_irq_enable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
319 415
320 /* Only if the processor is not running */ 416 sz = load_cpu_fw_direct("v4l-cx23418-cpu.fw", cx->enc_mem, cx);
321 if (cx18_read_reg(cx, CX18_PROC_SOFT_RESET) & 8) { 417 if (sz <= 0)
322 int sz = load_apu_fw_direct("v4l-cx23418-apu.fw", 418 return sz;
323 cx->enc_mem, cx); 419
324 420 /* The SCB & IPC area *must* be correct before starting the firmwares */
325 cx18_write_enc(cx, 0xE51FF004, 0); 421 cx18_init_scb(cx);
326 cx18_write_enc(cx, 0xa00000, 4); /* todo: not hardcoded */ 422
327 /* Start APU */ 423 fw_entry_addr = 0;
328 cx18_write_reg(cx, 0x00010000, CX18_PROC_SOFT_RESET); 424 sz = load_apu_fw_direct("v4l-cx23418-apu.fw", cx->enc_mem, cx,
329 cx18_msleep_timeout(500, 0); 425 &fw_entry_addr);
330 426 if (sz <= 0)
331 sz = sz <= 0 ? sz : load_cpu_fw_direct("v4l-cx23418-cpu.fw", 427 return sz;
332 cx->enc_mem, cx); 428
333 429 /* Start the CPU. The CPU will take care of the APU for us. */
334 if (sz > 0) { 430 cx18_write_reg_expect(cx, 0x00080000, CX18_PROC_SOFT_RESET,
335 int retries = 0; 431 0x00000000, 0x00080008);
336 432
337 /* start the CPU */ 433 /* Wait up to 500 ms for the APU to come out of reset */
338 cx18_write_reg(cx, 0x00080000, CX18_PROC_SOFT_RESET); 434 for (retries = 0;
339 while (retries++ < 50) { /* Loop for max 500mS */ 435 retries < 50 && (cx18_read_reg(cx, CX18_PROC_SOFT_RESET) & 1) == 1;
340 if ((cx18_read_reg(cx, CX18_PROC_SOFT_RESET) 436 retries++)
341 & 1) == 0) 437 cx18_msleep_timeout(10, 0);
342 break; 438
343 cx18_msleep_timeout(10, 0); 439 cx18_msleep_timeout(200, 0);
344 } 440
345 cx18_msleep_timeout(200, 0); 441 if (retries == 50 &&
346 if (retries == 51) { 442 (cx18_read_reg(cx, CX18_PROC_SOFT_RESET) & 1) == 1) {
347 CX18_ERR("Could not start the CPU\n"); 443 CX18_ERR("Could not start the CPU\n");
348 return -EIO; 444 return -EIO;
349 }
350 }
351 if (sz <= 0)
352 return -EIO;
353 } 445 }
446
447 /*
448 * The CPU had once before set up to receive an interrupt for its
449 * outgoing IRQ_CPU_TO_EPU_ACK to us. If it ever does this, we get an
450 * interrupt when it sends us an ack, but by the time we process it,
451 * that flag in the SW2 status register has been cleared by the CPU
452 * firmware. We'll prevent that not so useful condition from happening
453 * by clearing the CPU's interrupt enables for Ack IRQ's we want to
454 * process.
455 */
456 cx18_sw2_irq_disable_cpu(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
457
458 /* Try a benign command to see if the CPU is alive and well */
459 sz = cx18_vapi_result(cx, api_args, CX18_CPU_DEBUG_PEEK32, 1, 0);
460 if (sz < 0)
461 return sz;
462
354 /* initialize GPIO */ 463 /* initialize GPIO */
355 cx18_write_reg(cx, 0x14001400, 0xC78110); 464 cx18_write_reg_expect(cx, 0x14001400, 0xc78110, 0x00001400, 0x14001400);
356 return 0; 465 return 0;
357} 466}
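The PLL comment block added to cx18_init_power() above quotes multipliers such as 0x0d.f7df9b8 and 0x11.1c71eb8 against an assumed 28.636360 MHz crystal. The following is a minimal user-space C sketch of that arithmetic, not part of the patch: it assumes the *_PLL_FRAC registers hold bits [27:3] of a 28-bit binary fraction (the only interpretation under which the quoted pairs, e.g. 0x038E3D7 and 0x11.1c71eb8, line up) and it takes the post-dividers 2, 3 and 8 from the new comments rather than from the *_PLL_POST register writes.

/* pll_check.c - sanity-check the PLL comments above (assumptions as noted) */
#include <stdio.h>
#include <stdint.h>

static double pll_mhz(double xtal_hz, uint32_t pll_int,
		      uint32_t pll_frac, uint32_t post_div)
{
	/* assumed: FRAC register = fraction bits [27:3] of the multiplier */
	double mult = (double)pll_int +
		      (double)((uint64_t)pll_frac << 3) / (double)(1 << 28);
	return xtal_hz * mult / post_div / 1e6;
}

int main(void)
{
	double xtal = 28636360.0;	/* 28.636360 MHz, per the comment */

	printf("fast, normal %.3f MHz\n", pll_mhz(xtal, 0x11, 0x038E3D7, 2));
	printf("fast, lowpwr %.3f MHz\n", pll_mhz(xtal, 0x0D, 0x1EFBF37, 2));
	printf("slow, normal %.3f MHz\n", pll_mhz(xtal, 0x0C, 0x124927F, 3));
	printf("slow, lowpwr %.3f MHz\n", pll_mhz(xtal, 0x0D, 0x030C344, 3));
	printf("mpeg         %.3f MHz\n", pll_mhz(xtal, 0x0F, 0x02BE2FE, 8));
	return 0;
}

Running it reproduces the 245/200, 120/125 and 54 MHz figures the new comments claim, which is a quick way to cross-check future PLL tweaks against the chosen crystal value.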
diff --git a/drivers/media/video/cx18/cx18-gpio.c b/drivers/media/video/cx18/cx18-gpio.c
index 0e560421989e..1a99329f33cb 100644
--- a/drivers/media/video/cx18/cx18-gpio.c
+++ b/drivers/media/video/cx18/cx18-gpio.c
@@ -4,6 +4,7 @@
4 * Derived from ivtv-gpio.c 4 * Derived from ivtv-gpio.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -47,15 +48,19 @@
47 48
48static void gpio_write(struct cx18 *cx) 49static void gpio_write(struct cx18 *cx)
49{ 50{
50 u32 dir = cx->gpio_dir; 51 u32 dir_lo = cx->gpio_dir & 0xffff;
51 u32 val = cx->gpio_val; 52 u32 val_lo = cx->gpio_val & 0xffff;
52 53 u32 dir_hi = cx->gpio_dir >> 16;
53 cx18_write_reg(cx, (dir & 0xffff) << 16, CX18_REG_GPIO_DIR1); 54 u32 val_hi = cx->gpio_val >> 16;
54 cx18_write_reg(cx, ((dir & 0xffff) << 16) | (val & 0xffff), 55
55 CX18_REG_GPIO_OUT1); 56 cx18_write_reg_expect(cx, dir_lo << 16,
56 cx18_write_reg(cx, dir & 0xffff0000, CX18_REG_GPIO_DIR2); 57 CX18_REG_GPIO_DIR1, ~dir_lo, dir_lo);
57 cx18_write_reg_sync(cx, (dir & 0xffff0000) | ((val & 0xffff0000) >> 16), 58 cx18_write_reg_expect(cx, (dir_lo << 16) | val_lo,
58 CX18_REG_GPIO_OUT2); 59 CX18_REG_GPIO_OUT1, val_lo, dir_lo);
60 cx18_write_reg_expect(cx, dir_hi << 16,
61 CX18_REG_GPIO_DIR2, ~dir_hi, dir_hi);
62 cx18_write_reg_expect(cx, (dir_hi << 16) | val_hi,
63 CX18_REG_GPIO_OUT2, val_hi, dir_hi);
59} 64}
60 65
61void cx18_reset_i2c_slaves_gpio(struct cx18 *cx) 66void cx18_reset_i2c_slaves_gpio(struct cx18 *cx)
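To see concretely what the reworked gpio_write() above puts on the wire, here is a small self-contained sketch that mirrors its shifting and masking for one hypothetical cx->gpio_dir / cx->gpio_val pair. The dir/val numbers are invented for illustration; the (write value, expected value, mask) triples printed are exactly the arguments gpio_write() passes to cx18_write_reg_expect(), with the expected value shown already ANDed with the mask, as the helper does internally.

#include <stdio.h>

int main(void)
{
	/* hypothetical example values, not taken from any real cx18 card */
	unsigned int gpio_dir = 0x00033001; /* pins 0, 12, 13, 16, 17 are outputs */
	unsigned int gpio_val = 0x00021001; /* pins 0, 12, 17 driven high */

	unsigned int dir_lo = gpio_dir & 0xffff, val_lo = gpio_val & 0xffff;
	unsigned int dir_hi = gpio_dir >> 16,   val_hi = gpio_val >> 16;

	printf("DIR1: write %#010x expect %#010x mask %#010x\n",
	       dir_lo << 16, ~dir_lo & dir_lo /* always 0 */, dir_lo);
	printf("OUT1: write %#010x expect %#010x mask %#010x\n",
	       (dir_lo << 16) | val_lo, val_lo & dir_lo, dir_lo);
	printf("DIR2: write %#010x expect %#010x mask %#010x\n",
	       dir_hi << 16, ~dir_hi & dir_hi /* always 0 */, dir_hi);
	printf("OUT2: write %#010x expect %#010x mask %#010x\n",
	       (dir_hi << 16) | val_hi, val_hi & dir_hi, dir_hi);
	return 0;
}

The upshot is that only bits configured as outputs take part in the readback check, so an undriven input pin can never cause a spurious write retry.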
diff --git a/drivers/media/video/cx18/cx18-gpio.h b/drivers/media/video/cx18/cx18-gpio.h
index beb7424b9944..39ffccc19d8a 100644
--- a/drivers/media/video/cx18/cx18-gpio.h
+++ b/drivers/media/video/cx18/cx18-gpio.h
@@ -4,6 +4,7 @@
4 * Derived from ivtv-gpio.h 4 * Derived from ivtv-gpio.h
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index aa09e557b195..8941f58bed7f 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -4,6 +4,7 @@
4 * Derived from ivtv-i2c.c 4 * Derived from ivtv-i2c.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -27,6 +28,7 @@
27#include "cx18-gpio.h" 28#include "cx18-gpio.h"
28#include "cx18-av-core.h" 29#include "cx18-av-core.h"
29#include "cx18-i2c.h" 30#include "cx18-i2c.h"
31#include "cx18-irq.h"
30 32
31#define CX18_REG_I2C_1_WR 0xf15000 33#define CX18_REG_I2C_1_WR 0xf15000
32#define CX18_REG_I2C_1_RD 0xf15008 34#define CX18_REG_I2C_1_RD 0xf15008
@@ -160,9 +162,9 @@ static void cx18_setscl(void *data, int state)
160 u32 r = cx18_read_reg(cx, addr); 162 u32 r = cx18_read_reg(cx, addr);
161 163
162 if (state) 164 if (state)
163 cx18_write_reg_sync(cx, r | SETSCL_BIT, addr); 165 cx18_write_reg(cx, r | SETSCL_BIT, addr);
164 else 166 else
165 cx18_write_reg_sync(cx, r & ~SETSCL_BIT, addr); 167 cx18_write_reg(cx, r & ~SETSCL_BIT, addr);
166} 168}
167 169
168static void cx18_setsda(void *data, int state) 170static void cx18_setsda(void *data, int state)
@@ -173,9 +175,9 @@ static void cx18_setsda(void *data, int state)
173 u32 r = cx18_read_reg(cx, addr); 175 u32 r = cx18_read_reg(cx, addr);
174 176
175 if (state) 177 if (state)
176 cx18_write_reg_sync(cx, r | SETSDL_BIT, addr); 178 cx18_write_reg(cx, r | SETSDL_BIT, addr);
177 else 179 else
178 cx18_write_reg_sync(cx, r & ~SETSDL_BIT, addr); 180 cx18_write_reg(cx, r & ~SETSDL_BIT, addr);
179} 181}
180 182
181static int cx18_getscl(void *data) 183static int cx18_getscl(void *data)
@@ -396,30 +398,33 @@ int init_cx18_i2c(struct cx18 *cx)
396 if (cx18_read_reg(cx, CX18_REG_I2C_2_WR) != 0x0003c02f) { 398 if (cx18_read_reg(cx, CX18_REG_I2C_2_WR) != 0x0003c02f) {
397 /* Reset/Unreset I2C hardware block */ 399 /* Reset/Unreset I2C hardware block */
398 /* Clock select 220MHz */ 400 /* Clock select 220MHz */
399 cx18_write_reg(cx, 0x10000000, 0xc71004); 401 cx18_write_reg_expect(cx, 0x10000000, 0xc71004,
402 0x00000000, 0x10001000);
400 /* Clock Enable */ 403 /* Clock Enable */
401 cx18_write_reg_sync(cx, 0x10001000, 0xc71024); 404 cx18_write_reg_expect(cx, 0x10001000, 0xc71024,
405 0x00001000, 0x10001000);
402 } 406 }
403 /* courtesy of Steven Toth <stoth@hauppauge.com> */ 407 /* courtesy of Steven Toth <stoth@hauppauge.com> */
404 cx18_write_reg_sync(cx, 0x00c00000, 0xc7001c); 408 cx18_write_reg_expect(cx, 0x00c00000, 0xc7001c, 0x00000000, 0x00c000c0);
405 mdelay(10); 409 mdelay(10);
406 cx18_write_reg_sync(cx, 0x00c000c0, 0xc7001c); 410 cx18_write_reg_expect(cx, 0x00c000c0, 0xc7001c, 0x000000c0, 0x00c000c0);
407 mdelay(10); 411 mdelay(10);
408 cx18_write_reg_sync(cx, 0x00c00000, 0xc7001c); 412 cx18_write_reg_expect(cx, 0x00c00000, 0xc7001c, 0x00000000, 0x00c000c0);
409 mdelay(10); 413 mdelay(10);
410 414
411 /* Set to edge-triggered intrs. */ 415 /* Set to edge-triggered intrs. */
412 cx18_write_reg_sync(cx, 0x00c00000, 0xc730c8); 416 cx18_write_reg(cx, 0x00c00000, 0xc730c8);
413 /* Clear any stale intrs */ 417 /* Clear any stale intrs */
414 cx18_write_reg_sync(cx, 0x00c00000, 0xc730c4); 418 cx18_write_reg_expect(cx, HW2_I2C1_INT|HW2_I2C2_INT, HW2_INT_CLR_STATUS,
419 ~(HW2_I2C1_INT|HW2_I2C2_INT), HW2_I2C1_INT|HW2_I2C2_INT);
415 420
416 /* Hw I2C1 Clock Freq ~100kHz */ 421 /* Hw I2C1 Clock Freq ~100kHz */
417 cx18_write_reg_sync(cx, 0x00021c0f & ~4, CX18_REG_I2C_1_WR); 422 cx18_write_reg(cx, 0x00021c0f & ~4, CX18_REG_I2C_1_WR);
418 cx18_setscl(&cx->i2c_algo_cb_data[0], 1); 423 cx18_setscl(&cx->i2c_algo_cb_data[0], 1);
419 cx18_setsda(&cx->i2c_algo_cb_data[0], 1); 424 cx18_setsda(&cx->i2c_algo_cb_data[0], 1);
420 425
421 /* Hw I2C2 Clock Freq ~100kHz */ 426 /* Hw I2C2 Clock Freq ~100kHz */
422 cx18_write_reg_sync(cx, 0x00021c0f & ~4, CX18_REG_I2C_2_WR); 427 cx18_write_reg(cx, 0x00021c0f & ~4, CX18_REG_I2C_2_WR);
423 cx18_setscl(&cx->i2c_algo_cb_data[1], 1); 428 cx18_setscl(&cx->i2c_algo_cb_data[1], 1);
424 cx18_setsda(&cx->i2c_algo_cb_data[1], 1); 429 cx18_setsda(&cx->i2c_algo_cb_data[1], 1);
425 430
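The bit-bang callbacks patched above (cx18_setscl()/cx18_setsda() now use plain retried writes instead of cx18_write_reg_sync()) have the usual i2c-algo-bit signatures. Their registration is not part of this hunk; the sketch below only shows how callbacks like these are normally wired into the kernel's i2c-algo-bit layer. The adapter/algo variable names, the udelay/timeout values, and the assumption that a cx18_getsda() exists alongside cx18_getscl() are illustrative, not quoted from cx18-i2c.c.

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

/* illustrative template only; cx18's real one is elsewhere in cx18-i2c.c */
static struct i2c_algo_bit_data example_algo = {
	.setsda		= cx18_setsda,
	.setscl		= cx18_setscl,
	.getsda		= cx18_getsda,	/* assumed to exist, like getscl */
	.getscl		= cx18_getscl,
	.udelay		= 10,		/* half-bit delay in microseconds */
	.timeout	= HZ / 5,	/* bus-busy timeout, in jiffies */
};

static int example_register_bus(struct cx18 *cx, struct i2c_adapter *adap)
{
	example_algo.data = &cx->i2c_algo_cb_data[0];	/* bus 0 callback arg */
	adap->algo_data = &example_algo;
	adap->dev.parent = &cx->dev->dev;
	return i2c_bit_add_bus(adap);	/* registers the bit-banged adapter */
}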
diff --git a/drivers/media/video/cx18/cx18-io.c b/drivers/media/video/cx18/cx18-io.c
index 220fae8d4ad7..ec5b3d7bcc6b 100644
--- a/drivers/media/video/cx18/cx18-io.c
+++ b/drivers/media/video/cx18/cx18-io.c
@@ -24,179 +24,6 @@
24#include "cx18-io.h" 24#include "cx18-io.h"
25#include "cx18-irq.h" 25#include "cx18-irq.h"
26 26
27void cx18_log_statistics(struct cx18 *cx)
28{
29 int i;
30
31 if (!(cx18_debug & CX18_DBGFLG_INFO))
32 return;
33
34 for (i = 0; i <= CX18_MAX_MMIO_RETRIES; i++)
35 CX18_DEBUG_INFO("retried_write[%d] = %d\n", i,
36 atomic_read(&cx->mmio_stats.retried_write[i]));
37 for (i = 0; i <= CX18_MAX_MMIO_RETRIES; i++)
38 CX18_DEBUG_INFO("retried_read[%d] = %d\n", i,
39 atomic_read(&cx->mmio_stats.retried_read[i]));
40 return;
41}
42
43void cx18_raw_writel_retry(struct cx18 *cx, u32 val, void __iomem *addr)
44{
45 int i;
46 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
47 cx18_raw_writel_noretry(cx, val, addr);
48 if (val == cx18_raw_readl_noretry(cx, addr))
49 break;
50 }
51 cx18_log_write_retries(cx, i, addr);
52}
53
54u32 cx18_raw_readl_retry(struct cx18 *cx, const void __iomem *addr)
55{
56 int i;
57 u32 val;
58 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
59 val = cx18_raw_readl_noretry(cx, addr);
60 if (val != 0xffffffff) /* PCI bus read error */
61 break;
62 }
63 cx18_log_read_retries(cx, i, addr);
64 return val;
65}
66
67u16 cx18_raw_readw_retry(struct cx18 *cx, const void __iomem *addr)
68{
69 int i;
70 u16 val;
71 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
72 val = cx18_raw_readw_noretry(cx, addr);
73 if (val != 0xffff) /* PCI bus read error */
74 break;
75 }
76 cx18_log_read_retries(cx, i, addr);
77 return val;
78}
79
80void cx18_writel_retry(struct cx18 *cx, u32 val, void __iomem *addr)
81{
82 int i;
83 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
84 cx18_writel_noretry(cx, val, addr);
85 if (val == cx18_readl_noretry(cx, addr))
86 break;
87 }
88 cx18_log_write_retries(cx, i, addr);
89}
90
91void _cx18_writel_expect(struct cx18 *cx, u32 val, void __iomem *addr,
92 u32 eval, u32 mask)
93{
94 int i;
95 eval &= mask;
96 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
97 cx18_writel_noretry(cx, val, addr);
98 if (eval == (cx18_readl_noretry(cx, addr) & mask))
99 break;
100 }
101 cx18_log_write_retries(cx, i, addr);
102}
103
104void cx18_writew_retry(struct cx18 *cx, u16 val, void __iomem *addr)
105{
106 int i;
107 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
108 cx18_writew_noretry(cx, val, addr);
109 if (val == cx18_readw_noretry(cx, addr))
110 break;
111 }
112 cx18_log_write_retries(cx, i, addr);
113}
114
115void cx18_writeb_retry(struct cx18 *cx, u8 val, void __iomem *addr)
116{
117 int i;
118 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
119 cx18_writeb_noretry(cx, val, addr);
120 if (val == cx18_readb_noretry(cx, addr))
121 break;
122 }
123 cx18_log_write_retries(cx, i, addr);
124}
125
126u32 cx18_readl_retry(struct cx18 *cx, const void __iomem *addr)
127{
128 int i;
129 u32 val;
130 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
131 val = cx18_readl_noretry(cx, addr);
132 if (val != 0xffffffff) /* PCI bus read error */
133 break;
134 }
135 cx18_log_read_retries(cx, i, addr);
136 return val;
137}
138
139u16 cx18_readw_retry(struct cx18 *cx, const void __iomem *addr)
140{
141 int i;
142 u16 val;
143 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
144 val = cx18_readw_noretry(cx, addr);
145 if (val != 0xffff) /* PCI bus read error */
146 break;
147 }
148 cx18_log_read_retries(cx, i, addr);
149 return val;
150}
151
152u8 cx18_readb_retry(struct cx18 *cx, const void __iomem *addr)
153{
154 int i;
155 u8 val;
156 for (i = 0; i < CX18_MAX_MMIO_RETRIES; i++) {
157 val = cx18_readb_noretry(cx, addr);
158 if (val != 0xff) /* PCI bus read error */
159 break;
160 }
161 cx18_log_read_retries(cx, i, addr);
162 return val;
163}
164
165void cx18_memcpy_fromio(struct cx18 *cx, void *to,
166 const void __iomem *from, unsigned int len)
167{
168 const u8 __iomem *src = from;
169 u8 *dst = to;
170
171 /* Align reads on the CX23418's addresses */
172 if ((len > 0) && ((unsigned long) src & 1)) {
173 *dst = cx18_readb(cx, src);
174 len--;
175 dst++;
176 src++;
177 }
178 if ((len > 1) && ((unsigned long) src & 2)) {
179 *((u16 *)dst) = cx18_raw_readw(cx, src);
180 len -= 2;
181 dst += 2;
182 src += 2;
183 }
184 while (len > 3) {
185 *((u32 *)dst) = cx18_raw_readl(cx, src);
186 len -= 4;
187 dst += 4;
188 src += 4;
189 }
190 if (len > 1) {
191 *((u16 *)dst) = cx18_raw_readw(cx, src);
192 len -= 2;
193 dst += 2;
194 src += 2;
195 }
196 if (len > 0)
197 *dst = cx18_readb(cx, src);
198}
199
200void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count) 27void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count)
201{ 28{
202 u8 __iomem *dst = addr; 29 u8 __iomem *dst = addr;
@@ -230,32 +57,35 @@ void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count)
230 57
231void cx18_sw1_irq_enable(struct cx18 *cx, u32 val) 58void cx18_sw1_irq_enable(struct cx18 *cx, u32 val)
232{ 59{
233 u32 r;
234 cx18_write_reg_expect(cx, val, SW1_INT_STATUS, ~val, val); 60 cx18_write_reg_expect(cx, val, SW1_INT_STATUS, ~val, val);
235 r = cx18_read_reg(cx, SW1_INT_ENABLE_PCI); 61 cx->sw1_irq_mask = cx18_read_reg(cx, SW1_INT_ENABLE_PCI) | val;
236 cx18_write_reg(cx, r | val, SW1_INT_ENABLE_PCI); 62 cx18_write_reg(cx, cx->sw1_irq_mask, SW1_INT_ENABLE_PCI);
237} 63}
238 64
239void cx18_sw1_irq_disable(struct cx18 *cx, u32 val) 65void cx18_sw1_irq_disable(struct cx18 *cx, u32 val)
240{ 66{
241 u32 r; 67 cx->sw1_irq_mask = cx18_read_reg(cx, SW1_INT_ENABLE_PCI) & ~val;
242 r = cx18_read_reg(cx, SW1_INT_ENABLE_PCI); 68 cx18_write_reg(cx, cx->sw1_irq_mask, SW1_INT_ENABLE_PCI);
243 cx18_write_reg(cx, r & ~val, SW1_INT_ENABLE_PCI);
244} 69}
245 70
246void cx18_sw2_irq_enable(struct cx18 *cx, u32 val) 71void cx18_sw2_irq_enable(struct cx18 *cx, u32 val)
247{ 72{
248 u32 r;
249 cx18_write_reg_expect(cx, val, SW2_INT_STATUS, ~val, val); 73 cx18_write_reg_expect(cx, val, SW2_INT_STATUS, ~val, val);
250 r = cx18_read_reg(cx, SW2_INT_ENABLE_PCI); 74 cx->sw2_irq_mask = cx18_read_reg(cx, SW2_INT_ENABLE_PCI) | val;
251 cx18_write_reg(cx, r | val, SW2_INT_ENABLE_PCI); 75 cx18_write_reg(cx, cx->sw2_irq_mask, SW2_INT_ENABLE_PCI);
252} 76}
253 77
254void cx18_sw2_irq_disable(struct cx18 *cx, u32 val) 78void cx18_sw2_irq_disable(struct cx18 *cx, u32 val)
255{ 79{
80 cx->sw2_irq_mask = cx18_read_reg(cx, SW2_INT_ENABLE_PCI) & ~val;
81 cx18_write_reg(cx, cx->sw2_irq_mask, SW2_INT_ENABLE_PCI);
82}
83
84void cx18_sw2_irq_disable_cpu(struct cx18 *cx, u32 val)
85{
256 u32 r; 86 u32 r;
257 r = cx18_read_reg(cx, SW2_INT_ENABLE_PCI); 87 r = cx18_read_reg(cx, SW2_INT_ENABLE_CPU);
258 cx18_write_reg(cx, r & ~val, SW2_INT_ENABLE_PCI); 88 cx18_write_reg(cx, r & ~val, SW2_INT_ENABLE_CPU);
259} 89}
260 90
261void cx18_setup_page(struct cx18 *cx, u32 addr) 91void cx18_setup_page(struct cx18 *cx, u32 addr)
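The enable/disable helpers above now keep shadow copies of the SW1/SW2 PCI interrupt-enable registers in cx->sw1_irq_mask and cx->sw2_irq_mask, so the interrupt handler can mask the status registers without extra MMIO reads. A generic, hedged sketch of that shadow-mask idiom follows; the device struct, register offsets and the locking are hypothetical (this patch itself does not add a lock around the mask updates).

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#define INT_STATUS	0x00	/* hypothetical register offsets */
#define INT_CLEAR	0x04
#define INT_ENABLE	0x08

struct mydev {
	void __iomem *regs;
	u32 irq_mask;			/* shadow of INT_ENABLE */
	spinlock_t reg_lock;		/* serializes shadow/register updates */
};

static void mydev_irq_enable(struct mydev *dev, u32 bits)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->reg_lock, flags);
	dev->irq_mask |= bits;			/* update the shadow first */
	writel(dev->irq_mask, dev->regs + INT_ENABLE);
	spin_unlock_irqrestore(&dev->reg_lock, flags);
}

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	struct mydev *dev = dev_id;
	/* one status read, masked by the shadow: no MMIO read of the enables */
	u32 pending = readl(dev->regs + INT_STATUS) & dev->irq_mask;

	if (!pending)
		return IRQ_NONE;
	writel(pending, dev->regs + INT_CLEAR);	/* ack only what we saw */
	/* ... dispatch on 'pending' ... */
	return IRQ_HANDLED;
}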
diff --git a/drivers/media/video/cx18/cx18-io.h b/drivers/media/video/cx18/cx18-io.h
index 425244453ea7..2635b3a8cc96 100644
--- a/drivers/media/video/cx18/cx18-io.h
+++ b/drivers/media/video/cx18/cx18-io.h
@@ -25,232 +25,125 @@
25 25
26#include "cx18-driver.h" 26#include "cx18-driver.h"
27 27
28static inline void cx18_io_delay(struct cx18 *cx)
29{
30 if (cx->options.mmio_ndelay)
31 ndelay(cx->options.mmio_ndelay);
32}
33
34/* 28/*
35 * Readback and retry of MMIO access for reliability: 29 * Readback and retry of MMIO access for reliability:
36 * The concept was suggested by Steve Toth <stoth@linuxtv.org>. 30 * The concept was suggested by Steve Toth <stoth@linuxtv.org>.
37 * The implementation is the fault of Andy Walls <awalls@radix.net>. 31 * The implementation is the fault of Andy Walls <awalls@radix.net>.
32 *
33 * *write* functions implicitly retry the mmio unless suffixed with _noretry
34 * *read* functions never retry the mmio (it never helps to do so)
38 */ 35 */
39 36
40/* Statistics gathering */ 37/* Non byteswapping memory mapped IO */
41static inline 38static inline u32 cx18_raw_readl(struct cx18 *cx, const void __iomem *addr)
42void cx18_log_write_retries(struct cx18 *cx, int i, const void __iomem *addr)
43{
44 if (i > CX18_MAX_MMIO_RETRIES)
45 i = CX18_MAX_MMIO_RETRIES;
46 atomic_inc(&cx->mmio_stats.retried_write[i]);
47 return;
48}
49
50static inline
51void cx18_log_read_retries(struct cx18 *cx, int i, const void __iomem *addr)
52{ 39{
53 if (i > CX18_MAX_MMIO_RETRIES) 40 return __raw_readl(addr);
54 i = CX18_MAX_MMIO_RETRIES;
55 atomic_inc(&cx->mmio_stats.retried_read[i]);
56 return;
57} 41}
58 42
59void cx18_log_statistics(struct cx18 *cx);
60
61/* Non byteswapping memory mapped IO */
62static inline 43static inline
63void cx18_raw_writel_noretry(struct cx18 *cx, u32 val, void __iomem *addr) 44void cx18_raw_writel_noretry(struct cx18 *cx, u32 val, void __iomem *addr)
64{ 45{
65 __raw_writel(val, addr); 46 __raw_writel(val, addr);
66 cx18_io_delay(cx);
67} 47}
68 48
69void cx18_raw_writel_retry(struct cx18 *cx, u32 val, void __iomem *addr);
70
71static inline void cx18_raw_writel(struct cx18 *cx, u32 val, void __iomem *addr) 49static inline void cx18_raw_writel(struct cx18 *cx, u32 val, void __iomem *addr)
72{ 50{
73 if (cx18_retry_mmio) 51 int i;
74 cx18_raw_writel_retry(cx, val, addr); 52 for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
75 else
76 cx18_raw_writel_noretry(cx, val, addr); 53 cx18_raw_writel_noretry(cx, val, addr);
54 if (val == cx18_raw_readl(cx, addr))
55 break;
56 }
77} 57}
78 58
79 59/* Normal memory mapped IO */
80static inline 60static inline u32 cx18_readl(struct cx18 *cx, const void __iomem *addr)
81u32 cx18_raw_readl_noretry(struct cx18 *cx, const void __iomem *addr)
82{
83 u32 ret = __raw_readl(addr);
84 cx18_io_delay(cx);
85 return ret;
86}
87
88u32 cx18_raw_readl_retry(struct cx18 *cx, const void __iomem *addr);
89
90static inline u32 cx18_raw_readl(struct cx18 *cx, const void __iomem *addr)
91{ 61{
92 if (cx18_retry_mmio) 62 return readl(addr);
93 return cx18_raw_readl_retry(cx, addr);
94
95 return cx18_raw_readl_noretry(cx, addr);
96} 63}
97 64
98
99static inline 65static inline
100u16 cx18_raw_readw_noretry(struct cx18 *cx, const void __iomem *addr) 66void cx18_writel_noretry(struct cx18 *cx, u32 val, void __iomem *addr)
101{ 67{
102 u16 ret = __raw_readw(addr); 68 writel(val, addr);
103 cx18_io_delay(cx);
104 return ret;
105} 69}
106 70
107u16 cx18_raw_readw_retry(struct cx18 *cx, const void __iomem *addr); 71static inline void cx18_writel(struct cx18 *cx, u32 val, void __iomem *addr)
108
109static inline u16 cx18_raw_readw(struct cx18 *cx, const void __iomem *addr)
110{ 72{
111 if (cx18_retry_mmio) 73 int i;
112 return cx18_raw_readw_retry(cx, addr); 74 for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
113 75 cx18_writel_noretry(cx, val, addr);
114 return cx18_raw_readw_noretry(cx, addr); 76 if (val == cx18_readl(cx, addr))
77 break;
78 }
115} 79}
116 80
117
118/* Normal memory mapped IO */
119static inline 81static inline
120void cx18_writel_noretry(struct cx18 *cx, u32 val, void __iomem *addr) 82void cx18_writel_expect(struct cx18 *cx, u32 val, void __iomem *addr,
83 u32 eval, u32 mask)
121{ 84{
122 writel(val, addr); 85 int i;
123 cx18_io_delay(cx); 86 u32 r;
87 eval &= mask;
88 for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
89 cx18_writel_noretry(cx, val, addr);
90 r = cx18_readl(cx, addr);
91 if (r == 0xffffffff && eval != 0xffffffff)
92 continue;
93 if (eval == (r & mask))
94 break;
95 }
124} 96}
125 97
126void cx18_writel_retry(struct cx18 *cx, u32 val, void __iomem *addr); 98static inline u16 cx18_readw(struct cx18 *cx, const void __iomem *addr)
127
128static inline void cx18_writel(struct cx18 *cx, u32 val, void __iomem *addr)
129{ 99{
130 if (cx18_retry_mmio) 100 return readw(addr);
131 cx18_writel_retry(cx, val, addr);
132 else
133 cx18_writel_noretry(cx, val, addr);
134} 101}
135 102
136void _cx18_writel_expect(struct cx18 *cx, u32 val, void __iomem *addr,
137 u32 eval, u32 mask);
138
139static inline 103static inline
140void cx18_writew_noretry(struct cx18 *cx, u16 val, void __iomem *addr) 104void cx18_writew_noretry(struct cx18 *cx, u16 val, void __iomem *addr)
141{ 105{
142 writew(val, addr); 106 writew(val, addr);
143 cx18_io_delay(cx);
144} 107}
145 108
146void cx18_writew_retry(struct cx18 *cx, u16 val, void __iomem *addr);
147
148static inline void cx18_writew(struct cx18 *cx, u16 val, void __iomem *addr) 109static inline void cx18_writew(struct cx18 *cx, u16 val, void __iomem *addr)
149{ 110{
150 if (cx18_retry_mmio) 111 int i;
151 cx18_writew_retry(cx, val, addr); 112 for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
152 else
153 cx18_writew_noretry(cx, val, addr); 113 cx18_writew_noretry(cx, val, addr);
114 if (val == cx18_readw(cx, addr))
115 break;
116 }
154} 117}
155 118
119static inline u8 cx18_readb(struct cx18 *cx, const void __iomem *addr)
120{
121 return readb(addr);
122}
156 123
157static inline 124static inline
158void cx18_writeb_noretry(struct cx18 *cx, u8 val, void __iomem *addr) 125void cx18_writeb_noretry(struct cx18 *cx, u8 val, void __iomem *addr)
159{ 126{
160 writeb(val, addr); 127 writeb(val, addr);
161 cx18_io_delay(cx);
162} 128}
163 129
164void cx18_writeb_retry(struct cx18 *cx, u8 val, void __iomem *addr);
165
166static inline void cx18_writeb(struct cx18 *cx, u8 val, void __iomem *addr) 130static inline void cx18_writeb(struct cx18 *cx, u8 val, void __iomem *addr)
167{ 131{
168 if (cx18_retry_mmio) 132 int i;
169 cx18_writeb_retry(cx, val, addr); 133 for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
170 else
171 cx18_writeb_noretry(cx, val, addr); 134 cx18_writeb_noretry(cx, val, addr);
172} 135 if (val == cx18_readb(cx, addr))
173 136 break;
174 137 }
175static inline u32 cx18_readl_noretry(struct cx18 *cx, const void __iomem *addr)
176{
177 u32 ret = readl(addr);
178 cx18_io_delay(cx);
179 return ret;
180}
181
182u32 cx18_readl_retry(struct cx18 *cx, const void __iomem *addr);
183
184static inline u32 cx18_readl(struct cx18 *cx, const void __iomem *addr)
185{
186 if (cx18_retry_mmio)
187 return cx18_readl_retry(cx, addr);
188
189 return cx18_readl_noretry(cx, addr);
190}
191
192
193static inline u16 cx18_readw_noretry(struct cx18 *cx, const void __iomem *addr)
194{
195 u16 ret = readw(addr);
196 cx18_io_delay(cx);
197 return ret;
198}
199
200u16 cx18_readw_retry(struct cx18 *cx, const void __iomem *addr);
201
202static inline u16 cx18_readw(struct cx18 *cx, const void __iomem *addr)
203{
204 if (cx18_retry_mmio)
205 return cx18_readw_retry(cx, addr);
206
207 return cx18_readw_noretry(cx, addr);
208}
209
210
211static inline u8 cx18_readb_noretry(struct cx18 *cx, const void __iomem *addr)
212{
213 u8 ret = readb(addr);
214 cx18_io_delay(cx);
215 return ret;
216}
217
218u8 cx18_readb_retry(struct cx18 *cx, const void __iomem *addr);
219
220static inline u8 cx18_readb(struct cx18 *cx, const void __iomem *addr)
221{
222 if (cx18_retry_mmio)
223 return cx18_readb_retry(cx, addr);
224
225 return cx18_readb_noretry(cx, addr);
226}
227
228
229static inline
230u32 cx18_write_sync_noretry(struct cx18 *cx, u32 val, void __iomem *addr)
231{
232 cx18_writel_noretry(cx, val, addr);
233 return cx18_readl_noretry(cx, addr);
234} 138}
235 139
236static inline 140static inline
237u32 cx18_write_sync_retry(struct cx18 *cx, u32 val, void __iomem *addr) 141void cx18_memcpy_fromio(struct cx18 *cx, void *to,
238{ 142 const void __iomem *from, unsigned int len)
239 cx18_writel_retry(cx, val, addr);
240 return cx18_readl_retry(cx, addr);
241}
242
243static inline u32 cx18_write_sync(struct cx18 *cx, u32 val, void __iomem *addr)
244{ 143{
245 if (cx18_retry_mmio) 144 memcpy_fromio(to, from, len);
246 return cx18_write_sync_retry(cx, val, addr);
247
248 return cx18_write_sync_noretry(cx, val, addr);
249} 145}
250 146
251
252void cx18_memcpy_fromio(struct cx18 *cx, void *to,
253 const void __iomem *from, unsigned int len);
254void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count); 147void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count);
255 148
256 149
@@ -260,136 +153,39 @@ static inline void cx18_write_reg_noretry(struct cx18 *cx, u32 val, u32 reg)
260 cx18_writel_noretry(cx, val, cx->reg_mem + reg); 153 cx18_writel_noretry(cx, val, cx->reg_mem + reg);
261} 154}
262 155
263static inline void cx18_write_reg_retry(struct cx18 *cx, u32 val, u32 reg)
264{
265 cx18_writel_retry(cx, val, cx->reg_mem + reg);
266}
267
268static inline void cx18_write_reg(struct cx18 *cx, u32 val, u32 reg) 156static inline void cx18_write_reg(struct cx18 *cx, u32 val, u32 reg)
269{ 157{
270 if (cx18_retry_mmio) 158 cx18_writel(cx, val, cx->reg_mem + reg);
271 cx18_write_reg_retry(cx, val, reg);
272 else
273 cx18_write_reg_noretry(cx, val, reg);
274}
275
276static inline void _cx18_write_reg_expect(struct cx18 *cx, u32 val, u32 reg,
277 u32 eval, u32 mask)
278{
279 _cx18_writel_expect(cx, val, cx->reg_mem + reg, eval, mask);
280} 159}
281 160
282static inline void cx18_write_reg_expect(struct cx18 *cx, u32 val, u32 reg, 161static inline void cx18_write_reg_expect(struct cx18 *cx, u32 val, u32 reg,
283 u32 eval, u32 mask) 162 u32 eval, u32 mask)
284{ 163{
285 if (cx18_retry_mmio) 164 cx18_writel_expect(cx, val, cx->reg_mem + reg, eval, mask);
286 _cx18_write_reg_expect(cx, val, reg, eval, mask);
287 else
288 cx18_write_reg_noretry(cx, val, reg);
289}
290
291
292static inline u32 cx18_read_reg_noretry(struct cx18 *cx, u32 reg)
293{
294 return cx18_readl_noretry(cx, cx->reg_mem + reg);
295}
296
297static inline u32 cx18_read_reg_retry(struct cx18 *cx, u32 reg)
298{
299 return cx18_readl_retry(cx, cx->reg_mem + reg);
300} 165}
301 166
302static inline u32 cx18_read_reg(struct cx18 *cx, u32 reg) 167static inline u32 cx18_read_reg(struct cx18 *cx, u32 reg)
303{ 168{
304 if (cx18_retry_mmio) 169 return cx18_readl(cx, cx->reg_mem + reg);
305 return cx18_read_reg_retry(cx, reg);
306
307 return cx18_read_reg_noretry(cx, reg);
308}
309
310
311static inline u32 cx18_write_reg_sync_noretry(struct cx18 *cx, u32 val, u32 reg)
312{
313 return cx18_write_sync_noretry(cx, val, cx->reg_mem + reg);
314}
315
316static inline u32 cx18_write_reg_sync_retry(struct cx18 *cx, u32 val, u32 reg)
317{
318 return cx18_write_sync_retry(cx, val, cx->reg_mem + reg);
319}
320
321static inline u32 cx18_write_reg_sync(struct cx18 *cx, u32 val, u32 reg)
322{
323 if (cx18_retry_mmio)
324 return cx18_write_reg_sync_retry(cx, val, reg);
325
326 return cx18_write_reg_sync_noretry(cx, val, reg);
327} 170}
328 171
329 172
330/* Access "encoder memory" region of CX23418 memory mapped I/O */ 173/* Access "encoder memory" region of CX23418 memory mapped I/O */
331static inline void cx18_write_enc_noretry(struct cx18 *cx, u32 val, u32 addr)
332{
333 cx18_writel_noretry(cx, val, cx->enc_mem + addr);
334}
335
336static inline void cx18_write_enc_retry(struct cx18 *cx, u32 val, u32 addr)
337{
338 cx18_writel_retry(cx, val, cx->enc_mem + addr);
339}
340
341static inline void cx18_write_enc(struct cx18 *cx, u32 val, u32 addr) 174static inline void cx18_write_enc(struct cx18 *cx, u32 val, u32 addr)
342{ 175{
343 if (cx18_retry_mmio) 176 cx18_writel(cx, val, cx->enc_mem + addr);
344 cx18_write_enc_retry(cx, val, addr);
345 else
346 cx18_write_enc_noretry(cx, val, addr);
347}
348
349
350static inline u32 cx18_read_enc_noretry(struct cx18 *cx, u32 addr)
351{
352 return cx18_readl_noretry(cx, cx->enc_mem + addr);
353}
354
355static inline u32 cx18_read_enc_retry(struct cx18 *cx, u32 addr)
356{
357 return cx18_readl_retry(cx, cx->enc_mem + addr);
358} 177}
359 178
360static inline u32 cx18_read_enc(struct cx18 *cx, u32 addr) 179static inline u32 cx18_read_enc(struct cx18 *cx, u32 addr)
361{ 180{
362 if (cx18_retry_mmio) 181 return cx18_readl(cx, cx->enc_mem + addr);
363 return cx18_read_enc_retry(cx, addr);
364
365 return cx18_read_enc_noretry(cx, addr);
366}
367
368static inline
369u32 cx18_write_enc_sync_noretry(struct cx18 *cx, u32 val, u32 addr)
370{
371 return cx18_write_sync_noretry(cx, val, cx->enc_mem + addr);
372}
373
374static inline
375u32 cx18_write_enc_sync_retry(struct cx18 *cx, u32 val, u32 addr)
376{
377 return cx18_write_sync_retry(cx, val, cx->enc_mem + addr);
378}
379
380static inline
381u32 cx18_write_enc_sync(struct cx18 *cx, u32 val, u32 addr)
382{
383 if (cx18_retry_mmio)
384 return cx18_write_enc_sync_retry(cx, val, addr);
385
386 return cx18_write_enc_sync_noretry(cx, val, addr);
387} 182}
388 183
389void cx18_sw1_irq_enable(struct cx18 *cx, u32 val); 184void cx18_sw1_irq_enable(struct cx18 *cx, u32 val);
390void cx18_sw1_irq_disable(struct cx18 *cx, u32 val); 185void cx18_sw1_irq_disable(struct cx18 *cx, u32 val);
391void cx18_sw2_irq_enable(struct cx18 *cx, u32 val); 186void cx18_sw2_irq_enable(struct cx18 *cx, u32 val);
392void cx18_sw2_irq_disable(struct cx18 *cx, u32 val); 187void cx18_sw2_irq_disable(struct cx18 *cx, u32 val);
188void cx18_sw2_irq_disable_cpu(struct cx18 *cx, u32 val);
393void cx18_setup_page(struct cx18 *cx, u32 addr); 189void cx18_setup_page(struct cx18 *cx, u32 addr);
394 190
395#endif /* CX18_IO_H */ 191#endif /* CX18_IO_H */
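For readers new to the eval/mask convention of the cx18_writel_expect()/cx18_write_reg_expect() helpers defined above, here is a short restatement, using the firmware-halt write from cx18-firmware.c as the worked example. It is explanatory only and adds nothing the inline code does not already do.

/*
 * cx18_write_reg_expect(cx, 0x000F000F, CX18_PROC_SOFT_RESET,
 *			 0x0000000F, 0x000F000F);
 *
 * means: write 0x000F000F to the register, read it back, and keep
 * re-writing (up to CX18_MAX_MMIO_WR_RETRIES times) until the masked
 * readback matches, i.e. (readback & 0x000F000F) == 0x0000000F.  An
 * all-ones readback is treated as a likely PCI read error and simply
 * retried instead of compared.  The loop's exit test is equivalent to:
 */
static bool expect_write_satisfied(u32 readback, u32 eval, u32 mask)
{
	if (readback == 0xffffffff && (eval & mask) != 0xffffffff)
		return false;		/* looks like a bus read error */
	return (readback & mask) == (eval & mask);
}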
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index f0ca50f5fdde..e6087486f889 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -4,6 +4,7 @@
4 * Derived from ivtv-ioctl.c 4 * Derived from ivtv-ioctl.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -237,13 +238,12 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
237 if (ret) 238 if (ret)
238 return ret; 239 return ret;
239 240
240 if (id->type == CX18_ENC_STREAM_TYPE_VBI && 241 if (!cx18_raw_vbi(cx) && atomic_read(&cx->ana_capturing) > 0)
241 cx->vbi.sliced_in->service_set &&
242 atomic_read(&cx->ana_capturing) > 0)
243 return -EBUSY; 242 return -EBUSY;
244 243
245 cx->vbi.sliced_in->service_set = 0; 244 cx->vbi.sliced_in->service_set = 0;
246 cx18_av_cmd(cx, VIDIOC_S_FMT, &cx->vbi.in); 245 cx->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
246 cx18_av_cmd(cx, VIDIOC_S_FMT, fmt);
247 return cx18_g_fmt_vbi_cap(file, fh, fmt); 247 return cx18_g_fmt_vbi_cap(file, fh, fmt);
248} 248}
249 249
@@ -745,14 +745,12 @@ static int cx18_log_status(struct file *file, void *fh)
745 continue; 745 continue;
746 CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", 746 CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n",
747 s->name, s->s_flags, 747 s->name, s->s_flags,
748 (s->buffers - atomic_read(&s->q_free.buffers)) 748 atomic_read(&s->q_full.buffers) * 100 / s->buffers,
749 * 100 / s->buffers,
750 (s->buffers * s->buf_size) / 1024, s->buffers); 749 (s->buffers * s->buf_size) / 1024, s->buffers);
751 } 750 }
752 CX18_INFO("Read MPEG/VBI: %lld/%lld bytes\n", 751 CX18_INFO("Read MPEG/VBI: %lld/%lld bytes\n",
753 (long long)cx->mpg_data_received, 752 (long long)cx->mpg_data_received,
754 (long long)cx->vbi_data_inserted); 753 (long long)cx->vbi_data_inserted);
755 cx18_log_statistics(cx);
756 CX18_INFO("================== END STATUS CARD #%d ==================\n", cx->num); 754 CX18_INFO("================== END STATUS CARD #%d ==================\n", cx->num);
757 return 0; 755 return 0;
758} 756}
diff --git a/drivers/media/video/cx18/cx18-ioctl.h b/drivers/media/video/cx18/cx18-ioctl.h
index 2222f679d86d..08fe24e9510e 100644
--- a/drivers/media/video/cx18/cx18-ioctl.h
+++ b/drivers/media/video/cx18/cx18-ioctl.h
@@ -4,6 +4,7 @@
4 * Derived from ivtv-ioctl.h 4 * Derived from ivtv-ioctl.h
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx18/cx18-irq.c b/drivers/media/video/cx18/cx18-irq.c
index 5fbfbd0f1493..af2f504eda2b 100644
--- a/drivers/media/video/cx18/cx18-irq.c
+++ b/drivers/media/video/cx18/cx18-irq.c
@@ -2,6 +2,7 @@
2 * cx18 interrupt handling 2 * cx18 interrupt handling
3 * 3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -21,132 +22,9 @@
21 22
22#include "cx18-driver.h" 23#include "cx18-driver.h"
23#include "cx18-io.h" 24#include "cx18-io.h"
24#include "cx18-firmware.h"
25#include "cx18-fileops.h"
26#include "cx18-queue.h"
27#include "cx18-irq.h" 25#include "cx18-irq.h"
28#include "cx18-ioctl.h"
29#include "cx18-mailbox.h" 26#include "cx18-mailbox.h"
30#include "cx18-vbi.h"
31#include "cx18-scb.h" 27#include "cx18-scb.h"
32#include "cx18-dvb.h"
33
34void cx18_work_handler(struct work_struct *work)
35{
36 struct cx18 *cx = container_of(work, struct cx18, work);
37 if (test_and_clear_bit(CX18_F_I_WORK_INITED, &cx->i_flags)) {
38 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
39 /* This thread must use the FIFO scheduler as it
40 * is realtime sensitive. */
41 sched_setscheduler(current, SCHED_FIFO, &param);
42 }
43 if (test_and_clear_bit(CX18_F_I_WORK_HANDLER_DVB, &cx->i_flags))
44 cx18_dvb_work_handler(cx);
45}
46
47static void epu_dma_done(struct cx18 *cx, struct cx18_mailbox *mb)
48{
49 u32 handle = mb->args[0];
50 struct cx18_stream *s = NULL;
51 struct cx18_buffer *buf;
52 u32 off;
53 int i;
54 int id;
55
56 for (i = 0; i < CX18_MAX_STREAMS; i++) {
57 s = &cx->streams[i];
58 if ((handle == s->handle) && (s->dvb.enabled))
59 break;
60 if (s->v4l2dev && handle == s->handle)
61 break;
62 }
63 if (i == CX18_MAX_STREAMS) {
64 CX18_WARN("Got DMA done notification for unknown/inactive"
65 " handle %d\n", handle);
66 mb->error = CXERR_NOT_OPEN;
67 mb->cmd = 0;
68 cx18_mb_ack(cx, mb);
69 return;
70 }
71
72 off = mb->args[1];
73 if (mb->args[2] != 1)
74 CX18_WARN("Ack struct = %d for %s\n",
75 mb->args[2], s->name);
76 id = cx18_read_enc(cx, off);
77 buf = cx18_queue_get_buf_irq(s, id, cx18_read_enc(cx, off + 4));
78 CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
79 if (buf) {
80 cx18_buf_sync_for_cpu(s, buf);
81 if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
82 CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
83 buf->bytesused);
84
85 set_bit(CX18_F_I_WORK_HANDLER_DVB, &cx->i_flags);
86 set_bit(CX18_F_I_HAVE_WORK, &cx->i_flags);
87 } else
88 set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
89 } else {
90 CX18_WARN("Could not find buf %d for stream %s\n",
91 cx18_read_enc(cx, off), s->name);
92 }
93 mb->error = 0;
94 mb->cmd = 0;
95 cx18_mb_ack(cx, mb);
96 wake_up(&cx->dma_waitq);
97 if (s->id != -1)
98 wake_up(&s->waitq);
99}
100
101static void epu_debug(struct cx18 *cx, struct cx18_mailbox *mb)
102{
103 char str[256] = { 0 };
104 char *p;
105
106 if (mb->args[1]) {
107 cx18_setup_page(cx, mb->args[1]);
108 cx18_memcpy_fromio(cx, str, cx->enc_mem + mb->args[1], 252);
109 str[252] = 0;
110 }
111 cx18_mb_ack(cx, mb);
112 CX18_DEBUG_INFO("%x %s\n", mb->args[0], str);
113 p = strchr(str, '.');
114 if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
115 CX18_INFO("FW version: %s\n", p - 1);
116}
117
118static void epu_cmd(struct cx18 *cx, u32 sw1)
119{
120 struct cx18_mailbox mb;
121
122 if (sw1 & IRQ_CPU_TO_EPU) {
123 cx18_memcpy_fromio(cx, &mb, &cx->scb->cpu2epu_mb, sizeof(mb));
124 mb.error = 0;
125
126 switch (mb.cmd) {
127 case CX18_EPU_DMA_DONE:
128 epu_dma_done(cx, &mb);
129 break;
130 case CX18_EPU_DEBUG:
131 epu_debug(cx, &mb);
132 break;
133 default:
134 CX18_WARN("Unknown CPU_TO_EPU mailbox command %#08x\n",
135 mb.cmd);
136 break;
137 }
138 }
139
140 if (sw1 & IRQ_APU_TO_EPU) {
141 cx18_memcpy_fromio(cx, &mb, &cx->scb->apu2epu_mb, sizeof(mb));
142 CX18_WARN("Unknown APU_TO_EPU mailbox command %#08x\n", mb.cmd);
143 }
144
145 if (sw1 & IRQ_HPU_TO_EPU) {
146 cx18_memcpy_fromio(cx, &mb, &cx->scb->hpu2epu_mb, sizeof(mb));
147 CX18_WARN("Unknown HPU_TO_EPU mailbox command %#08x\n", mb.cmd);
148 }
149}
150 28
151static void xpu_ack(struct cx18 *cx, u32 sw2) 29static void xpu_ack(struct cx18 *cx, u32 sw2)
152{ 30{
@@ -154,23 +32,24 @@ static void xpu_ack(struct cx18 *cx, u32 sw2)
154 wake_up(&cx->mb_cpu_waitq); 32 wake_up(&cx->mb_cpu_waitq);
155 if (sw2 & IRQ_APU_TO_EPU_ACK) 33 if (sw2 & IRQ_APU_TO_EPU_ACK)
156 wake_up(&cx->mb_apu_waitq); 34 wake_up(&cx->mb_apu_waitq);
157 if (sw2 & IRQ_HPU_TO_EPU_ACK) 35}
158 wake_up(&cx->mb_hpu_waitq); 36
37static void epu_cmd(struct cx18 *cx, u32 sw1)
38{
39 if (sw1 & IRQ_CPU_TO_EPU)
40 cx18_api_epu_cmd_irq(cx, CPU);
41 if (sw1 & IRQ_APU_TO_EPU)
42 cx18_api_epu_cmd_irq(cx, APU);
159} 43}
160 44
161irqreturn_t cx18_irq_handler(int irq, void *dev_id) 45irqreturn_t cx18_irq_handler(int irq, void *dev_id)
162{ 46{
163 struct cx18 *cx = (struct cx18 *)dev_id; 47 struct cx18 *cx = (struct cx18 *)dev_id;
164 u32 sw1, sw1_mask; 48 u32 sw1, sw2, hw2;
165 u32 sw2, sw2_mask;
166 u32 hw2, hw2_mask;
167 49
168 sw1_mask = cx18_read_reg(cx, SW1_INT_ENABLE_PCI); 50 sw1 = cx18_read_reg(cx, SW1_INT_STATUS) & cx->sw1_irq_mask;
169 sw1 = cx18_read_reg(cx, SW1_INT_STATUS) & sw1_mask; 51 sw2 = cx18_read_reg(cx, SW2_INT_STATUS) & cx->sw2_irq_mask;
170 sw2_mask = cx18_read_reg(cx, SW2_INT_ENABLE_PCI); 52 hw2 = cx18_read_reg(cx, HW2_INT_CLR_STATUS) & cx->hw2_irq_mask;
171 sw2 = cx18_read_reg(cx, SW2_INT_STATUS) & sw2_mask;
172 hw2_mask = cx18_read_reg(cx, HW2_INT_MASK5_PCI);
173 hw2 = cx18_read_reg(cx, HW2_INT_CLR_STATUS) & hw2_mask;
174 53
175 if (sw1) 54 if (sw1)
176 cx18_write_reg_expect(cx, sw1, SW1_INT_STATUS, ~sw1, sw1); 55 cx18_write_reg_expect(cx, sw1, SW1_INT_STATUS, ~sw1, sw1);
@@ -180,7 +59,15 @@ irqreturn_t cx18_irq_handler(int irq, void *dev_id)
180 cx18_write_reg_expect(cx, hw2, HW2_INT_CLR_STATUS, ~hw2, hw2); 59 cx18_write_reg_expect(cx, hw2, HW2_INT_CLR_STATUS, ~hw2, hw2);
181 60
182 if (sw1 || sw2 || hw2) 61 if (sw1 || sw2 || hw2)
183 CX18_DEBUG_HI_IRQ("SW1: %x SW2: %x HW2: %x\n", sw1, sw2, hw2); 62 CX18_DEBUG_HI_IRQ("received interrupts "
63 "SW1: %x SW2: %x HW2: %x\n", sw1, sw2, hw2);
64
65 /*
66 * SW1 responses have to happen first. The sending XPU times out the
67 * incoming mailboxes on us rather rapidly.
68 */
69 if (sw1)
70 epu_cmd(cx, sw1);
184 71
185 /* To do: interrupt-based I2C handling 72 /* To do: interrupt-based I2C handling
186 if (hw2 & (HW2_I2C1_INT|HW2_I2C2_INT)) { 73 if (hw2 & (HW2_I2C1_INT|HW2_I2C2_INT)) {
@@ -190,11 +77,5 @@ irqreturn_t cx18_irq_handler(int irq, void *dev_id)
190 if (sw2) 77 if (sw2)
191 xpu_ack(cx, sw2); 78 xpu_ack(cx, sw2);
192 79
193 if (sw1)
194 epu_cmd(cx, sw1);
195
196 if (test_and_clear_bit(CX18_F_I_HAVE_WORK, &cx->i_flags))
197 queue_work(cx->work_queue, &cx->work);
198
199 return (sw1 || sw2 || hw2) ? IRQ_HANDLED : IRQ_NONE; 80 return (sw1 || sw2 || hw2) ? IRQ_HANDLED : IRQ_NONE;
200} 81}
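The trimmed cx18_irq_handler() above is now a conventional shared PCI top half: read the three status registers masked by the cached enables, ack only what was seen, handle SW1 mailbox work before SW2 acks, and report IRQ_HANDLED only when something was pending. Its registration site is not in this hunk; a typical hook-up would look like the sketch below, where the error label and the literal "cx18" interrupt name are illustrative rather than quoted from cx18-driver.c.

	/* illustrative only; the real request_irq() call is in cx18-driver.c */
	retval = request_irq(cx->dev->irq, cx18_irq_handler,
			     IRQF_SHARED, "cx18", (void *)cx);
	if (retval) {
		CX18_ERR("Failed to register irq (%d)\n", retval);
		goto err_free_streams;		/* hypothetical unwind label */
	}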
diff --git a/drivers/media/video/cx18/cx18-irq.h b/drivers/media/video/cx18/cx18-irq.h
index 6173ca3bc9e4..91f0b5278ef9 100644
--- a/drivers/media/video/cx18/cx18-irq.h
+++ b/drivers/media/video/cx18/cx18-irq.h
@@ -2,6 +2,7 @@
2 * cx18 interrupt handling 2 * cx18 interrupt handling
3 * 3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -28,8 +29,7 @@
28#define SW1_INT_ENABLE_PCI 0xc7311c 29#define SW1_INT_ENABLE_PCI 0xc7311c
29#define SW2_INT_SET 0xc73140 30#define SW2_INT_SET 0xc73140
30#define SW2_INT_STATUS 0xc73144 31#define SW2_INT_STATUS 0xc73144
32#define SW2_INT_ENABLE_CPU 0xc73158
31#define SW2_INT_ENABLE_PCI 0xc7315c 33#define SW2_INT_ENABLE_PCI 0xc7315c
32 34
33irqreturn_t cx18_irq_handler(int irq, void *dev_id); 35irqreturn_t cx18_irq_handler(int irq, void *dev_id);
34
35void cx18_work_handler(struct work_struct *work);
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index acff7dfb60df..de5e723fdf44 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -2,6 +2,7 @@
2 * cx18 mailbox functions 2 * cx18 mailbox functions
3 * 3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -26,15 +27,14 @@
26#include "cx18-scb.h" 27#include "cx18-scb.h"
27#include "cx18-irq.h" 28#include "cx18-irq.h"
28#include "cx18-mailbox.h" 29#include "cx18-mailbox.h"
30#include "cx18-queue.h"
31#include "cx18-streams.h"
32
33static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };
29 34
30#define API_FAST (1 << 2) /* Short timeout */ 35#define API_FAST (1 << 2) /* Short timeout */
31#define API_SLOW (1 << 3) /* Additional 300ms timeout */ 36#define API_SLOW (1 << 3) /* Additional 300ms timeout */
32 37
33#define APU 0
34#define CPU 1
35#define EPU 2
36#define HPU 3
37
38struct cx18_api_info { 38struct cx18_api_info {
39 u32 cmd; 39 u32 cmd;
40 u8 flags; /* Flags, see above */ 40 u8 flags; /* Flags, see above */
@@ -82,8 +82,9 @@ static const struct cx18_api_info api_info[] = {
82 API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0), 82 API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
83 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0), 83 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
84 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST), 84 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
85 API_ENTRY(CPU, CX18_APU_RESETAI, API_FAST),
86 API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW), 85 API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
86 API_ENTRY(APU, CX18_APU_RESETAI, 0),
87 API_ENTRY(CPU, CX18_CPU_DEBUG_PEEK32, 0),
87 API_ENTRY(0, 0, 0), 88 API_ENTRY(0, 0, 0),
88}; 89};
89 90
@@ -97,70 +98,175 @@ static const struct cx18_api_info *find_api_info(u32 cmd)
97 return NULL; 98 return NULL;
98} 99}
99 100
100static struct cx18_mailbox __iomem *cx18_mb_is_complete(struct cx18 *cx, int rpu, 101static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
101 u32 *state, u32 *irq, u32 *req)
102{ 102{
103 struct cx18_mailbox __iomem *mb = NULL; 103 char argstr[MAX_MB_ARGUMENTS*11+1];
104 int wait_count = 0; 104 char *p;
105 u32 ack; 105 int i;
106
107 switch (rpu) {
108 case APU:
109 mb = &cx->scb->epu2apu_mb;
110 *state = cx18_readl(cx, &cx->scb->apu_state);
111 *irq = cx18_readl(cx, &cx->scb->epu2apu_irq);
112 break;
113 106
114 case CPU: 107 if (!(cx18_debug & CX18_DBGFLG_API))
115 mb = &cx->scb->epu2cpu_mb; 108 return;
116 *state = cx18_readl(cx, &cx->scb->cpu_state);
117 *irq = cx18_readl(cx, &cx->scb->epu2cpu_irq);
118 break;
119 109
120 case HPU: 110 for (i = 0, p = argstr; i < MAX_MB_ARGUMENTS; i++, p += 11) {
121 mb = &cx->scb->epu2hpu_mb; 111 /* kernel snprintf() appends '\0' always */
122 *state = cx18_readl(cx, &cx->scb->hpu_state); 112 snprintf(p, 12, " %#010x", mb->args[i]);
123 *irq = cx18_readl(cx, &cx->scb->epu2hpu_irq);
124 break;
125 } 113 }
114 CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
115 "\n", name, mb->request, mb->ack, mb->cmd, mb->error, argstr);
116}
126 117
127 if (mb == NULL)
128 return mb;
129 118
130 do { 119/*
131 *req = cx18_readl(cx, &mb->request); 120 * Functions that run in a work_queue work handling context
132 ack = cx18_readl(cx, &mb->ack); 121 */
133 wait_count++;
134 } while (*req != ack && wait_count < 600);
135 122
136 if (*req == ack) { 123static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
137 (*req)++; 124{
138 if (*req == 0 || *req == 0xffffffff) 125 u32 handle, mdl_ack_count, id;
139 *req = 1; 126 struct cx18_mailbox *mb;
140 return mb; 127 struct cx18_mdl_ack *mdl_ack;
128 struct cx18_stream *s;
129 struct cx18_buffer *buf;
130 int i;
131
132 mb = &order->mb;
133 handle = mb->args[0];
134 s = cx18_handle_to_stream(cx, handle);
135
136 if (s == NULL) {
137 CX18_WARN("Got DMA done notification for unknown/inactive"
138 " handle %d, %s mailbox seq no %d\n", handle,
139 (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
140 "stale" : "good", mb->request);
141 return;
141 } 142 }
142 return NULL; 143
144 mdl_ack_count = mb->args[2];
145 mdl_ack = order->mdl_ack;
146 for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
147 id = mdl_ack->id;
148 /*
149 * Simple integrity check for processing a stale (and possibly
150 * inconsistent mailbox): make sure the buffer id is in the
151 * valid range for the stream.
152 *
153 * We go through the trouble of dealing with stale mailboxes
154 * because most of the time, the mailbox data is still valid and
155 * unchanged (and in practice the firmware ping-pongs the
156 * two mdl_ack buffers so mdl_acks are not stale).
157 *
158 * There are occasions when we get a half changed mailbox,
159 * which this check catches for a handle & id mismatch. If the
160 * handle and id do correspond, the worst case is that we
161 * completely lost the old buffer, but pick up the new buffer
162 * early (but the new mdl_ack is guaranteed to be good in this
163 * case as the firmware wouldn't point us to a new mdl_ack until
164 * it's filled in).
165 *
166		 * cx18_queue_get_buf() will detect the lost buffers
167 * and send them back to q_free for fw rotation eventually.
168 */
169 if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
170 !(id >= s->mdl_offset &&
171 id < (s->mdl_offset + s->buffers))) {
172 CX18_WARN("Fell behind! Ignoring stale mailbox with "
173				  "inconsistent data. Lost buffer for mailbox "
174 "seq no %d\n", mb->request);
175 break;
176 }
177 buf = cx18_queue_get_buf(s, id, mdl_ack->data_used);
178
179 CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
180 if (buf == NULL) {
181 CX18_WARN("Could not find buf %d for stream %s\n",
182 id, s->name);
183 /* Put as many buffers as possible back into fw use */
184 cx18_stream_load_fw_queue(s);
185 continue;
186 }
187
188 if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
189 CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
190 buf->bytesused);
191 dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
192 buf->bytesused);
193 }
194 /* Put as many buffers as possible back into fw use */
195 cx18_stream_load_fw_queue(s);
196 /* Put back TS buffer, since it was removed from all queues */
197 if (s->type == CX18_ENC_STREAM_TYPE_TS)
198 cx18_stream_put_buf_fw(s, buf);
199 }
200 wake_up(&cx->dma_waitq);
201 if (s->id != -1)
202 wake_up(&s->waitq);
143} 203}
144 204
145long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb) 205static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
146{ 206{
147 const struct cx18_api_info *info = find_api_info(mb->cmd); 207 char *p;
148 struct cx18_mailbox __iomem *ack_mb; 208 char *str = order->str;
149 u32 ack_irq;
150 u8 rpu = CPU;
151 209
152 if (info == NULL && mb->cmd) { 210 CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
153 CX18_WARN("Cannot ack unknown command %x\n", mb->cmd); 211 p = strchr(str, '.');
154 return -EINVAL; 212 if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
155 } 213 CX18_INFO("FW version: %s\n", p - 1);
156 if (info) 214}
157 rpu = info->rpu;
158 215
159 switch (rpu) { 216static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
160 case HPU: 217{
161 ack_irq = IRQ_EPU_TO_HPU_ACK; 218 switch (order->rpu) {
162 ack_mb = &cx->scb->hpu2epu_mb; 219 case CPU:
220 {
221 switch (order->mb.cmd) {
222 case CX18_EPU_DMA_DONE:
223 epu_dma_done(cx, order);
224 break;
225 case CX18_EPU_DEBUG:
226 epu_debug(cx, order);
227 break;
228 default:
229 CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
230 order->mb.cmd);
231 break;
232 }
163 break; 233 break;
234 }
235 case APU:
236 CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
237 order->mb.cmd);
238 break;
239 default:
240 break;
241 }
242}
243
244static
245void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
246{
247 atomic_set(&order->pending, 0);
248}
249
250void cx18_epu_work_handler(struct work_struct *work)
251{
252 struct cx18_epu_work_order *order =
253 container_of(work, struct cx18_epu_work_order, work);
254 struct cx18 *cx = order->cx;
255 epu_cmd(cx, order);
256 free_epu_work_order(cx, order);
257}
258
259
260/*
261 * Functions that run in an interrupt handling context
262 */
263
264static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
265{
266 struct cx18_mailbox __iomem *ack_mb;
267 u32 ack_irq, req;
268
269 switch (order->rpu) {
164 case APU: 270 case APU:
165 ack_irq = IRQ_EPU_TO_APU_ACK; 271 ack_irq = IRQ_EPU_TO_APU_ACK;
166 ack_mb = &cx->scb->apu2epu_mb; 272 ack_mb = &cx->scb->apu2epu_mb;
@@ -170,26 +276,197 @@ long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb)
170 ack_mb = &cx->scb->cpu2epu_mb; 276 ack_mb = &cx->scb->cpu2epu_mb;
171 break; 277 break;
172 default: 278 default:
173 CX18_WARN("Unknown RPU for command %x\n", mb->cmd); 279 CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
174 return -EINVAL; 280 order->rpu, order->mb.cmd);
281 return;
175 } 282 }
176 283
177 cx18_setup_page(cx, SCB_OFFSET); 284 req = order->mb.request;
178 cx18_write_sync(cx, mb->request, &ack_mb->ack); 285 /* Don't ack if the RPU has gotten impatient and timed us out */
286 if (req != cx18_readl(cx, &ack_mb->request) ||
287 req == cx18_readl(cx, &ack_mb->ack)) {
288 CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
289 "incoming %s to EPU mailbox (sequence no. %u) "
290 "while processing\n",
291 rpu_str[order->rpu], rpu_str[order->rpu], req);
292 order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
293 return;
294 }
295 cx18_writel(cx, req, &ack_mb->ack);
179 cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq); 296 cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
180 return 0; 297 return;
298}
299
300static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
301{
302 u32 handle, mdl_ack_offset, mdl_ack_count;
303 struct cx18_mailbox *mb;
304
305 mb = &order->mb;
306 handle = mb->args[0];
307 mdl_ack_offset = mb->args[1];
308 mdl_ack_count = mb->args[2];
309
310 if (handle == CX18_INVALID_TASK_HANDLE ||
311 mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
312 if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
313 mb_ack_irq(cx, order);
314 return -1;
315 }
316
317 cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
318 sizeof(struct cx18_mdl_ack) * mdl_ack_count);
319
320 if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
321 mb_ack_irq(cx, order);
322 return 1;
323}
324
325static
326int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
327{
328 u32 str_offset;
329 char *str = order->str;
330
331 str[0] = '\0';
332 str_offset = order->mb.args[1];
333 if (str_offset) {
334 cx18_setup_page(cx, str_offset);
335 cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
336 str[252] = '\0';
337 cx18_setup_page(cx, SCB_OFFSET);
338 }
339
340 if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
341 mb_ack_irq(cx, order);
342
343 return str_offset ? 1 : 0;
181} 344}
182 345
346static inline
347int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
348{
349 int ret = -1;
350
351 switch (order->rpu) {
352 case CPU:
353 {
354 switch (order->mb.cmd) {
355 case CX18_EPU_DMA_DONE:
356 ret = epu_dma_done_irq(cx, order);
357 break;
358 case CX18_EPU_DEBUG:
359 ret = epu_debug_irq(cx, order);
360 break;
361 default:
362 CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
363 order->mb.cmd);
364 break;
365 }
366 break;
367 }
368 case APU:
369 CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
370 order->mb.cmd);
371 break;
372 default:
373 break;
374 }
375 return ret;
376}
377
378static inline
379struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
380{
381 int i;
382 struct cx18_epu_work_order *order = NULL;
383
384 for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
385 /*
386		 * We only need to inspect the contents of the "pending" atomic,
387		 * and need not do an atomic check-and-set, because:
388 * 1. Any work handler thread only clears "pending" and only
389 * on one, particular work order at a time, per handler thread.
390 * 2. "pending" is only set here, and we're serialized because
391 * we're called in an IRQ handler context.
392 */
393 if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
394 order = &cx->epu_work_order[i];
395 atomic_set(&order->pending, 1);
396 break;
397 }
398 }
399 return order;
400}
401
402void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
403{
404 struct cx18_mailbox __iomem *mb;
405 struct cx18_mailbox *order_mb;
406 struct cx18_epu_work_order *order;
407 int submit;
408
409 switch (rpu) {
410 case CPU:
411 mb = &cx->scb->cpu2epu_mb;
412 break;
413 case APU:
414 mb = &cx->scb->apu2epu_mb;
415 break;
416 default:
417 return;
418 }
419
420 order = alloc_epu_work_order_irq(cx);
421 if (order == NULL) {
422 CX18_WARN("Unable to find blank work order form to schedule "
423 "incoming mailbox command processing\n");
424 return;
425 }
426
427 order->flags = 0;
428 order->rpu = rpu;
429 order_mb = &order->mb;
430
431 /* mb->cmd and mb->args[0] through mb->args[2] */
432 cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
433 /* mb->request and mb->ack. N.B. we want to read mb->ack last */
434 cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
435 2 * sizeof(u32));
436
437 if (order_mb->request == order_mb->ack) {
438 CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
439 "incoming %s to EPU mailbox (sequence no. %u)"
440 "\n",
441 rpu_str[rpu], rpu_str[rpu], order_mb->request);
442 dump_mb(cx, order_mb, "incoming");
443 order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
444 }
445
446 /*
447 * Individual EPU command processing is responsible for ack-ing
448 * a non-stale mailbox as soon as possible
449 */
450 submit = epu_cmd_irq(cx, order);
451 if (submit > 0) {
452 queue_work(cx->work_queue, &order->work);
453 }
454}
455
456
457/*
458 * Functions called from a non-interrupt, non work_queue context
459 */
183 460
184static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[]) 461static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
185{ 462{
186 const struct cx18_api_info *info = find_api_info(cmd); 463 const struct cx18_api_info *info = find_api_info(cmd);
187 u32 state = 0, irq = 0, req, oldreq, err; 464 u32 state, irq, req, ack, err;
188 struct cx18_mailbox __iomem *mb; 465 struct cx18_mailbox __iomem *mb;
466 u32 __iomem *xpu_state;
189 wait_queue_head_t *waitq; 467 wait_queue_head_t *waitq;
190 int timeout = 100; 468 struct mutex *mb_lock;
191 int cnt = 0; 469 long int timeout, ret;
192 int sig = 0;
193 int i; 470 int i;
194 471
195 if (info == NULL) { 472 if (info == NULL) {
@@ -201,50 +478,104 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
201 CX18_DEBUG_HI_API("%s\n", info->name); 478 CX18_DEBUG_HI_API("%s\n", info->name);
202 else 479 else
203 CX18_DEBUG_API("%s\n", info->name); 480 CX18_DEBUG_API("%s\n", info->name);
204 cx18_setup_page(cx, SCB_OFFSET);
205 mb = cx18_mb_is_complete(cx, info->rpu, &state, &irq, &req);
206 481
207 if (mb == NULL) { 482 switch (info->rpu) {
208 CX18_ERR("mb %s busy\n", info->name); 483 case APU:
209 return -EBUSY; 484 waitq = &cx->mb_apu_waitq;
485 mb_lock = &cx->epu2apu_mb_lock;
486 irq = IRQ_EPU_TO_APU;
487 mb = &cx->scb->epu2apu_mb;
488 xpu_state = &cx->scb->apu_state;
489 break;
490 case CPU:
491 waitq = &cx->mb_cpu_waitq;
492 mb_lock = &cx->epu2cpu_mb_lock;
493 irq = IRQ_EPU_TO_CPU;
494 mb = &cx->scb->epu2cpu_mb;
495 xpu_state = &cx->scb->cpu_state;
496 break;
497 default:
498 CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
499 return -EINVAL;
210 } 500 }
211 501
212 oldreq = req - 1; 502 mutex_lock(mb_lock);
503 /*
504 * Wait for an in-use mailbox to complete
505 *
506 * If the XPU is responding with Ack's, the mailbox shouldn't be in
507 * a busy state, since we serialize access to it on our end.
508 *
509 * If the wait for ack after sending a previous command was interrupted
510 * by a signal, we may get here and find a busy mailbox. After waiting,
511	 * mark it "not busy" from our end if the XPU still hasn't ack'ed it.
512 */
513 state = cx18_readl(cx, xpu_state);
514 req = cx18_readl(cx, &mb->request);
515 timeout = msecs_to_jiffies(10);
516 ret = wait_event_timeout(*waitq,
517 (ack = cx18_readl(cx, &mb->ack)) == req,
518 timeout);
519 if (req != ack) {
520 /* waited long enough, make the mbox "not busy" from our end */
521 cx18_writel(cx, req, &mb->ack);
522 CX18_ERR("mbox was found stuck busy when setting up for %s; "
523 "clearing busy and trying to proceed\n", info->name);
524 } else if (ret != timeout)
525 CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
526 jiffies_to_msecs(timeout-ret));
527
528 /* Build the outgoing mailbox */
529 req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;
530
213 cx18_writel(cx, cmd, &mb->cmd); 531 cx18_writel(cx, cmd, &mb->cmd);
214 for (i = 0; i < args; i++) 532 for (i = 0; i < args; i++)
215 cx18_writel(cx, data[i], &mb->args[i]); 533 cx18_writel(cx, data[i], &mb->args[i]);
216 cx18_writel(cx, 0, &mb->error); 534 cx18_writel(cx, 0, &mb->error);
217 cx18_writel(cx, req, &mb->request); 535 cx18_writel(cx, req, &mb->request);
536 cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */
218 537
219 switch (info->rpu) { 538 /*
220 case APU: waitq = &cx->mb_apu_waitq; break; 539 * Notify the XPU and wait for it to send an Ack back
221 case CPU: waitq = &cx->mb_cpu_waitq; break; 540 */
222 case EPU: waitq = &cx->mb_epu_waitq; break; 541 timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);
223 case HPU: waitq = &cx->mb_hpu_waitq; break; 542
224 default: return -EINVAL; 543 CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
225 } 544 irq, info->name);
226 if (info->flags & API_FAST)
227 timeout /= 2;
228 cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq); 545 cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);
229 546
230 while (!sig && cx18_readl(cx, &mb->ack) != cx18_readl(cx, &mb->request) 547 ret = wait_event_timeout(
231 && cnt < 660) { 548 *waitq,
232 if (cnt > 200 && !in_atomic()) 549 cx18_readl(cx, &mb->ack) == cx18_readl(cx, &mb->request),
233 sig = cx18_msleep_timeout(10, 1); 550 timeout);
234 cnt++; 551
235 } 552 if (ret == 0) {
236 if (sig) 553 /* Timed out */
237 return -EINTR; 554 mutex_unlock(mb_lock);
238 if (cnt == 660) { 555 CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU "
239 cx18_writel(cx, oldreq, &mb->request); 556 "acknowledgement\n",
240 CX18_ERR("mb %s failed\n", info->name); 557 info->name, jiffies_to_msecs(timeout));
241 return -EINVAL; 558 return -EINVAL;
242 } 559 }
560
561 if (ret != timeout)
562 CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
563 jiffies_to_msecs(timeout-ret), info->name);
564
565 /* Collect data returned by the XPU */
243 for (i = 0; i < MAX_MB_ARGUMENTS; i++) 566 for (i = 0; i < MAX_MB_ARGUMENTS; i++)
244 data[i] = cx18_readl(cx, &mb->args[i]); 567 data[i] = cx18_readl(cx, &mb->args[i]);
245 err = cx18_readl(cx, &mb->error); 568 err = cx18_readl(cx, &mb->error);
246 if (!in_atomic() && (info->flags & API_SLOW)) 569 mutex_unlock(mb_lock);
570
571 /*
572 * Wait for XPU to perform extra actions for the caller in some cases.
573 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all buffers
574 * back in a burst shortly thereafter
575 */
576 if (info->flags & API_SLOW)
247 cx18_msleep_timeout(300, 0); 577 cx18_msleep_timeout(300, 0);
578
248 if (err) 579 if (err)
249 CX18_DEBUG_API("mailbox error %08x for command %s\n", err, 580 CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
250 info->name); 581 info->name);
@@ -253,12 +584,7 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
253 584
254int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[]) 585int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
255{ 586{
256 int res = cx18_api_call(cx, cmd, args, data); 587 return cx18_api_call(cx, cmd, args, data);
257
258 /* Allow a single retry, probably already too late though.
259 If there is no free mailbox then that is usually an indication
260 of a more serious problem. */
261 return (res == -EBUSY) ? cx18_api_call(cx, cmd, args, data) : res;
262} 588}
263 589
264static int cx18_set_filter_param(struct cx18_stream *s) 590static int cx18_set_filter_param(struct cx18_stream *s)
@@ -281,8 +607,9 @@ static int cx18_set_filter_param(struct cx18_stream *s)
281int cx18_api_func(void *priv, u32 cmd, int in, int out, 607int cx18_api_func(void *priv, u32 cmd, int in, int out,
282 u32 data[CX2341X_MBOX_MAX_DATA]) 608 u32 data[CX2341X_MBOX_MAX_DATA])
283{ 609{
284 struct cx18 *cx = priv; 610 struct cx18_api_func_private *api_priv = priv;
285 struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_MPG]; 611 struct cx18 *cx = api_priv->cx;
612 struct cx18_stream *s = api_priv->s;
286 613
287 switch (cmd) { 614 switch (cmd) {
288 case CX2341X_ENC_SET_OUTPUT_PORT: 615 case CX2341X_ENC_SET_OUTPUT_PORT:
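
A note on the reworked cx18_api_call() flow above: each outgoing command now gets a
sequence number that skips the reserved values 0 and 0xffffffff, the ack field is
primed to a different value, and the driver then waits for the XPU to echo the
request number back in ack. The standalone C sketch below models only that
handshake; struct toy_mailbox, next_request(), send_command() and wait_for_ack()
are names invented for illustration, not driver symbols.

#include <stdint.h>
#include <stdio.h>

struct toy_mailbox {
	uint32_t request;	/* written by the sender (EPU side) */
	uint32_t ack;		/* written by the receiver (XPU side) */
};

static uint32_t next_request(uint32_t req)
{
	/* Skip the reserved sequence numbers 0 and 0xffffffff */
	return ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;
}

static void send_command(struct toy_mailbox *mb)
{
	uint32_t req = next_request(mb->request);

	mb->request = req;
	mb->ack = req - 1;	/* make ack and request distinct up front */
}

static int wait_for_ack(const struct toy_mailbox *mb, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++)
		if (mb->ack == mb->request)
			return 0;	/* acked */
	return -1;			/* "timed out" */
}

int main(void)
{
	struct toy_mailbox mb = { .request = 0xfffffffe, .ack = 0xfffffffe };

	send_command(&mb);	/* request wraps from 0xfffffffe to 1 */
	mb.ack = mb.request;	/* pretend the firmware answered */
	printf("req=%u acked=%s\n", (unsigned)mb.request,
	       wait_for_ack(&mb, 10) == 0 ? "yes" : "no");
	return 0;
}

Built as plain userspace C this prints "req=1 acked=yes", i.e. the wrap-around and
the request/ack comparison behave the way the driver code above expects.
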
diff --git a/drivers/media/video/cx18/cx18-mailbox.h b/drivers/media/video/cx18/cx18-mailbox.h
index d995641536b3..ce2b6686aa00 100644
--- a/drivers/media/video/cx18/cx18-mailbox.h
+++ b/drivers/media/video/cx18/cx18-mailbox.h
@@ -2,6 +2,7 @@
2 * cx18 mailbox functions 2 * cx18 mailbox functions
3 * 3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -30,8 +31,24 @@
30#define MB_RESERVED_HANDLE_0 0 31#define MB_RESERVED_HANDLE_0 0
31#define MB_RESERVED_HANDLE_1 0xFFFFFFFF 32#define MB_RESERVED_HANDLE_1 0xFFFFFFFF
32 33
34#define APU 0
35#define CPU 1
36#define EPU 2
37#define HPU 3
38
33struct cx18; 39struct cx18;
34 40
41/*
42 * This structure is used by the CPU to provide completed buffer information.
43 * Its structure is dictated by the layout of the SCB, required by the
44 * firmware, but its definition needs to be here, instead of in cx18-scb.h,
45 * for mailbox work order scheduling
46 */
47struct cx18_mdl_ack {
48 u32 id; /* ID of a completed MDL */
49 u32 data_used; /* Total data filled in the MDL for buffer 'id' */
50};
51
35/* The cx18_mailbox struct is the mailbox structure which is used for passing 52/* The cx18_mailbox struct is the mailbox structure which is used for passing
36 messages between processors */ 53 messages between processors */
37struct cx18_mailbox { 54struct cx18_mailbox {
@@ -62,12 +79,22 @@ struct cx18_mailbox {
62 u32 error; 79 u32 error;
63}; 80};
64 81
82struct cx18_stream;
83
84struct cx18_api_func_private {
85 struct cx18 *cx;
86 struct cx18_stream *s;
87};
88
65int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[]); 89int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[]);
66int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS], u32 cmd, 90int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS], u32 cmd,
67 int args, ...); 91 int args, ...);
68int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...); 92int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...);
69int cx18_api_func(void *priv, u32 cmd, int in, int out, 93int cx18_api_func(void *priv, u32 cmd, int in, int out,
70 u32 data[CX2341X_MBOX_MAX_DATA]); 94 u32 data[CX2341X_MBOX_MAX_DATA]);
71long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb); 95
96void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu);
97
98void cx18_epu_work_handler(struct work_struct *work);
72 99
73#endif 100#endif
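
The IRQ path declared here (cx18_api_epu_cmd_irq) claims one slot out of a small,
fixed pool of work orders by flipping an atomic "pending" flag, and the work
handler releases the slot when it is done. A rough userspace model of that
claim/release pattern follows; toy_work_order, alloc_order and free_order are
invented names, and C11 atomics stand in for the kernel's atomic_t.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_MAX_WORK_ORDERS 4	/* small pool, like the driver's order array */

struct toy_work_order {
	atomic_int pending;	/* 0 = free, 1 = claimed by the IRQ path */
	unsigned int cmd;	/* copied mailbox command */
};

static struct toy_work_order pool[TOY_MAX_WORK_ORDERS];

/*
 * Claim a free slot.  A plain load/store pair is enough here because, as in
 * the driver, only one (serialized) context ever sets "pending" and only the
 * handler that owns an order ever clears it.
 */
static struct toy_work_order *alloc_order(unsigned int cmd)
{
	size_t i;

	for (i = 0; i < TOY_MAX_WORK_ORDERS; i++) {
		if (atomic_load(&pool[i].pending) == 0) {
			atomic_store(&pool[i].pending, 1);
			pool[i].cmd = cmd;
			return &pool[i];
		}
	}
	return NULL;	/* all orders busy: the driver logs a warning here */
}

/* Called once the order has been processed by the work handler. */
static void free_order(struct toy_work_order *order)
{
	atomic_store(&order->pending, 0);
}

int main(void)
{
	struct toy_work_order *o = alloc_order(0x0001);

	if (o) {
		printf("claimed order for cmd %#x\n", o->cmd);
		free_order(o);
	}
	return 0;
}
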
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
index 174682c2582f..8d9441e88c4e 100644
--- a/drivers/media/video/cx18/cx18-queue.c
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -4,6 +4,7 @@
4 * Derived from ivtv-queue.c 4 * Derived from ivtv-queue.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -41,91 +42,126 @@ void cx18_queue_init(struct cx18_queue *q)
41 q->bytesused = 0; 42 q->bytesused = 0;
42} 43}
43 44
44void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf, 45struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
45 struct cx18_queue *q) 46 struct cx18_queue *q, int to_front)
46{ 47{
47 unsigned long flags = 0; 48 /* clear the buffer if it is not to be enqueued to the full queue */
48 49 if (q != &s->q_full) {
49 /* clear the buffer if it is going to be enqueued to the free queue */
50 if (q == &s->q_free) {
51 buf->bytesused = 0; 50 buf->bytesused = 0;
52 buf->readpos = 0; 51 buf->readpos = 0;
53 buf->b_flags = 0; 52 buf->b_flags = 0;
53 buf->skipped = 0;
54 } 54 }
55 spin_lock_irqsave(&s->qlock, flags); 55
56 list_add_tail(&buf->list, &q->list); 56 mutex_lock(&s->qlock);
57 atomic_inc(&q->buffers); 57
58 /* q_busy is restricted to a max buffer count imposed by firmware */
59 if (q == &s->q_busy &&
60 atomic_read(&q->buffers) >= CX18_MAX_FW_MDLS_PER_STREAM)
61 q = &s->q_free;
62
63 if (to_front)
64 list_add(&buf->list, &q->list); /* LIFO */
65 else
66 list_add_tail(&buf->list, &q->list); /* FIFO */
58 q->bytesused += buf->bytesused - buf->readpos; 67 q->bytesused += buf->bytesused - buf->readpos;
59 spin_unlock_irqrestore(&s->qlock, flags); 68 atomic_inc(&q->buffers);
69
70 mutex_unlock(&s->qlock);
71 return q;
60} 72}
61 73
62struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q) 74struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
63{ 75{
64 struct cx18_buffer *buf = NULL; 76 struct cx18_buffer *buf = NULL;
65 unsigned long flags = 0;
66 77
67 spin_lock_irqsave(&s->qlock, flags); 78 mutex_lock(&s->qlock);
68 if (!list_empty(&q->list)) { 79 if (!list_empty(&q->list)) {
69 buf = list_entry(q->list.next, struct cx18_buffer, list); 80 buf = list_first_entry(&q->list, struct cx18_buffer, list);
70 list_del_init(q->list.next); 81 list_del_init(&buf->list);
71 atomic_dec(&q->buffers);
72 q->bytesused -= buf->bytesused - buf->readpos; 82 q->bytesused -= buf->bytesused - buf->readpos;
83 buf->skipped = 0;
84 atomic_dec(&q->buffers);
73 } 85 }
74 spin_unlock_irqrestore(&s->qlock, flags); 86 mutex_unlock(&s->qlock);
75 return buf; 87 return buf;
76} 88}
77 89
78struct cx18_buffer *cx18_queue_get_buf_irq(struct cx18_stream *s, u32 id, 90struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
79 u32 bytesused) 91 u32 bytesused)
80{ 92{
81 struct cx18 *cx = s->cx; 93 struct cx18 *cx = s->cx;
82 struct list_head *p; 94 struct cx18_buffer *buf;
83 95 struct cx18_buffer *tmp;
84 spin_lock(&s->qlock); 96 struct cx18_buffer *ret = NULL;
85 list_for_each(p, &s->q_free.list) { 97
86 struct cx18_buffer *buf = 98 mutex_lock(&s->qlock);
87 list_entry(p, struct cx18_buffer, list); 99 list_for_each_entry_safe(buf, tmp, &s->q_busy.list, list) {
88 100 if (buf->id != id) {
89 if (buf->id != id) 101 buf->skipped++;
102 if (buf->skipped >= atomic_read(&s->q_busy.buffers)-1) {
103 /* buffer must have fallen out of rotation */
104 CX18_WARN("Skipped %s, buffer %d, %d "
105 "times - it must have dropped out of "
106 "rotation\n", s->name, buf->id,
107 buf->skipped);
108 /* move it to q_free */
109 list_move_tail(&buf->list, &s->q_free.list);
110 buf->bytesused = buf->readpos = buf->b_flags =
111 buf->skipped = 0;
112 atomic_dec(&s->q_busy.buffers);
113 atomic_inc(&s->q_free.buffers);
114 }
90 continue; 115 continue;
116 }
91 117
92 buf->bytesused = bytesused; 118 buf->bytesused = bytesused;
93 atomic_dec(&s->q_free.buffers); 119 /* Sync the buffer before we release the qlock */
94 atomic_inc(&s->q_full.buffers); 120 cx18_buf_sync_for_cpu(s, buf);
95 s->q_full.bytesused += buf->bytesused; 121 if (s->type == CX18_ENC_STREAM_TYPE_TS) {
96 list_move_tail(&buf->list, &s->q_full.list); 122 /*
123 * TS doesn't use q_full. As we pull the buffer off of
124 * the queue here, the caller will have to put it back.
125 */
126 list_del_init(&buf->list);
127 } else {
128 /* Move buffer from q_busy to q_full */
129 list_move_tail(&buf->list, &s->q_full.list);
130 set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
131 s->q_full.bytesused += buf->bytesused;
132 atomic_inc(&s->q_full.buffers);
133 }
134 atomic_dec(&s->q_busy.buffers);
97 135
98 spin_unlock(&s->qlock); 136 ret = buf;
99 return buf; 137 break;
100 } 138 }
101 spin_unlock(&s->qlock); 139 mutex_unlock(&s->qlock);
102 CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name); 140 return ret;
103 return NULL;
104} 141}
105 142
106/* Move all buffers of a queue to q_free, while flushing the buffers */ 143/* Move all buffers of a queue to q_free, while flushing the buffers */
107static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q) 144static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
108{ 145{
109 unsigned long flags;
110 struct cx18_buffer *buf; 146 struct cx18_buffer *buf;
111 147
112 if (q == &s->q_free) 148 if (q == &s->q_free)
113 return; 149 return;
114 150
115 spin_lock_irqsave(&s->qlock, flags); 151 mutex_lock(&s->qlock);
116 while (!list_empty(&q->list)) { 152 while (!list_empty(&q->list)) {
117 buf = list_entry(q->list.next, struct cx18_buffer, list); 153 buf = list_first_entry(&q->list, struct cx18_buffer, list);
118 list_move_tail(q->list.next, &s->q_free.list); 154 list_move_tail(&buf->list, &s->q_free.list);
119 buf->bytesused = buf->readpos = buf->b_flags = 0; 155 buf->bytesused = buf->readpos = buf->b_flags = buf->skipped = 0;
120 atomic_inc(&s->q_free.buffers); 156 atomic_inc(&s->q_free.buffers);
121 } 157 }
122 cx18_queue_init(q); 158 cx18_queue_init(q);
123 spin_unlock_irqrestore(&s->qlock, flags); 159 mutex_unlock(&s->qlock);
124} 160}
125 161
126void cx18_flush_queues(struct cx18_stream *s) 162void cx18_flush_queues(struct cx18_stream *s)
127{ 163{
128 cx18_queue_flush(s, &s->q_io); 164 cx18_queue_flush(s, &s->q_busy);
129 cx18_queue_flush(s, &s->q_full); 165 cx18_queue_flush(s, &s->q_full);
130} 166}
131 167
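
The reworked _cx18_enqueue() above caps the firmware-owned q_busy queue at
CX18_MAX_FW_MDLS_PER_STREAM, silently redirects further buffers to q_free, and
returns the queue that actually took the buffer. A compact userspace sketch of
that policy follows; toy_queue, toy_enqueue and the cap value are made up for the
example and are not driver symbols.

#include <stdio.h>

#define TOY_MAX_FW_MDLS 63	/* arbitrary cap for the example; the driver
				   uses CX18_MAX_FW_MDLS_PER_STREAM */

struct toy_queue {
	const char *name;
	int buffers;		/* stand-in for atomic_read(&q->buffers) */
};

static struct toy_queue q_free = { "free", 0 };
static struct toy_queue q_busy = { "busy", 0 };

/* Stand-in for _cx18_enqueue(): returns the queue that took the buffer */
static struct toy_queue *toy_enqueue(struct toy_queue *q, int to_front)
{
	/* The busy (firmware-owned) queue is capped; overflow goes to free */
	if (q == &q_busy && q->buffers >= TOY_MAX_FW_MDLS)
		q = &q_free;

	/* to_front = 1 would be a LIFO "push"; 0 is the normal FIFO add */
	(void)to_front;
	q->buffers++;
	return q;
}

int main(void)
{
	struct toy_queue *q = NULL;
	int i;

	for (i = 0; i < TOY_MAX_FW_MDLS + 1; i++)
		q = toy_enqueue(&q_busy, 0);

	/* The buffer past the cap spills over to the free queue */
	printf("last buffer landed on %s; busy=%d free=%d\n",
	       q->name, q_busy.buffers, q_free.buffers);
	return 0;
}
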
diff --git a/drivers/media/video/cx18/cx18-queue.h b/drivers/media/video/cx18/cx18-queue.h
index 7f93bb13c09f..456cec3bc28f 100644
--- a/drivers/media/video/cx18/cx18-queue.h
+++ b/drivers/media/video/cx18/cx18-queue.h
@@ -4,6 +4,7 @@
4 * Derived from ivtv-queue.h 4 * Derived from ivtv-queue.h
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -42,11 +43,26 @@ static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
42void cx18_buf_swap(struct cx18_buffer *buf); 43void cx18_buf_swap(struct cx18_buffer *buf);
43 44
44/* cx18_queue utility functions */ 45/* cx18_queue utility functions */
46struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
47 struct cx18_queue *q, int to_front);
48
49static inline
50struct cx18_queue *cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
51 struct cx18_queue *q)
52{
53 return _cx18_enqueue(s, buf, q, 0); /* FIFO */
54}
55
56static inline
57struct cx18_queue *cx18_push(struct cx18_stream *s, struct cx18_buffer *buf,
58 struct cx18_queue *q)
59{
60 return _cx18_enqueue(s, buf, q, 1); /* LIFO */
61}
62
45void cx18_queue_init(struct cx18_queue *q); 63void cx18_queue_init(struct cx18_queue *q);
46void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
47 struct cx18_queue *q);
48struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q); 64struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
49struct cx18_buffer *cx18_queue_get_buf_irq(struct cx18_stream *s, u32 id, 65struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
50 u32 bytesused); 66 u32 bytesused);
51void cx18_flush_queues(struct cx18_stream *s); 67void cx18_flush_queues(struct cx18_stream *s);
52 68
diff --git a/drivers/media/video/cx18/cx18-scb.c b/drivers/media/video/cx18/cx18-scb.c
index f56d3772aa67..34b4d03c55cd 100644
--- a/drivers/media/video/cx18/cx18-scb.c
+++ b/drivers/media/video/cx18/cx18-scb.c
@@ -2,6 +2,7 @@
2 * cx18 System Control Block initialization 2 * cx18 System Control Block initialization
3 * 3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -117,6 +118,5 @@ void cx18_init_scb(struct cx18 *cx)
117 cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu_state), 118 cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu_state),
118 &cx->scb->ipc_offset); 119 &cx->scb->ipc_offset);
119 120
120 cx18_writel(cx, 1, &cx->scb->hpu_state);
121 cx18_writel(cx, 1, &cx->scb->epu_state); 121 cx18_writel(cx, 1, &cx->scb->epu_state);
122} 122}
diff --git a/drivers/media/video/cx18/cx18-scb.h b/drivers/media/video/cx18/cx18-scb.h
index 594713bbed68..1dc1c431f5a1 100644
--- a/drivers/media/video/cx18/cx18-scb.h
+++ b/drivers/media/video/cx18/cx18-scb.h
@@ -2,6 +2,7 @@
2 * cx18 System Control Block initialization 2 * cx18 System Control Block initialization
3 * 3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -85,12 +86,6 @@ struct cx18_mdl {
85 u32 length; /* Length of the buffer segment */ 86 u32 length; /* Length of the buffer segment */
86}; 87};
87 88
88/* This structure is used by CPU to provide completed buffers information */
89struct cx18_mdl_ack {
90 u32 id; /* ID of a completed MDL */
91 u32 data_used; /* Total data filled in the MDL for buffer 'id' */
92};
93
94struct cx18_scb { 89struct cx18_scb {
95 /* These fields form the System Control Block which is used at boot time 90 /* These fields form the System Control Block which is used at boot time
96 for localizing the IPC data as well as the code positions for all 91 for localizing the IPC data as well as the code positions for all
@@ -276,7 +271,7 @@ struct cx18_scb {
276 struct cx18_mailbox hpu2epu_mb; 271 struct cx18_mailbox hpu2epu_mb;
277 struct cx18_mailbox ppu2epu_mb; 272 struct cx18_mailbox ppu2epu_mb;
278 273
279 struct cx18_mdl_ack cpu_mdl_ack[CX18_MAX_STREAMS][2]; 274 struct cx18_mdl_ack cpu_mdl_ack[CX18_MAX_STREAMS][CX18_MAX_MDL_ACKS];
280 struct cx18_mdl cpu_mdl[1]; 275 struct cx18_mdl cpu_mdl[1];
281}; 276};
282 277
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index e5ff7705b7a1..63c336c95ff5 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -4,6 +4,7 @@
4 * Derived from ivtv-streams.c 4 * Derived from ivtv-streams.c
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -110,7 +111,6 @@ static void cx18_stream_init(struct cx18 *cx, int type)
110{ 111{
111 struct cx18_stream *s = &cx->streams[type]; 112 struct cx18_stream *s = &cx->streams[type];
112 struct video_device *dev = s->v4l2dev; 113 struct video_device *dev = s->v4l2dev;
113 u32 max_size = cx->options.megabytes[type] * 1024 * 1024;
114 114
115 /* we need to keep v4l2dev, so restore it afterwards */ 115 /* we need to keep v4l2dev, so restore it afterwards */
116 memset(s, 0, sizeof(*s)); 116 memset(s, 0, sizeof(*s));
@@ -123,21 +123,15 @@ static void cx18_stream_init(struct cx18 *cx, int type)
123 s->handle = CX18_INVALID_TASK_HANDLE; 123 s->handle = CX18_INVALID_TASK_HANDLE;
124 124
125 s->dma = cx18_stream_info[type].dma; 125 s->dma = cx18_stream_info[type].dma;
126 s->buffers = cx->stream_buffers[type];
126 s->buf_size = cx->stream_buf_size[type]; 127 s->buf_size = cx->stream_buf_size[type];
127 if (s->buf_size) 128
128 s->buffers = max_size / s->buf_size; 129 mutex_init(&s->qlock);
129 if (s->buffers > 63) {
130 /* Each stream has a maximum of 63 buffers,
131 ensure we do not exceed that. */
132 s->buffers = 63;
133 s->buf_size = (max_size / s->buffers) & ~0xfff;
134 }
135 spin_lock_init(&s->qlock);
136 init_waitqueue_head(&s->waitq); 130 init_waitqueue_head(&s->waitq);
137 s->id = -1; 131 s->id = -1;
138 cx18_queue_init(&s->q_free); 132 cx18_queue_init(&s->q_free);
133 cx18_queue_init(&s->q_busy);
139 cx18_queue_init(&s->q_full); 134 cx18_queue_init(&s->q_full);
140 cx18_queue_init(&s->q_io);
141} 135}
142 136
143static int cx18_prep_dev(struct cx18 *cx, int type) 137static int cx18_prep_dev(struct cx18 *cx, int type)
@@ -167,7 +161,7 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
167 /* User explicitly selected 0 buffers for these streams, so don't 161 /* User explicitly selected 0 buffers for these streams, so don't
168 create them. */ 162 create them. */
169 if (cx18_stream_info[type].dma != PCI_DMA_NONE && 163 if (cx18_stream_info[type].dma != PCI_DMA_NONE &&
170 cx->options.megabytes[type] == 0) { 164 cx->stream_buffers[type] == 0) {
171 CX18_INFO("Disabled %s device\n", cx18_stream_info[type].name); 165 CX18_INFO("Disabled %s device\n", cx18_stream_info[type].name);
172 return 0; 166 return 0;
173 } 167 }
@@ -267,8 +261,9 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
267 261
268 switch (vfl_type) { 262 switch (vfl_type) {
269 case VFL_TYPE_GRABBER: 263 case VFL_TYPE_GRABBER:
270 CX18_INFO("Registered device video%d for %s (%d MB)\n", 264 CX18_INFO("Registered device video%d for %s (%d x %d kB)\n",
271 num, s->name, cx->options.megabytes[type]); 265 num, s->name, cx->stream_buffers[type],
266 cx->stream_buf_size[type]/1024);
272 break; 267 break;
273 268
274 case VFL_TYPE_RADIO: 269 case VFL_TYPE_RADIO:
@@ -277,10 +272,11 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
277 break; 272 break;
278 273
279 case VFL_TYPE_VBI: 274 case VFL_TYPE_VBI:
280 if (cx->options.megabytes[type]) 275 if (cx->stream_buffers[type])
281 CX18_INFO("Registered device vbi%d for %s (%d MB)\n", 276 CX18_INFO("Registered device vbi%d for %s "
282 num, 277 "(%d x %d bytes)\n",
283 s->name, cx->options.megabytes[type]); 278 num, s->name, cx->stream_buffers[type],
279 cx->stream_buf_size[type]);
284 else 280 else
285 CX18_INFO("Registered device vbi%d for %s\n", 281 CX18_INFO("Registered device vbi%d for %s\n",
286 num, s->name); 282 num, s->name);
@@ -344,7 +340,7 @@ void cx18_streams_cleanup(struct cx18 *cx, int unregister)
344static void cx18_vbi_setup(struct cx18_stream *s) 340static void cx18_vbi_setup(struct cx18_stream *s)
345{ 341{
346 struct cx18 *cx = s->cx; 342 struct cx18 *cx = s->cx;
347 int raw = cx->vbi.sliced_in->service_set == 0; 343 int raw = cx18_raw_vbi(cx);
348 u32 data[CX2341X_MBOX_MAX_DATA]; 344 u32 data[CX2341X_MBOX_MAX_DATA];
349 int lines; 345 int lines;
350 346
@@ -362,8 +358,7 @@ static void cx18_vbi_setup(struct cx18_stream *s)
362 cx18_av_cmd(cx, VIDIOC_S_FMT, &cx->vbi.in); 358 cx18_av_cmd(cx, VIDIOC_S_FMT, &cx->vbi.in);
363 359
364 /* determine number of lines and total number of VBI bytes. 360 /* determine number of lines and total number of VBI bytes.
365 A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1 361 A raw line takes 1444 bytes: 4 byte SAV code + 2 * 720
366 The '- 1' byte is probably an unused U or V byte. Or something...
367 A sliced line takes 51 bytes: 4 byte frame header, 4 byte internal 362 A sliced line takes 51 bytes: 4 byte frame header, 4 byte internal
368 header, 42 data bytes + checksum (to be confirmed) */ 363 header, 42 data bytes + checksum (to be confirmed) */
369 if (raw) { 364 if (raw) {
@@ -381,14 +376,15 @@ static void cx18_vbi_setup(struct cx18_stream *s)
381 /* Lines per field */ 376 /* Lines per field */
382 data[1] = (lines / 2) | ((lines / 2) << 16); 377 data[1] = (lines / 2) | ((lines / 2) << 16);
383 /* bytes per line */ 378 /* bytes per line */
384 data[2] = (raw ? cx->vbi.raw_size : cx->vbi.sliced_size); 379 data[2] = (raw ? cx->vbi.raw_decoder_line_size
380 : cx->vbi.sliced_decoder_line_size);
385 /* Every X number of frames a VBI interrupt arrives 381 /* Every X number of frames a VBI interrupt arrives
386 (frames as in 25 or 30 fps) */ 382 (frames as in 25 or 30 fps) */
387 data[3] = 1; 383 data[3] = 1;
388 /* Setup VBI for the cx25840 digitizer */ 384 /* Setup VBI for the cx25840 digitizer */
389 if (raw) { 385 if (raw) {
390 data[4] = 0x20602060; 386 data[4] = 0x20602060;
391 data[5] = 0x30703070; 387 data[5] = 0x307090d0;
392 } else { 388 } else {
393 data[4] = 0xB0F0B0F0; 389 data[4] = 0xB0F0B0F0;
394 data[5] = 0xA0E0A0E0; 390 data[5] = 0xA0E0A0E0;
@@ -401,11 +397,52 @@ static void cx18_vbi_setup(struct cx18_stream *s)
401 cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data); 397 cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data);
402} 398}
403 399
400struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
401 struct cx18_buffer *buf)
402{
403 struct cx18 *cx = s->cx;
404 struct cx18_queue *q;
405
406 /* Don't give it to the firmware, if we're not running a capture */
407 if (s->handle == CX18_INVALID_TASK_HANDLE ||
408 !test_bit(CX18_F_S_STREAMING, &s->s_flags))
409 return cx18_enqueue(s, buf, &s->q_free);
410
411 q = cx18_enqueue(s, buf, &s->q_busy);
412 if (q != &s->q_busy)
413 return q; /* The firmware has the max buffers it can handle */
414
415 cx18_buf_sync_for_device(s, buf);
416 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
417 (void __iomem *) &cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
418 1, buf->id, s->buf_size);
419 return q;
420}
421
422void cx18_stream_load_fw_queue(struct cx18_stream *s)
423{
424 struct cx18_queue *q;
425 struct cx18_buffer *buf;
426
427 if (atomic_read(&s->q_free.buffers) == 0 ||
428 atomic_read(&s->q_busy.buffers) >= CX18_MAX_FW_MDLS_PER_STREAM)
429 return;
430
431 /* Move from q_free to q_busy notifying the firmware, until the limit */
432 do {
433 buf = cx18_dequeue(s, &s->q_free);
434 if (buf == NULL)
435 break;
436 q = cx18_stream_put_buf_fw(s, buf);
437 } while (atomic_read(&s->q_busy.buffers) < CX18_MAX_FW_MDLS_PER_STREAM
438 && q == &s->q_busy);
439}
440
404int cx18_start_v4l2_encode_stream(struct cx18_stream *s) 441int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
405{ 442{
406 u32 data[MAX_MB_ARGUMENTS]; 443 u32 data[MAX_MB_ARGUMENTS];
407 struct cx18 *cx = s->cx; 444 struct cx18 *cx = s->cx;
408 struct list_head *p; 445 struct cx18_buffer *buf;
409 int ts = 0; 446 int ts = 0;
410 int captype = 0; 447 int captype = 0;
411 448
@@ -434,8 +471,8 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
434 captype = CAPTURE_CHANNEL_TYPE_PCM; 471 captype = CAPTURE_CHANNEL_TYPE_PCM;
435 break; 472 break;
436 case CX18_ENC_STREAM_TYPE_VBI: 473 case CX18_ENC_STREAM_TYPE_VBI:
437 captype = cx->vbi.sliced_in->service_set ? 474 captype = cx18_raw_vbi(cx) ?
438 CAPTURE_CHANNEL_TYPE_SLICED_VBI : CAPTURE_CHANNEL_TYPE_VBI; 475 CAPTURE_CHANNEL_TYPE_VBI : CAPTURE_CHANNEL_TYPE_SLICED_VBI;
439 cx->vbi.frame = 0; 476 cx->vbi.frame = 0;
440 cx->vbi.inserted_frame = 0; 477 cx->vbi.inserted_frame = 0;
441 memset(cx->vbi.sliced_mpeg_size, 478 memset(cx->vbi.sliced_mpeg_size,
@@ -457,6 +494,8 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
457 cx18_vapi(cx, CX18_CPU_SET_CHANNEL_TYPE, 2, s->handle, captype); 494 cx18_vapi(cx, CX18_CPU_SET_CHANNEL_TYPE, 2, s->handle, captype);
458 495
459 if (atomic_read(&cx->ana_capturing) == 0 && !ts) { 496 if (atomic_read(&cx->ana_capturing) == 0 && !ts) {
497 struct cx18_api_func_private priv;
498
460 /* Stuff from Windows, we don't know what it is */ 499 /* Stuff from Windows, we don't know what it is */
461 cx18_vapi(cx, CX18_CPU_SET_VER_CROP_LINE, 2, s->handle, 0); 500 cx18_vapi(cx, CX18_CPU_SET_VER_CROP_LINE, 2, s->handle, 0);
462 cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 3, 1); 501 cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 3, 1);
@@ -476,7 +515,9 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
476 cx18_vapi_result(cx, data, CX18_CPU_SET_INDEXTABLE, 1, 0); 515 cx18_vapi_result(cx, data, CX18_CPU_SET_INDEXTABLE, 1, 0);
477 516
478 /* Setup API for Stream */ 517 /* Setup API for Stream */
479 cx2341x_update(cx, cx18_api_func, NULL, &cx->params); 518 priv.cx = cx;
519 priv.s = s;
520 cx2341x_update(&priv, cx18_api_func, NULL, &cx->params);
480 } 521 }
481 522
482 if (atomic_read(&cx->tot_capturing) == 0) { 523 if (atomic_read(&cx->tot_capturing) == 0) {
@@ -488,16 +529,17 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
488 (void __iomem *)&cx->scb->cpu_mdl_ack[s->type][0] - cx->enc_mem, 529 (void __iomem *)&cx->scb->cpu_mdl_ack[s->type][0] - cx->enc_mem,
489 (void __iomem *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem); 530 (void __iomem *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem);
490 531
491 list_for_each(p, &s->q_free.list) { 532 /* Init all the cpu_mdls for this stream */
492 struct cx18_buffer *buf = list_entry(p, struct cx18_buffer, list); 533 cx18_flush_queues(s);
493 534 mutex_lock(&s->qlock);
535 list_for_each_entry(buf, &s->q_free.list, list) {
494 cx18_writel(cx, buf->dma_handle, 536 cx18_writel(cx, buf->dma_handle,
495 &cx->scb->cpu_mdl[buf->id].paddr); 537 &cx->scb->cpu_mdl[buf->id].paddr);
496 cx18_writel(cx, s->buf_size, &cx->scb->cpu_mdl[buf->id].length); 538 cx18_writel(cx, s->buf_size, &cx->scb->cpu_mdl[buf->id].length);
497 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
498 (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
499 1, buf->id, s->buf_size);
500 } 539 }
540 mutex_unlock(&s->qlock);
541 cx18_stream_load_fw_queue(s);
542
501 /* begin_capture */ 543 /* begin_capture */
502 if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) { 544 if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
503 CX18_DEBUG_WARN("Error starting capture!\n"); 545 CX18_DEBUG_WARN("Error starting capture!\n");
@@ -506,9 +548,15 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
506 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1); 548 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1);
507 else 549 else
508 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle); 550 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);
551 clear_bit(CX18_F_S_STREAMING, &s->s_flags);
552 /* FIXME - CX18_F_S_STREAMOFF as well? */
509 cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle); 553 cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
510 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle); 554 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
511 /* FIXME - clean-up DSP0_INT mask, i_flags, s_flags, etc. */ 555 s->handle = CX18_INVALID_TASK_HANDLE;
556 if (atomic_read(&cx->tot_capturing) == 0) {
557 set_bit(CX18_F_I_EOS, &cx->i_flags);
558 cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
559 }
512 return -EINVAL; 560 return -EINVAL;
513 } 561 }
514 562
@@ -560,9 +608,6 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
560 CX18_INFO("ignoring gop_end: not (yet?) supported by the firmware\n"); 608 CX18_INFO("ignoring gop_end: not (yet?) supported by the firmware\n");
561 } 609 }
562 610
563 /* Tell the CX23418 it can't use our buffers anymore */
564 cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
565
566 if (s->type != CX18_ENC_STREAM_TYPE_TS) 611 if (s->type != CX18_ENC_STREAM_TYPE_TS)
567 atomic_dec(&cx->ana_capturing); 612 atomic_dec(&cx->ana_capturing);
568 atomic_dec(&cx->tot_capturing); 613 atomic_dec(&cx->tot_capturing);
@@ -570,6 +615,9 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
570 /* Clear capture and no-read bits */ 615 /* Clear capture and no-read bits */
571 clear_bit(CX18_F_S_STREAMING, &s->s_flags); 616 clear_bit(CX18_F_S_STREAMING, &s->s_flags);
572 617
618 /* Tell the CX23418 it can't use our buffers anymore */
619 cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
620
573 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle); 621 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
574 s->handle = CX18_INVALID_TASK_HANDLE; 622 s->handle = CX18_INVALID_TASK_HANDLE;
575 623
@@ -595,3 +643,21 @@ u32 cx18_find_handle(struct cx18 *cx)
595 } 643 }
596 return CX18_INVALID_TASK_HANDLE; 644 return CX18_INVALID_TASK_HANDLE;
597} 645}
646
647struct cx18_stream *cx18_handle_to_stream(struct cx18 *cx, u32 handle)
648{
649 int i;
650 struct cx18_stream *s;
651
652 if (handle == CX18_INVALID_TASK_HANDLE)
653 return NULL;
654
655 for (i = 0; i < CX18_MAX_STREAMS; i++) {
656 s = &cx->streams[i];
657 if (s->handle != handle)
658 continue;
659 if (s->v4l2dev || s->dvb.enabled)
660 return s;
661 }
662 return NULL;
663}
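
cx18_stream_load_fw_queue(), introduced above, simply moves buffers from q_free to
q_busy, notifying the firmware for each one, until the free queue empties or the
firmware MDL limit is reached. Below is a self-contained approximation of that
refill loop; the counters, take_free_buffer() and give_buffer_to_fw() are invented
stand-ins for the driver's dequeue and hand-off helpers.

#include <stdio.h>

#define TOY_MAX_FW_MDLS 63	/* example cap; the driver uses
				   CX18_MAX_FW_MDLS_PER_STREAM */

static int free_count = 80;	/* pretend 80 buffers sit on q_free */
static int busy_count;		/* buffers currently owned by the firmware */

/* Stand-in for cx18_dequeue(s, &s->q_free): returns 0 when empty */
static int take_free_buffer(void)
{
	if (free_count == 0)
		return 0;
	free_count--;
	return 1;
}

/* Stand-in for cx18_stream_put_buf_fw(): returns 1 if the fw accepted it */
static int give_buffer_to_fw(void)
{
	if (busy_count >= TOY_MAX_FW_MDLS) {
		free_count++;	/* spilled back to q_free */
		return 0;
	}
	busy_count++;
	return 1;
}

static void load_fw_queue(void)
{
	while (free_count > 0 && busy_count < TOY_MAX_FW_MDLS) {
		if (!take_free_buffer())
			break;
		if (!give_buffer_to_fw())
			break;	/* firmware queue is full, stop refilling */
	}
}

int main(void)
{
	load_fw_queue();
	printf("busy=%d free=%d\n", busy_count, free_count);	/* 63 and 17 */
	return 0;
}
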
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index f327e947b24f..420e0a172945 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -4,6 +4,7 @@
4 * Derived from ivtv-streams.h 4 * Derived from ivtv-streams.h
5 * 5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -22,11 +23,15 @@
22 */ 23 */
23 24
24u32 cx18_find_handle(struct cx18 *cx); 25u32 cx18_find_handle(struct cx18 *cx);
26struct cx18_stream *cx18_handle_to_stream(struct cx18 *cx, u32 handle);
25int cx18_streams_setup(struct cx18 *cx); 27int cx18_streams_setup(struct cx18 *cx);
26int cx18_streams_register(struct cx18 *cx); 28int cx18_streams_register(struct cx18 *cx);
27void cx18_streams_cleanup(struct cx18 *cx, int unregister); 29void cx18_streams_cleanup(struct cx18 *cx, int unregister);
28 30
29/* Capture related */ 31/* Capture related */
32void cx18_stream_load_fw_queue(struct cx18_stream *s);
33struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
34 struct cx18_buffer *buf);
30int cx18_start_v4l2_encode_stream(struct cx18_stream *s); 35int cx18_start_v4l2_encode_stream(struct cx18_stream *s);
31int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end); 36int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end);
32 37
diff --git a/drivers/media/video/cx18/cx18-vbi.c b/drivers/media/video/cx18/cx18-vbi.c
index 22e76ee3f447..fb595bd548e8 100644
--- a/drivers/media/video/cx18/cx18-vbi.c
+++ b/drivers/media/video/cx18/cx18-vbi.c
@@ -160,11 +160,14 @@ void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf,
160 return; 160 return;
161 161
162 /* Raw VBI data */ 162 /* Raw VBI data */
163 if (cx->vbi.sliced_in->service_set == 0) { 163 if (cx18_raw_vbi(cx)) {
164 u8 type; 164 u8 type;
165 165
166 cx18_buf_swap(buf); 166 cx18_buf_swap(buf);
167 167
168 /* Skip 12 bytes of header that gets stuffed in */
169 size -= 12;
170 memcpy(p, &buf->buf[12], size);
168 type = p[3]; 171 type = p[3];
169 172
170 size = buf->bytesused = compress_raw_buf(cx, p, size); 173 size = buf->bytesused = compress_raw_buf(cx, p, size);
diff --git a/drivers/media/video/cx18/cx18-version.h b/drivers/media/video/cx18/cx18-version.h
index 9f6be2d457fb..84c0ff13b607 100644
--- a/drivers/media/video/cx18/cx18-version.h
+++ b/drivers/media/video/cx18/cx18-version.h
@@ -25,7 +25,7 @@
25#define CX18_DRIVER_NAME "cx18" 25#define CX18_DRIVER_NAME "cx18"
26#define CX18_DRIVER_VERSION_MAJOR 1 26#define CX18_DRIVER_VERSION_MAJOR 1
27#define CX18_DRIVER_VERSION_MINOR 0 27#define CX18_DRIVER_VERSION_MINOR 0
28#define CX18_DRIVER_VERSION_PATCHLEVEL 1 28#define CX18_DRIVER_VERSION_PATCHLEVEL 4
29 29
30#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL) 30#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)
31#define CX18_DRIVER_VERSION KERNEL_VERSION(CX18_DRIVER_VERSION_MAJOR, \ 31#define CX18_DRIVER_VERSION KERNEL_VERSION(CX18_DRIVER_VERSION_MAJOR, \
diff --git a/drivers/media/video/cx18/cx23418.h b/drivers/media/video/cx18/cx23418.h
index 668f968d7761..601f3a2ab742 100644
--- a/drivers/media/video/cx18/cx23418.h
+++ b/drivers/media/video/cx18/cx23418.h
@@ -44,6 +44,7 @@
44 44
45/* All commands for CPU have the following mask set */ 45/* All commands for CPU have the following mask set */
46#define CPU_CMD_MASK 0x20000000 46#define CPU_CMD_MASK 0x20000000
47#define CPU_CMD_MASK_DEBUG (CPU_CMD_MASK | 0x00000000)
47#define CPU_CMD_MASK_ACK (CPU_CMD_MASK | 0x80000000) 48#define CPU_CMD_MASK_ACK (CPU_CMD_MASK | 0x80000000)
48#define CPU_CMD_MASK_CAPTURE (CPU_CMD_MASK | 0x00020000) 49#define CPU_CMD_MASK_CAPTURE (CPU_CMD_MASK | 0x00020000)
49#define CPU_CMD_MASK_TS (CPU_CMD_MASK | 0x00040000) 50#define CPU_CMD_MASK_TS (CPU_CMD_MASK | 0x00040000)
@@ -71,6 +72,11 @@
71 0/zero/NULL means "I have nothing to say" */ 72 0/zero/NULL means "I have nothing to say" */
72#define CX18_EPU_DEBUG (EPU_CMD_MASK_DEBUG | 0x0003) 73#define CX18_EPU_DEBUG (EPU_CMD_MASK_DEBUG | 0x0003)
73 74
75/* Reads memory/registers (32-bit)
76 IN[0] - Address
77 OUT[1] - Value */
78#define CX18_CPU_DEBUG_PEEK32 (CPU_CMD_MASK_DEBUG | 0x0003)
79
74/* Description: This command starts streaming with the set channel type 80/* Description: This command starts streaming with the set channel type
75 IN[0] - Task handle. Handle of the task to start 81 IN[0] - Task handle. Handle of the task to start
76 ReturnCode - One of the ERR_CAPTURE_... */ 82 ReturnCode - One of the ERR_CAPTURE_... */
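
The new CX18_CPU_DEBUG_PEEK32 command is composed like every other CPU command:
its upper bits carry CPU_CMD_MASK via CPU_CMD_MASK_DEBUG. A trivial check, reusing
the constants from this header (only main() is added for the demonstration):

#include <stdio.h>

#define CPU_CMD_MASK		0x20000000
#define CPU_CMD_MASK_DEBUG	(CPU_CMD_MASK | 0x00000000)
#define CX18_CPU_DEBUG_PEEK32	(CPU_CMD_MASK_DEBUG | 0x0003)

int main(void)
{
	/* Every CPU command carries CPU_CMD_MASK in its upper bits */
	printf("CX18_CPU_DEBUG_PEEK32 = %#010x (CPU cmd: %s)\n",
	       (unsigned)CX18_CPU_DEBUG_PEEK32,
	       (CX18_CPU_DEBUG_PEEK32 & CPU_CMD_MASK) ? "yes" : "no");
	return 0;
}
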
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 00831f3ef8f5..798d24024353 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * Support for a cx23417 mpeg encoder via cx23885 host port. 3 * Support for a cx23417 mpeg encoder via cx23885 host port.
4 * 4 *
5 * (c) 2004 Jelle Foks <jelle@foks.8m.com> 5 * (c) 2004 Jelle Foks <jelle@foks.us>
6 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> 6 * (c) 2004 Gerd Knorr <kraxel@bytesex.org>
7 * (c) 2008 Steven Toth <stoth@linuxtv.org> 7 * (c) 2008 Steven Toth <stoth@linuxtv.org>
8 * - CX23885/7/8 support 8 * - CX23885/7/8 support
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index dac5ccc9ba72..caa098beeecf 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -158,6 +158,10 @@ struct cx23885_board cx23885_boards[] = {
158 .name = "Leadtek Winfast PxDVR3200 H", 158 .name = "Leadtek Winfast PxDVR3200 H",
159 .portc = CX23885_MPEG_DVB, 159 .portc = CX23885_MPEG_DVB,
160 }, 160 },
161 [CX23885_BOARD_COMPRO_VIDEOMATE_E650F] = {
162 .name = "Compro VideoMate E650F",
163 .portc = CX23885_MPEG_DVB,
164 },
161}; 165};
162const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards); 166const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
163 167
@@ -237,6 +241,10 @@ struct cx23885_subid cx23885_subids[] = {
237 .subvendor = 0x107d, 241 .subvendor = 0x107d,
238 .subdevice = 0x6681, 242 .subdevice = 0x6681,
239 .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H, 243 .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H,
244 }, {
245 .subvendor = 0x185b,
246 .subdevice = 0xe800,
247 .card = CX23885_BOARD_COMPRO_VIDEOMATE_E650F,
240 }, 248 },
241}; 249};
242const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids); 250const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -390,6 +398,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
390 case CX23885_BOARD_HAUPPAUGE_HVR1500: 398 case CX23885_BOARD_HAUPPAUGE_HVR1500:
391 case CX23885_BOARD_HAUPPAUGE_HVR1500Q: 399 case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
392 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 400 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
401 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
393 /* Tuner Reset Command */ 402 /* Tuner Reset Command */
394 bitmask = 0x04; 403 bitmask = 0x04;
395 break; 404 break;
@@ -530,6 +539,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
530 cx_set(GP0_IO, 0x000f000f); 539 cx_set(GP0_IO, 0x000f000f);
531 break; 540 break;
532 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 541 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
542 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
533 /* GPIO-2 xc3028 tuner reset */ 543 /* GPIO-2 xc3028 tuner reset */
534 544
535 /* The following GPIO's are on the internal AVCore (cx25840) */ 545 /* The following GPIO's are on the internal AVCore (cx25840) */
@@ -630,6 +640,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
630 case CX23885_BOARD_HAUPPAUGE_HVR1700: 640 case CX23885_BOARD_HAUPPAUGE_HVR1700:
631 case CX23885_BOARD_HAUPPAUGE_HVR1400: 641 case CX23885_BOARD_HAUPPAUGE_HVR1400:
632 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 642 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
643 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
633 default: 644 default:
634 ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ 645 ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
635 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ 646 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
@@ -644,6 +655,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
644 case CX23885_BOARD_HAUPPAUGE_HVR1800lp: 655 case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
645 case CX23885_BOARD_HAUPPAUGE_HVR1700: 656 case CX23885_BOARD_HAUPPAUGE_HVR1700:
646 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 657 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
658 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
647 request_module("cx25840"); 659 request_module("cx25840");
648 break; 660 break;
649 } 661 }
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index e1aac07b3158..1c454128a9df 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -502,6 +502,7 @@ static int dvb_register(struct cx23885_tsport *port)
502 break; 502 break;
503 } 503 }
504 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 504 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
505 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
505 i2c_bus = &dev->i2c_bus[0]; 506 i2c_bus = &dev->i2c_bus[0];
506 507
507 fe0->dvb.frontend = dvb_attach(zl10353_attach, 508 fe0->dvb.frontend = dvb_attach(zl10353_attach,
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index 1d53f54cd943..67828029fc69 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -66,6 +66,7 @@
66#define CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP 10 66#define CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP 10
67#define CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP 11 67#define CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP 11
68#define CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H 12 68#define CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H 12
69#define CX23885_BOARD_COMPRO_VIDEOMATE_E650F 13
69 70
70/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM B/G/H/LC */ 71/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM B/G/H/LC */
71#define CX23885_NORMS (\ 72#define CX23885_NORMS (\
diff --git a/drivers/media/video/cx25840/Kconfig b/drivers/media/video/cx25840/Kconfig
index de515dadadc2..451133ad41ff 100644
--- a/drivers/media/video/cx25840/Kconfig
+++ b/drivers/media/video/cx25840/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_CX25840 1config VIDEO_CX25840
2 tristate "Conexant CX2584x audio/video decoders" 2 tristate "Conexant CX2584x audio/video decoders"
3 depends on VIDEO_V4L2 && I2C && EXPERIMENTAL 3 depends on VIDEO_V4L2 && I2C
4 ---help--- 4 ---help---
5 Support for the Conexant CX2584x audio/video decoders. 5 Support for the Conexant CX2584x audio/video decoders.
6 6
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index d6421e1e8f6a..d199d80ea0a3 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -25,7 +25,7 @@
25 25
26static int set_audclk_freq(struct i2c_client *client, u32 freq) 26static int set_audclk_freq(struct i2c_client *client, u32 freq)
27{ 27{
28 struct cx25840_state *state = i2c_get_clientdata(client); 28 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
29 29
30 if (freq != 32000 && freq != 44100 && freq != 48000) 30 if (freq != 32000 && freq != 44100 && freq != 48000)
31 return -EINVAL; 31 return -EINVAL;
@@ -193,7 +193,7 @@ static int set_audclk_freq(struct i2c_client *client, u32 freq)
193 193
194void cx25840_audio_set_path(struct i2c_client *client) 194void cx25840_audio_set_path(struct i2c_client *client)
195{ 195{
196 struct cx25840_state *state = i2c_get_clientdata(client); 196 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
197 197
198 /* assert soft reset */ 198 /* assert soft reset */
199 cx25840_and_or(client, 0x810, ~0x1, 0x01); 199 cx25840_and_or(client, 0x810, ~0x1, 0x01);
@@ -235,7 +235,7 @@ void cx25840_audio_set_path(struct i2c_client *client)
235 235
236static int get_volume(struct i2c_client *client) 236static int get_volume(struct i2c_client *client)
237{ 237{
238 struct cx25840_state *state = i2c_get_clientdata(client); 238 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
239 int vol; 239 int vol;
240 240
241 if (state->unmute_volume >= 0) 241 if (state->unmute_volume >= 0)
@@ -252,7 +252,7 @@ static int get_volume(struct i2c_client *client)
252 252
253static void set_volume(struct i2c_client *client, int volume) 253static void set_volume(struct i2c_client *client, int volume)
254{ 254{
255 struct cx25840_state *state = i2c_get_clientdata(client); 255 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
256 int vol; 256 int vol;
257 257
258 if (state->unmute_volume >= 0) { 258 if (state->unmute_volume >= 0) {
@@ -340,14 +340,14 @@ static void set_balance(struct i2c_client *client, int balance)
340 340
341static int get_mute(struct i2c_client *client) 341static int get_mute(struct i2c_client *client)
342{ 342{
343 struct cx25840_state *state = i2c_get_clientdata(client); 343 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
344 344
345 return state->unmute_volume >= 0; 345 return state->unmute_volume >= 0;
346} 346}
347 347
348static void set_mute(struct i2c_client *client, int mute) 348static void set_mute(struct i2c_client *client, int mute)
349{ 349{
350 struct cx25840_state *state = i2c_get_clientdata(client); 350 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
351 351
352 if (mute && state->unmute_volume == -1) { 352 if (mute && state->unmute_volume == -1) {
353 int vol = get_volume(client); 353 int vol = get_volume(client);
@@ -365,7 +365,7 @@ static void set_mute(struct i2c_client *client, int mute)
365 365
366int cx25840_audio(struct i2c_client *client, unsigned int cmd, void *arg) 366int cx25840_audio(struct i2c_client *client, unsigned int cmd, void *arg)
367{ 367{
368 struct cx25840_state *state = i2c_get_clientdata(client); 368 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
369 struct v4l2_control *ctrl = arg; 369 struct v4l2_control *ctrl = arg;
370 int retval; 370 int retval;
371 371
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 4da8cd74f00e..2ad277189da8 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -191,7 +191,7 @@ static void cx25840_work_handler(struct work_struct *work)
191static void cx25840_initialize(struct i2c_client *client) 191static void cx25840_initialize(struct i2c_client *client)
192{ 192{
193 DEFINE_WAIT(wait); 193 DEFINE_WAIT(wait);
194 struct cx25840_state *state = i2c_get_clientdata(client); 194 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
195 struct workqueue_struct *q; 195 struct workqueue_struct *q;
196 196
197 /* datasheet startup in numbered steps, refer to page 3-77 */ 197 /* datasheet startup in numbered steps, refer to page 3-77 */
@@ -259,7 +259,7 @@ static void cx25840_initialize(struct i2c_client *client)
259static void cx23885_initialize(struct i2c_client *client) 259static void cx23885_initialize(struct i2c_client *client)
260{ 260{
261 DEFINE_WAIT(wait); 261 DEFINE_WAIT(wait);
262 struct cx25840_state *state = i2c_get_clientdata(client); 262 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
263 struct workqueue_struct *q; 263 struct workqueue_struct *q;
264 264
265 /* Internal Reset */ 265 /* Internal Reset */
@@ -350,7 +350,7 @@ static void cx23885_initialize(struct i2c_client *client)
350 350
351void cx25840_std_setup(struct i2c_client *client) 351void cx25840_std_setup(struct i2c_client *client)
352{ 352{
353 struct cx25840_state *state = i2c_get_clientdata(client); 353 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
354 v4l2_std_id std = state->std; 354 v4l2_std_id std = state->std;
355 int hblank, hactive, burst, vblank, vactive, sc; 355 int hblank, hactive, burst, vblank, vactive, sc;
356 int vblank656, src_decimation; 356 int vblank656, src_decimation;
@@ -497,7 +497,7 @@ void cx25840_std_setup(struct i2c_client *client)
497 497
498static void input_change(struct i2c_client *client) 498static void input_change(struct i2c_client *client)
499{ 499{
500 struct cx25840_state *state = i2c_get_clientdata(client); 500 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
501 v4l2_std_id std = state->std; 501 v4l2_std_id std = state->std;
502 502
503 /* Follow step 8c and 8d of section 3.16 in the cx25840 datasheet */ 503 /* Follow step 8c and 8d of section 3.16 in the cx25840 datasheet */
@@ -551,7 +551,7 @@ static void input_change(struct i2c_client *client)
551static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input, 551static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input,
552 enum cx25840_audio_input aud_input) 552 enum cx25840_audio_input aud_input)
553{ 553{
554 struct cx25840_state *state = i2c_get_clientdata(client); 554 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
555 u8 is_composite = (vid_input >= CX25840_COMPOSITE1 && 555 u8 is_composite = (vid_input >= CX25840_COMPOSITE1 &&
556 vid_input <= CX25840_COMPOSITE8); 556 vid_input <= CX25840_COMPOSITE8);
557 u8 reg; 557 u8 reg;
@@ -671,7 +671,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
671 671
672static int set_v4lstd(struct i2c_client *client) 672static int set_v4lstd(struct i2c_client *client)
673{ 673{
674 struct cx25840_state *state = i2c_get_clientdata(client); 674 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
675 u8 fmt = 0; /* zero is autodetect */ 675 u8 fmt = 0; /* zero is autodetect */
676 u8 pal_m = 0; 676 u8 pal_m = 0;
677 677
@@ -720,9 +720,10 @@ static int set_v4lstd(struct i2c_client *client)
720 720
721/* ----------------------------------------------------------------------- */ 721/* ----------------------------------------------------------------------- */
722 722
723static int set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl) 723static int cx25840_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
724{ 724{
725 struct cx25840_state *state = i2c_get_clientdata(client); 725 struct cx25840_state *state = to_state(sd);
726 struct i2c_client *client = v4l2_get_subdevdata(sd);
726 727
727 switch (ctrl->id) { 728 switch (ctrl->id) {
728 case CX25840_CID_ENABLE_PVR150_WORKAROUND: 729 case CX25840_CID_ENABLE_PVR150_WORKAROUND:
@@ -786,9 +787,10 @@ static int set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
786 return 0; 787 return 0;
787} 788}
788 789
789static int get_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl) 790static int cx25840_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
790{ 791{
791 struct cx25840_state *state = i2c_get_clientdata(client); 792 struct cx25840_state *state = to_state(sd);
793 struct i2c_client *client = v4l2_get_subdevdata(sd);
792 794
793 switch (ctrl->id) { 795 switch (ctrl->id) {
794 case CX25840_CID_ENABLE_PVR150_WORKAROUND: 796 case CX25840_CID_ENABLE_PVR150_WORKAROUND:
@@ -823,21 +825,23 @@ static int get_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
823 825
824/* ----------------------------------------------------------------------- */ 826/* ----------------------------------------------------------------------- */
825 827
826static int get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt) 828static int cx25840_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
827{ 829{
830 struct i2c_client *client = v4l2_get_subdevdata(sd);
831
828 switch (fmt->type) { 832 switch (fmt->type) {
829 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: 833 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
830 return cx25840_vbi(client, VIDIOC_G_FMT, fmt); 834 return cx25840_vbi(client, VIDIOC_G_FMT, fmt);
831 default: 835 default:
832 return -EINVAL; 836 return -EINVAL;
833 } 837 }
834
835 return 0; 838 return 0;
836} 839}
837 840
838static int set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt) 841static int cx25840_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
839{ 842{
840 struct cx25840_state *state = i2c_get_clientdata(client); 843 struct cx25840_state *state = to_state(sd);
844 struct i2c_client *client = v4l2_get_subdevdata(sd);
841 struct v4l2_pix_format *pix; 845 struct v4l2_pix_format *pix;
842 int HSC, VSC, Vsrc, Hsrc, filter, Vlines; 846 int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
843 int is_50Hz = !(state->std & V4L2_STD_525_60); 847 int is_50Hz = !(state->std & V4L2_STD_525_60);
@@ -914,7 +918,7 @@ static void log_video_status(struct i2c_client *client)
914 "0xD", "0xE", "0xF" 918 "0xD", "0xE", "0xF"
915 }; 919 };
916 920
917 struct cx25840_state *state = i2c_get_clientdata(client); 921 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
918 u8 vidfmt_sel = cx25840_read(client, 0x400) & 0xf; 922 u8 vidfmt_sel = cx25840_read(client, 0x400) & 0xf;
919 u8 gen_stat1 = cx25840_read(client, 0x40d); 923 u8 gen_stat1 = cx25840_read(client, 0x40d);
920 u8 gen_stat2 = cx25840_read(client, 0x40e); 924 u8 gen_stat2 = cx25840_read(client, 0x40e);
@@ -944,7 +948,7 @@ static void log_video_status(struct i2c_client *client)
944 948
945static void log_audio_status(struct i2c_client *client) 949static void log_audio_status(struct i2c_client *client)
946{ 950{
947 struct cx25840_state *state = i2c_get_clientdata(client); 951 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
948 u8 download_ctl = cx25840_read(client, 0x803); 952 u8 download_ctl = cx25840_read(client, 0x803);
949 u8 mod_det_stat0 = cx25840_read(client, 0x804); 953 u8 mod_det_stat0 = cx25840_read(client, 0x804);
950 u8 mod_det_stat1 = cx25840_read(client, 0x805); 954 u8 mod_det_stat1 = cx25840_read(client, 0x805);
@@ -1097,21 +1101,12 @@ static void log_audio_status(struct i2c_client *client)
1097 1101
1098/* ----------------------------------------------------------------------- */ 1102/* ----------------------------------------------------------------------- */
1099 1103
1100static int cx25840_command(struct i2c_client *client, unsigned int cmd, 1104static int cx25840_init(struct v4l2_subdev *sd, u32 val)
1101 void *arg)
1102{ 1105{
1103 struct cx25840_state *state = i2c_get_clientdata(client); 1106 struct cx25840_state *state = to_state(sd);
1104 struct v4l2_tuner *vt = arg; 1107 struct i2c_client *client = v4l2_get_subdevdata(sd);
1105 struct v4l2_routing *route = arg;
1106
1107 /* ignore these commands */
1108 switch (cmd) {
1109 case TUNER_SET_TYPE_ADDR:
1110 return 0;
1111 }
1112 1108
1113 if (!state->is_initialized) { 1109 if (!state->is_initialized) {
1114 v4l_dbg(1, cx25840_debug, client, "cmd %08x triggered fw load\n", cmd);
1115 /* initialize on first use */ 1110 /* initialize on first use */
1116 state->is_initialized = 1; 1111 state->is_initialized = 1;
1117 if (state->is_cx25836) 1112 if (state->is_cx25836)
@@ -1121,50 +1116,69 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
1121 else 1116 else
1122 cx25840_initialize(client); 1117 cx25840_initialize(client);
1123 } 1118 }
1119 return 0;
1120}
1124 1121
1125 switch (cmd) {
1126#ifdef CONFIG_VIDEO_ADV_DEBUG 1122#ifdef CONFIG_VIDEO_ADV_DEBUG
1127 /* ioctls to allow direct access to the 1123static int cx25840_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
1128 * cx25840 registers for testing */ 1124{
1129 case VIDIOC_DBG_G_REGISTER: 1125 struct i2c_client *client = v4l2_get_subdevdata(sd);
1130 case VIDIOC_DBG_S_REGISTER:
1131 {
1132 struct v4l2_register *reg = arg;
1133
1134 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
1135 return -EINVAL;
1136 if (!capable(CAP_SYS_ADMIN))
1137 return -EPERM;
1138 1126
1139 if (cmd == VIDIOC_DBG_G_REGISTER) 1127 if (!v4l2_chip_match_i2c_client(client,
1140 reg->val = cx25840_read(client, reg->reg & 0x0fff); 1128 reg->match_type, reg->match_chip))
1141 else 1129 return -EINVAL;
1142 cx25840_write(client, reg->reg & 0x0fff, reg->val & 0xff); 1130 if (!capable(CAP_SYS_ADMIN))
1143 break; 1131 return -EPERM;
1144 } 1132 reg->val = cx25840_read(client, reg->reg & 0x0fff);
1133 return 0;
1134}
1135
1136static int cx25840_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
1137{
1138 struct i2c_client *client = v4l2_get_subdevdata(sd);
1139
1140 if (!v4l2_chip_match_i2c_client(client,
1141 reg->match_type, reg->match_chip))
1142 return -EINVAL;
1143 if (!capable(CAP_SYS_ADMIN))
1144 return -EPERM;
1145 cx25840_write(client, reg->reg & 0x0fff, reg->val & 0xff);
1146 return 0;
1147}
1145#endif 1148#endif
1146 1149
1147 case VIDIOC_INT_DECODE_VBI_LINE: 1150static int cx25840_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi)
1148 return cx25840_vbi(client, cmd, arg); 1151{
1152 struct i2c_client *client = v4l2_get_subdevdata(sd);
1149 1153
1150 case VIDIOC_INT_AUDIO_CLOCK_FREQ: 1154 return cx25840_vbi(client, VIDIOC_INT_DECODE_VBI_LINE, vbi);
1151 return cx25840_audio(client, cmd, arg); 1155}
1156
1157static int cx25840_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
1158{
1159 struct i2c_client *client = v4l2_get_subdevdata(sd);
1160
1161 return cx25840_audio(client, VIDIOC_INT_AUDIO_CLOCK_FREQ, &freq);
1162}
1163
1164static int cx25840_s_stream(struct v4l2_subdev *sd, int enable)
1165{
1166 struct cx25840_state *state = to_state(sd);
1167 struct i2c_client *client = v4l2_get_subdevdata(sd);
1152 1168
1153 case VIDIOC_STREAMON: 1169 v4l_dbg(1, cx25840_debug, client, "%s output\n",
1154 v4l_dbg(1, cx25840_debug, client, "enable output\n"); 1170 enable ? "enable" : "disable");
1171 if (enable) {
1155 if (state->is_cx23885) { 1172 if (state->is_cx23885) {
1156 u8 v = (cx25840_read(client, 0x421) | 0x0b); 1173 u8 v = (cx25840_read(client, 0x421) | 0x0b);
1157 cx25840_write(client, 0x421, v); 1174 cx25840_write(client, 0x421, v);
1158 } else { 1175 } else {
1159 cx25840_write(client, 0x115, 1176 cx25840_write(client, 0x115,
1160 state->is_cx25836 ? 0x0c : 0x8c); 1177 state->is_cx25836 ? 0x0c : 0x8c);
1161 cx25840_write(client, 0x116, 1178 cx25840_write(client, 0x116,
1162 state->is_cx25836 ? 0x04 : 0x07); 1179 state->is_cx25836 ? 0x04 : 0x07);
1163 } 1180 }
1164 break; 1181 } else {
1165
1166 case VIDIOC_STREAMOFF:
1167 v4l_dbg(1, cx25840_debug, client, "disable output\n");
1168 if (state->is_cx23885) { 1182 if (state->is_cx23885) {
1169 u8 v = cx25840_read(client, 0x421) & ~(0x0b); 1183 u8 v = cx25840_read(client, 0x421) & ~(0x0b);
1170 cx25840_write(client, 0x421, v); 1184 cx25840_write(client, 0x421, v);
@@ -1172,133 +1186,136 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
1172 cx25840_write(client, 0x115, 0x00); 1186 cx25840_write(client, 0x115, 0x00);
1173 cx25840_write(client, 0x116, 0x00); 1187 cx25840_write(client, 0x116, 0x00);
1174 } 1188 }
1175 break; 1189 }
1190 return 0;
1191}
1176 1192
1177 case VIDIOC_LOG_STATUS: 1193static int cx25840_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
1178 log_video_status(client); 1194{
1179 if (!state->is_cx25836) 1195 struct cx25840_state *state = to_state(sd);
1180 log_audio_status(client);
1181 break;
1182 1196
1183 case VIDIOC_G_CTRL: 1197 switch (qc->id) {
1184 return get_v4lctrl(client, (struct v4l2_control *)arg); 1198 case V4L2_CID_BRIGHTNESS:
1199 case V4L2_CID_CONTRAST:
1200 case V4L2_CID_SATURATION:
1201 case V4L2_CID_HUE:
1202 return v4l2_ctrl_query_fill_std(qc);
1203 default:
1204 break;
1205 }
1206 if (state->is_cx25836)
1207 return -EINVAL;
1185 1208
1186 case VIDIOC_S_CTRL: 1209 switch (qc->id) {
1187 return set_v4lctrl(client, (struct v4l2_control *)arg); 1210 case V4L2_CID_AUDIO_VOLUME:
1211 return v4l2_ctrl_query_fill(qc, 0, 65535,
1212 65535 / 100, state->default_volume);
1213 case V4L2_CID_AUDIO_MUTE:
1214 case V4L2_CID_AUDIO_BALANCE:
1215 case V4L2_CID_AUDIO_BASS:
1216 case V4L2_CID_AUDIO_TREBLE:
1217 return v4l2_ctrl_query_fill_std(qc);
1218 default:
1219 return -EINVAL;
1220 }
1221 return -EINVAL;
1222}
1188 1223
1189 case VIDIOC_QUERYCTRL: 1224static int cx25840_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
1190 { 1225{
1191 struct v4l2_queryctrl *qc = arg; 1226 struct cx25840_state *state = to_state(sd);
1227 struct i2c_client *client = v4l2_get_subdevdata(sd);
1192 1228
1193 switch (qc->id) { 1229 if (state->radio == 0 && state->std == std)
1194 case V4L2_CID_BRIGHTNESS: 1230 return 0;
1195 case V4L2_CID_CONTRAST: 1231 state->radio = 0;
1196 case V4L2_CID_SATURATION: 1232 state->std = std;
1197 case V4L2_CID_HUE: 1233 return set_v4lstd(client);
1198 return v4l2_ctrl_query_fill_std(qc); 1234}
1199 default:
1200 break;
1201 }
1202 if (state->is_cx25836)
1203 return -EINVAL;
1204 1235
1205 switch (qc->id) { 1236static int cx25840_s_radio(struct v4l2_subdev *sd)
1206 case V4L2_CID_AUDIO_VOLUME: 1237{
1207 return v4l2_ctrl_query_fill(qc, 0, 65535, 1238 struct cx25840_state *state = to_state(sd);
1208 65535 / 100, state->default_volume);
1209 case V4L2_CID_AUDIO_MUTE:
1210 case V4L2_CID_AUDIO_BALANCE:
1211 case V4L2_CID_AUDIO_BASS:
1212 case V4L2_CID_AUDIO_TREBLE:
1213 return v4l2_ctrl_query_fill_std(qc);
1214 default:
1215 return -EINVAL;
1216 }
1217 return -EINVAL;
1218 }
1219 1239
1220 case VIDIOC_G_STD: 1240 state->radio = 1;
1221 *(v4l2_std_id *)arg = state->std; 1241 return 0;
1222 break; 1242}
1223 1243
1224 case VIDIOC_S_STD: 1244static int cx25840_s_video_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
1225 if (state->radio == 0 && state->std == *(v4l2_std_id *)arg) 1245{
1226 return 0; 1246 struct cx25840_state *state = to_state(sd);
1227 state->radio = 0; 1247 struct i2c_client *client = v4l2_get_subdevdata(sd);
1228 state->std = *(v4l2_std_id *)arg;
1229 return set_v4lstd(client);
1230 1248
1231 case AUDC_SET_RADIO: 1249 return set_input(client, route->input, state->aud_input);
1232 state->radio = 1; 1250}
1233 break;
1234 1251
1235 case VIDIOC_INT_G_VIDEO_ROUTING: 1252static int cx25840_s_audio_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
1236 route->input = state->vid_input; 1253{
1237 route->output = 0; 1254 struct cx25840_state *state = to_state(sd);
1238 break; 1255 struct i2c_client *client = v4l2_get_subdevdata(sd);
1239 1256
1240 case VIDIOC_INT_S_VIDEO_ROUTING: 1257 if (state->is_cx25836)
1241 return set_input(client, route->input, state->aud_input); 1258 return -EINVAL;
1259 return set_input(client, state->vid_input, route->input);
1260}
1242 1261
1243 case VIDIOC_INT_G_AUDIO_ROUTING: 1262static int cx25840_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
1244 if (state->is_cx25836) 1263{
1245 return -EINVAL; 1264 struct cx25840_state *state = to_state(sd);
1246 route->input = state->aud_input; 1265 struct i2c_client *client = v4l2_get_subdevdata(sd);
1247 route->output = 0;
1248 break;
1249 1266
1250 case VIDIOC_INT_S_AUDIO_ROUTING: 1267 if (!state->is_cx25836)
1251 if (state->is_cx25836) 1268 input_change(client);
1252 return -EINVAL; 1269 return 0;
1253 return set_input(client, state->vid_input, route->input); 1270}
1254 1271
1255 case VIDIOC_S_FREQUENCY: 1272static int cx25840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1256 if (!state->is_cx25836) { 1273{
1257 input_change(client); 1274 struct cx25840_state *state = to_state(sd);
1258 } 1275 struct i2c_client *client = v4l2_get_subdevdata(sd);
1259 break; 1276 u8 vpres = cx25840_read(client, 0x40e) & 0x20;
1277 u8 mode;
1278 int val = 0;
1260 1279
1261 case VIDIOC_G_TUNER: 1280 if (state->radio)
1262 { 1281 return 0;
1263 u8 vpres = cx25840_read(client, 0x40e) & 0x20;
1264 u8 mode;
1265 int val = 0;
1266 1282
1267 if (state->radio) 1283 vt->signal = vpres ? 0xffff : 0x0;
1268 break; 1284 if (state->is_cx25836)
1285 return 0;
1269 1286
1270 vt->signal = vpres ? 0xffff : 0x0; 1287 vt->capability |=
1271 if (state->is_cx25836) 1288 V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
1272 break; 1289 V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
1273 1290
1274 vt->capability |= 1291 mode = cx25840_read(client, 0x804);
1275 V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
1276 V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
1277 1292
1278 mode = cx25840_read(client, 0x804); 1293 /* get rxsubchans and audmode */
1294 if ((mode & 0xf) == 1)
1295 val |= V4L2_TUNER_SUB_STEREO;
1296 else
1297 val |= V4L2_TUNER_SUB_MONO;
1279 1298
1280 /* get rxsubchans and audmode */ 1299 if (mode == 2 || mode == 4)
1281 if ((mode & 0xf) == 1) 1300 val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
1282 val |= V4L2_TUNER_SUB_STEREO;
1283 else
1284 val |= V4L2_TUNER_SUB_MONO;
1285 1301
1286 if (mode == 2 || mode == 4) 1302 if (mode & 0x10)
1287 val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; 1303 val |= V4L2_TUNER_SUB_SAP;
1288 1304
1289 if (mode & 0x10) 1305 vt->rxsubchans = val;
1290 val |= V4L2_TUNER_SUB_SAP; 1306 vt->audmode = state->audmode;
1307 return 0;
1308}
1291 1309
1292 vt->rxsubchans = val; 1310static int cx25840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1293 vt->audmode = state->audmode; 1311{
1294 break; 1312 struct cx25840_state *state = to_state(sd);
1295 } 1313 struct i2c_client *client = v4l2_get_subdevdata(sd);
1296 1314
1297 case VIDIOC_S_TUNER: 1315 if (state->radio || state->is_cx25836)
1298 if (state->radio || state->is_cx25836) 1316 return 0;
1299 break;
1300 1317
1301 switch (vt->audmode) { 1318 switch (vt->audmode) {
1302 case V4L2_TUNER_MODE_MONO: 1319 case V4L2_TUNER_MODE_MONO:
1303 /* mono -> mono 1320 /* mono -> mono
1304 stereo -> mono 1321 stereo -> mono
@@ -1326,41 +1343,100 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
1326 break; 1343 break;
1327 default: 1344 default:
1328 return -EINVAL; 1345 return -EINVAL;
1329 } 1346 }
1330 state->audmode = vt->audmode; 1347 state->audmode = vt->audmode;
1331 break; 1348 return 0;
1349}
1332 1350
1333 case VIDIOC_G_FMT: 1351static int cx25840_reset(struct v4l2_subdev *sd, u32 val)
1334 return get_v4lfmt(client, (struct v4l2_format *)arg); 1352{
1353 struct cx25840_state *state = to_state(sd);
1354 struct i2c_client *client = v4l2_get_subdevdata(sd);
1335 1355
1336 case VIDIOC_S_FMT: 1356 if (state->is_cx25836)
1337 return set_v4lfmt(client, (struct v4l2_format *)arg); 1357 cx25836_initialize(client);
1358 else if (state->is_cx23885)
1359 cx23885_initialize(client);
1360 else
1361 cx25840_initialize(client);
1362 return 0;
1363}
1338 1364
1339 case VIDIOC_INT_RESET: 1365static int cx25840_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
1340 if (state->is_cx25836) 1366{
1341 cx25836_initialize(client); 1367 struct cx25840_state *state = to_state(sd);
1342 else if (state->is_cx23885) 1368 struct i2c_client *client = v4l2_get_subdevdata(sd);
1343 cx23885_initialize(client);
1344 else
1345 cx25840_initialize(client);
1346 break;
1347 1369
1348 case VIDIOC_G_CHIP_IDENT: 1370 return v4l2_chip_ident_i2c_client(client, chip, state->id, state->rev);
1349 return v4l2_chip_ident_i2c_client(client, arg, state->id, state->rev); 1371}
1350 1372
1351 default: 1373static int cx25840_log_status(struct v4l2_subdev *sd)
1352 return -EINVAL; 1374{
1353 } 1375 struct cx25840_state *state = to_state(sd);
1376 struct i2c_client *client = v4l2_get_subdevdata(sd);
1354 1377
1378 log_video_status(client);
1379 if (!state->is_cx25836)
1380 log_audio_status(client);
1355 return 0; 1381 return 0;
1356} 1382}
1357 1383
1384static int cx25840_command(struct i2c_client *client, unsigned cmd, void *arg)
1385{
1386 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
1387}
1388
1389/* ----------------------------------------------------------------------- */
1390
1391static const struct v4l2_subdev_core_ops cx25840_core_ops = {
1392 .log_status = cx25840_log_status,
1393 .g_chip_ident = cx25840_g_chip_ident,
1394 .g_ctrl = cx25840_g_ctrl,
1395 .s_ctrl = cx25840_s_ctrl,
1396 .queryctrl = cx25840_queryctrl,
1397 .reset = cx25840_reset,
1398 .init = cx25840_init,
1399#ifdef CONFIG_VIDEO_ADV_DEBUG
1400 .g_register = cx25840_g_register,
1401 .s_register = cx25840_s_register,
1402#endif
1403};
1404
1405static const struct v4l2_subdev_tuner_ops cx25840_tuner_ops = {
1406 .s_frequency = cx25840_s_frequency,
1407 .s_std = cx25840_s_std,
1408 .s_radio = cx25840_s_radio,
1409 .g_tuner = cx25840_g_tuner,
1410 .s_tuner = cx25840_s_tuner,
1411};
1412
1413static const struct v4l2_subdev_audio_ops cx25840_audio_ops = {
1414 .s_clock_freq = cx25840_s_clock_freq,
1415 .s_routing = cx25840_s_audio_routing,
1416};
1417
1418static const struct v4l2_subdev_video_ops cx25840_video_ops = {
1419 .s_routing = cx25840_s_video_routing,
1420 .g_fmt = cx25840_g_fmt,
1421 .s_fmt = cx25840_s_fmt,
1422 .decode_vbi_line = cx25840_decode_vbi_line,
1423 .s_stream = cx25840_s_stream,
1424};
1425
1426static const struct v4l2_subdev_ops cx25840_ops = {
1427 .core = &cx25840_core_ops,
1428 .tuner = &cx25840_tuner_ops,
1429 .audio = &cx25840_audio_ops,
1430 .video = &cx25840_video_ops,
1431};
1432
1358/* ----------------------------------------------------------------------- */ 1433/* ----------------------------------------------------------------------- */
1359 1434
1360static int cx25840_probe(struct i2c_client *client, 1435static int cx25840_probe(struct i2c_client *client,
1361 const struct i2c_device_id *did) 1436 const struct i2c_device_id *did)
1362{ 1437{
1363 struct cx25840_state *state; 1438 struct cx25840_state *state;
1439 struct v4l2_subdev *sd;
1364 u32 id; 1440 u32 id;
1365 u16 device_id; 1441 u16 device_id;
1366 1442
@@ -1392,10 +1468,11 @@ static int cx25840_probe(struct i2c_client *client,
1392 } 1468 }
1393 1469
1394 state = kzalloc(sizeof(struct cx25840_state), GFP_KERNEL); 1470 state = kzalloc(sizeof(struct cx25840_state), GFP_KERNEL);
1395 if (state == NULL) { 1471 if (state == NULL)
1396 return -ENOMEM; 1472 return -ENOMEM;
1397 }
1398 1473
1474 sd = &state->sd;
1475 v4l2_i2c_subdev_init(sd, client, &cx25840_ops);
1399 /* Note: revision '(device_id & 0x0f) == 2' was never built. The 1476 /* Note: revision '(device_id & 0x0f) == 2' was never built. The
1400 marking skips from 0x1 == 22 to 0x3 == 23. */ 1477 marking skips from 0x1 == 22 to 0x3 == 23. */
1401 v4l_info(client, "cx25%3x-2%x found @ 0x%x (%s)\n", 1478 v4l_info(client, "cx25%3x-2%x found @ 0x%x (%s)\n",
@@ -1403,7 +1480,6 @@ static int cx25840_probe(struct i2c_client *client,
1403 (device_id & 0x0f) < 3 ? (device_id & 0x0f) + 1 : (device_id & 0x0f), 1480 (device_id & 0x0f) < 3 ? (device_id & 0x0f) + 1 : (device_id & 0x0f),
1404 client->addr << 1, client->adapter->name); 1481 client->addr << 1, client->adapter->name);
1405 1482
1406 i2c_set_clientdata(client, state);
1407 state->c = client; 1483 state->c = client;
1408 state->is_cx25836 = ((device_id & 0xff00) == 0x8300); 1484 state->is_cx25836 = ((device_id & 0xff00) == 0x8300);
1409 state->is_cx23885 = (device_id == 0x0000) || (device_id == 0x1313); 1485 state->is_cx23885 = (device_id == 0x0000) || (device_id == 0x1313);
@@ -1430,7 +1506,10 @@ static int cx25840_probe(struct i2c_client *client,
1430 1506
1431static int cx25840_remove(struct i2c_client *client) 1507static int cx25840_remove(struct i2c_client *client)
1432{ 1508{
1433 kfree(i2c_get_clientdata(client)); 1509 struct v4l2_subdev *sd = i2c_get_clientdata(client);
1510
1511 v4l2_device_unregister_subdev(sd);
1512 kfree(to_state(sd));
1434 return 0; 1513 return 0;
1435} 1514}
1436 1515
diff --git a/drivers/media/video/cx25840/cx25840-core.h b/drivers/media/video/cx25840/cx25840-core.h
index b87337e590b4..be0558277ca3 100644
--- a/drivers/media/video/cx25840/cx25840-core.h
+++ b/drivers/media/video/cx25840/cx25840-core.h
@@ -22,6 +22,7 @@
22 22
23 23
24#include <linux/videodev2.h> 24#include <linux/videodev2.h>
25#include <media/v4l2-device.h>
25#include <linux/i2c.h> 26#include <linux/i2c.h>
26 27
27/* ENABLE_PVR150_WORKAROUND activates a workaround for a hardware bug that is 28/* ENABLE_PVR150_WORKAROUND activates a workaround for a hardware bug that is
@@ -34,6 +35,7 @@
34 35
35struct cx25840_state { 36struct cx25840_state {
36 struct i2c_client *c; 37 struct i2c_client *c;
38 struct v4l2_subdev sd;
37 int pvr150_workaround; 39 int pvr150_workaround;
38 int radio; 40 int radio;
39 v4l2_std_id std; 41 v4l2_std_id std;
@@ -53,6 +55,11 @@ struct cx25840_state {
53 struct work_struct fw_work; /* work entry for fw load */ 55 struct work_struct fw_work; /* work entry for fw load */
54}; 56};
55 57
58static inline struct cx25840_state *to_state(struct v4l2_subdev *sd)
59{
60 return container_of(sd, struct cx25840_state, sd);
61}
62
56/* ----------------------------------------------------------------------- */ 63/* ----------------------------------------------------------------------- */
57/* cx25850-core.c */ 64/* cx25850-core.c */
58int cx25840_write(struct i2c_client *client, u16 addr, u8 value); 65int cx25840_write(struct i2c_client *client, u16 addr, u8 value);
diff --git a/drivers/media/video/cx25840/cx25840-firmware.c b/drivers/media/video/cx25840/cx25840-firmware.c
index 8d489a4b9570..0b2dceb74108 100644
--- a/drivers/media/video/cx25840/cx25840-firmware.c
+++ b/drivers/media/video/cx25840/cx25840-firmware.c
@@ -91,7 +91,7 @@ static int fw_write(struct i2c_client *client, const u8 *data, int size)
91 91
92int cx25840_loadfw(struct i2c_client *client) 92int cx25840_loadfw(struct i2c_client *client)
93{ 93{
94 struct cx25840_state *state = i2c_get_clientdata(client); 94 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
95 const struct firmware *fw = NULL; 95 const struct firmware *fw = NULL;
96 u8 buffer[FWSEND]; 96 u8 buffer[FWSEND];
97 const u8 *ptr; 97 const u8 *ptr;
diff --git a/drivers/media/video/cx25840/cx25840-vbi.c b/drivers/media/video/cx25840/cx25840-vbi.c
index 58e6ef1c28a0..03f09b288eb8 100644
--- a/drivers/media/video/cx25840/cx25840-vbi.c
+++ b/drivers/media/video/cx25840/cx25840-vbi.c
@@ -84,7 +84,7 @@ static int decode_vps(u8 * dst, u8 * p)
84 84
85int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg) 85int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
86{ 86{
87 struct cx25840_state *state = i2c_get_clientdata(client); 87 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
88 struct v4l2_format *fmt; 88 struct v4l2_format *fmt;
89 struct v4l2_sliced_vbi_format *svbi; 89 struct v4l2_sliced_vbi_format *svbi;
90 90
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 06f171ab6149..66c755c116dc 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -742,7 +742,6 @@ static int __devinit snd_cx88_create(struct snd_card *card,
742 core = cx88_core_get(pci); 742 core = cx88_core_get(pci);
743 if (NULL == core) { 743 if (NULL == core) {
744 err = -EINVAL; 744 err = -EINVAL;
745 kfree (chip);
746 return err; 745 return err;
747 } 746 }
748 747
@@ -812,7 +811,7 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
812 811
813 err = snd_cx88_create(card, pci, &chip); 812 err = snd_cx88_create(card, pci, &chip);
814 if (err < 0) 813 if (err < 0)
815 return (err); 814 goto error;
816 815
817 err = snd_cx88_pcm(chip, 0, "CX88 Digital"); 816 err = snd_cx88_pcm(chip, 0, "CX88 Digital");
818 if (err < 0) 817 if (err < 0)
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index d3ae5b4dfca7..e162a70748c5 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -3,7 +3,7 @@
3 * Support for a cx23416 mpeg encoder via cx2388x host port. 3 * Support for a cx23416 mpeg encoder via cx2388x host port.
4 * "blackbird" reference design. 4 * "blackbird" reference design.
5 * 5 *
6 * (c) 2004 Jelle Foks <jelle@foks.8m.com> 6 * (c) 2004 Jelle Foks <jelle@foks.us>
7 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> 7 * (c) 2004 Gerd Knorr <kraxel@bytesex.org>
8 * 8 *
9 * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> 9 * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
@@ -39,7 +39,7 @@
39#include "cx88.h" 39#include "cx88.h"
40 40
41MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards"); 41MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
42MODULE_AUTHOR("Jelle Foks <jelle@foks.8m.com>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 42MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
43MODULE_LICENSE("GPL"); 43MODULE_LICENSE("GPL");
44 44
45static unsigned int mpegbufs = 32; 45static unsigned int mpegbufs = 32;
@@ -1244,8 +1244,16 @@ static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv)
1244 * We're being given access to re-arrange the GPIOs. 1244 * We're being given access to re-arrange the GPIOs.
1245 * Take the bus off the cx22702 and put the cx23416 on it. 1245 * Take the bus off the cx22702 and put the cx23416 on it.
1246 */ 1246 */
1247 cx_clear(MO_GP0_IO, 0x00000080); /* cx22702 in reset */ 1247 /* Toggle reset on cx22702 leaving i2c active */
1248 cx_set(MO_GP0_IO, 0x00000004); /* Disable the cx22702 */ 1248 cx_set(MO_GP0_IO, 0x00000080);
1249 udelay(1000);
1250 cx_clear(MO_GP0_IO, 0x00000080);
1251 udelay(50);
1252 cx_set(MO_GP0_IO, 0x00000080);
1253 udelay(1000);
1254 /* tri-state the cx22702 pins */
1255 cx_set(MO_GP0_IO, 0x00000004);
1256 udelay(1000);
1249 break; 1257 break;
1250 default: 1258 default:
1251 err = -ENODEV; 1259 err = -ENODEV;
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 5bcbb4cc7c2a..733ede34f93a 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1237,7 +1237,6 @@ static const struct cx88_board cx88_boards[] = {
1237 }, 1237 },
1238 }, 1238 },
1239 [CX88_BOARD_WINFAST_DTV2000H] = { 1239 [CX88_BOARD_WINFAST_DTV2000H] = {
1240 /* video inputs and radio still in testing */
1241 .name = "WinFast DTV2000 H", 1240 .name = "WinFast DTV2000 H",
1242 .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3, 1241 .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
1243 .radio_type = UNSET, 1242 .radio_type = UNSET,
@@ -1251,7 +1250,35 @@ static const struct cx88_board cx88_boards[] = {
1251 .gpio1 = 0x00008203, 1250 .gpio1 = 0x00008203,
1252 .gpio2 = 0x00017304, 1251 .gpio2 = 0x00017304,
1253 .gpio3 = 0x02000000, 1252 .gpio3 = 0x02000000,
1253 }, {
1254 .type = CX88_VMUX_COMPOSITE1,
1255 .vmux = 1,
1256 .gpio0 = 0x0001d701,
1257 .gpio1 = 0x0000b207,
1258 .gpio2 = 0x0001d701,
1259 .gpio3 = 0x02000000,
1260 }, {
1261 .type = CX88_VMUX_COMPOSITE2,
1262 .vmux = 2,
1263 .gpio0 = 0x0001d503,
1264 .gpio1 = 0x0000b207,
1265 .gpio2 = 0x0001d503,
1266 .gpio3 = 0x02000000,
1267 }, {
1268 .type = CX88_VMUX_SVIDEO,
1269 .vmux = 3,
1270 .gpio0 = 0x0001d701,
1271 .gpio1 = 0x0000b207,
1272 .gpio2 = 0x0001d701,
1273 .gpio3 = 0x02000000,
1254 }}, 1274 }},
1275 .radio = {
1276 .type = CX88_RADIO,
1277 .gpio0 = 0x00015702,
1278 .gpio1 = 0x0000f207,
1279 .gpio2 = 0x00015702,
1280 .gpio3 = 0x02000000,
1281 },
1255 .mpeg = CX88_MPEG_DVB, 1282 .mpeg = CX88_MPEG_DVB,
1256 }, 1283 },
1257 [CX88_BOARD_GENIATECH_DVBS] = { 1284 [CX88_BOARD_GENIATECH_DVBS] = {
@@ -1847,6 +1874,18 @@ static const struct cx88_board cx88_boards[] = {
1847 } }, 1874 } },
1848 .mpeg = CX88_MPEG_DVB, 1875 .mpeg = CX88_MPEG_DVB,
1849 }, 1876 },
1877 [CX88_BOARD_TBS_8910] = {
1878 .name = "TBS 8910 DVB-S",
1879 .tuner_type = UNSET,
1880 .radio_type = UNSET,
1881 .tuner_addr = ADDR_UNSET,
1882 .radio_addr = ADDR_UNSET,
1883 .input = {{
1884 .type = CX88_VMUX_DVB,
1885 .vmux = 0,
1886 } },
1887 .mpeg = CX88_MPEG_DVB,
1888 },
1850 [CX88_BOARD_TBS_8920] = { 1889 [CX88_BOARD_TBS_8920] = {
1851 .name = "TBS 8920 DVB-S/S2", 1890 .name = "TBS 8920 DVB-S/S2",
1852 .tuner_type = TUNER_ABSENT, 1891 .tuner_type = TUNER_ABSENT,
@@ -1859,6 +1898,18 @@ static const struct cx88_board cx88_boards[] = {
1859 } }, 1898 } },
1860 .mpeg = CX88_MPEG_DVB, 1899 .mpeg = CX88_MPEG_DVB,
1861 }, 1900 },
1901 [CX88_BOARD_PROF_6200] = {
1902 .name = "Prof 6200 DVB-S",
1903 .tuner_type = UNSET,
1904 .radio_type = UNSET,
1905 .tuner_addr = ADDR_UNSET,
1906 .radio_addr = ADDR_UNSET,
1907 .input = {{
1908 .type = CX88_VMUX_DVB,
1909 .vmux = 0,
1910 } },
1911 .mpeg = CX88_MPEG_DVB,
1912 },
1862 [CX88_BOARD_PROF_7300] = { 1913 [CX88_BOARD_PROF_7300] = {
1863 .name = "PROF 7300 DVB-S/S2", 1914 .name = "PROF 7300 DVB-S/S2",
1864 .tuner_type = UNSET, 1915 .tuner_type = UNSET,
@@ -1871,6 +1922,18 @@ static const struct cx88_board cx88_boards[] = {
1871 } }, 1922 } },
1872 .mpeg = CX88_MPEG_DVB, 1923 .mpeg = CX88_MPEG_DVB,
1873 }, 1924 },
1925 [CX88_BOARD_SATTRADE_ST4200] = {
1926 .name = "SATTRADE ST4200 DVB-S/S2",
1927 .tuner_type = UNSET,
1928 .radio_type = UNSET,
1929 .tuner_addr = ADDR_UNSET,
1930 .radio_addr = ADDR_UNSET,
1931 .input = {{
1932 .type = CX88_VMUX_DVB,
1933 .vmux = 0,
1934 } },
1935 .mpeg = CX88_MPEG_DVB,
1936 },
1874}; 1937};
1875 1938
1876/* ------------------------------------------------------------------ */ 1939/* ------------------------------------------------------------------ */
@@ -1897,7 +1960,11 @@ static const struct cx88_subid cx88_subids[] = {
1897 .subvendor = PCI_VENDOR_ID_ATI, 1960 .subvendor = PCI_VENDOR_ID_ATI,
1898 .subdevice = 0x00f8, 1961 .subdevice = 0x00f8,
1899 .card = CX88_BOARD_ATI_WONDER_PRO, 1962 .card = CX88_BOARD_ATI_WONDER_PRO,
1900 },{ 1963 }, {
1964 .subvendor = PCI_VENDOR_ID_ATI,
1965 .subdevice = 0x00f9,
1966 .card = CX88_BOARD_ATI_WONDER_PRO,
1967 }, {
1901 .subvendor = 0x107d, 1968 .subvendor = 0x107d,
1902 .subdevice = 0x6611, 1969 .subdevice = 0x6611,
1903 .card = CX88_BOARD_WINFAST2000XP_EXPERT, 1970 .card = CX88_BOARD_WINFAST2000XP_EXPERT,
@@ -2257,13 +2324,25 @@ static const struct cx88_subid cx88_subids[] = {
2257 .subdevice = 0x2011, 2324 .subdevice = 0x2011,
2258 .card = CX88_BOARD_OMICOM_SS4_PCI, 2325 .card = CX88_BOARD_OMICOM_SS4_PCI,
2259 }, { 2326 }, {
2327 .subvendor = 0x8910,
2328 .subdevice = 0x8888,
2329 .card = CX88_BOARD_TBS_8910,
2330 }, {
2260 .subvendor = 0x8920, 2331 .subvendor = 0x8920,
2261 .subdevice = 0x8888, 2332 .subdevice = 0x8888,
2262 .card = CX88_BOARD_TBS_8920, 2333 .card = CX88_BOARD_TBS_8920,
2263 }, { 2334 }, {
2335 .subvendor = 0xb022,
2336 .subdevice = 0x3022,
2337 .card = CX88_BOARD_PROF_6200,
2338 }, {
2264 .subvendor = 0xB033, 2339 .subvendor = 0xB033,
2265 .subdevice = 0x3033, 2340 .subdevice = 0x3033,
2266 .card = CX88_BOARD_PROF_7300, 2341 .card = CX88_BOARD_PROF_7300,
2342 }, {
2343 .subvendor = 0xb200,
2344 .subdevice = 0x4200,
2345 .card = CX88_BOARD_SATTRADE_ST4200,
2267 }, 2346 },
2268}; 2347};
2269 2348
@@ -2874,8 +2953,11 @@ static void cx88_card_setup(struct cx88_core *core)
2874 case CX88_BOARD_TEVII_S420: 2953 case CX88_BOARD_TEVII_S420:
2875 case CX88_BOARD_TEVII_S460: 2954 case CX88_BOARD_TEVII_S460:
2876 case CX88_BOARD_OMICOM_SS4_PCI: 2955 case CX88_BOARD_OMICOM_SS4_PCI:
2956 case CX88_BOARD_TBS_8910:
2877 case CX88_BOARD_TBS_8920: 2957 case CX88_BOARD_TBS_8920:
2958 case CX88_BOARD_PROF_6200:
2878 case CX88_BOARD_PROF_7300: 2959 case CX88_BOARD_PROF_7300:
2960 case CX88_BOARD_SATTRADE_ST4200:
2879 cx_write(MO_SRST_IO, 0); 2961 cx_write(MO_SRST_IO, 0);
2880 msleep(100); 2962 msleep(100);
2881 cx_write(MO_SRST_IO, 1); 2963 cx_write(MO_SRST_IO, 1);
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 60705b08bfe8..b045874ad04f 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -844,6 +844,9 @@ static int set_tvaudio(struct cx88_core *core)
844 } else if (V4L2_STD_SECAM_L & norm) { 844 } else if (V4L2_STD_SECAM_L & norm) {
845 core->tvaudio = WW_L; 845 core->tvaudio = WW_L;
846 846
847 } else if ((V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H) & norm) {
848 core->tvaudio = WW_BG;
849
847 } else if (V4L2_STD_SECAM_DK & norm) { 850 } else if (V4L2_STD_SECAM_DK & norm) {
848 core->tvaudio = WW_DK; 851 core->tvaudio = WW_DK;
849 852
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 309ca5e68063..da4dd4913d9f 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -406,7 +406,7 @@ static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
406 cx_write(MO_GP0_IO, 0x00006060); 406 cx_write(MO_GP0_IO, 0x00006060);
407 break; 407 break;
408 case SEC_VOLTAGE_OFF: 408 case SEC_VOLTAGE_OFF:
409 printk("LNB Voltage SEC_VOLTAGE_off\n"); 409 printk("LNB Voltage SEC_VOLTAGE_off\n");
410 break; 410 break;
411 } 411 }
412 412
@@ -606,7 +606,7 @@ static int dvb_register(struct cx8802_dev *dev)
606 /* Get the first frontend */ 606 /* Get the first frontend */
607 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1); 607 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
608 if (!fe0) 608 if (!fe0)
609 return -EINVAL; 609 goto frontend_detach;
610 610
611 /* multi-frontend gate control is undefined or defaults to fe0 */ 611 /* multi-frontend gate control is undefined or defaults to fe0 */
612 dev->frontends.gate = 0; 612 dev->frontends.gate = 0;
@@ -653,38 +653,35 @@ static int dvb_register(struct cx8802_dev *dev)
653 } 653 }
654 break; 654 break;
655 case CX88_BOARD_HAUPPAUGE_HVR3000: 655 case CX88_BOARD_HAUPPAUGE_HVR3000:
656 /* MFE frontend 1 */
657 mfe_shared = 1;
658 dev->frontends.gate = 2;
656 /* DVB-S init */ 659 /* DVB-S init */
657 fe0->dvb.frontend = dvb_attach(cx24123_attach, 660 fe0->dvb.frontend = dvb_attach(cx24123_attach,
658 &hauppauge_novas_config, 661 &hauppauge_novas_config,
659 &dev->core->i2c_adap); 662 &dev->core->i2c_adap);
660 if (fe0->dvb.frontend) { 663 if (fe0->dvb.frontend) {
661 if (!dvb_attach(isl6421_attach, fe0->dvb.frontend, 664 if (!dvb_attach(isl6421_attach,
662 &dev->core->i2c_adap, 0x08, ISL6421_DCL, 0x00)) { 665 fe0->dvb.frontend,
663 dprintk( 1, "%s(): HVR3000 - DVB-S LNB Init: failed\n", __func__); 666 &dev->core->i2c_adap,
664 } 667 0x08, ISL6421_DCL, 0x00))
665 } else { 668 goto frontend_detach;
666 dprintk( 1, "%s(): HVR3000 - DVB-S Init: failed\n", __func__);
667 } 669 }
668 /* DVB-T init */ 670 /* MFE frontend 2 */
669 fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2); 671 fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2);
670 if (fe1) { 672 if (!fe1)
671 dev->frontends.gate = 2; 673 goto frontend_detach;
672 mfe_shared = 1; 674 /* DVB-T init */
673 fe1->dvb.frontend = dvb_attach(cx22702_attach, 675 fe1->dvb.frontend = dvb_attach(cx22702_attach,
674 &hauppauge_hvr_config, 676 &hauppauge_hvr_config,
675 &dev->core->i2c_adap); 677 &dev->core->i2c_adap);
676 if (fe1->dvb.frontend) { 678 if (fe1->dvb.frontend) {
677 fe1->dvb.frontend->id = 1; 679 fe1->dvb.frontend->id = 1;
678 if(!dvb_attach(simple_tuner_attach, fe1->dvb.frontend, 680 if (!dvb_attach(simple_tuner_attach,
679 &dev->core->i2c_adap, 0x61, 681 fe1->dvb.frontend,
680 TUNER_PHILIPS_FMD1216ME_MK3)) { 682 &dev->core->i2c_adap,
681 dprintk( 1, "%s(): HVR3000 - DVB-T misc Init: failed\n", __func__); 683 0x61, TUNER_PHILIPS_FMD1216ME_MK3))
682 } 684 goto frontend_detach;
683 } else {
684 dprintk( 1, "%s(): HVR3000 - DVB-T Init: failed\n", __func__);
685 }
686 } else {
687 dprintk( 1, "%s(): HVR3000 - DVB-T Init: can't find frontend 2.\n", __func__);
688 } 685 }
689 break; 686 break;
690 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS: 687 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
@@ -998,50 +995,51 @@ static int dvb_register(struct cx8802_dev *dev)
998 } 995 }
999 break; 996 break;
1000 case CX88_BOARD_HAUPPAUGE_HVR4000: 997 case CX88_BOARD_HAUPPAUGE_HVR4000:
998 /* MFE frontend 1 */
999 mfe_shared = 1;
1000 dev->frontends.gate = 2;
1001 /* DVB-S/S2 Init */ 1001 /* DVB-S/S2 Init */
1002 fe0->dvb.frontend = dvb_attach(cx24116_attach, 1002 fe0->dvb.frontend = dvb_attach(cx24116_attach,
1003 &hauppauge_hvr4000_config, 1003 &hauppauge_hvr4000_config,
1004 &dev->core->i2c_adap); 1004 &dev->core->i2c_adap);
1005 if (fe0->dvb.frontend) { 1005 if (fe0->dvb.frontend) {
1006 if(!dvb_attach(isl6421_attach, fe0->dvb.frontend, 1006 if (!dvb_attach(isl6421_attach,
1007 &dev->core->i2c_adap, 0x08, ISL6421_DCL, 0x00)) { 1007 fe0->dvb.frontend,
1008 dprintk( 1, "%s(): HVR4000 - DVB-S LNB Init: failed\n", __func__); 1008 &dev->core->i2c_adap,
1009 } 1009 0x08, ISL6421_DCL, 0x00))
1010 } else { 1010 goto frontend_detach;
1011 dprintk( 1, "%s(): HVR4000 - DVB-S Init: failed\n", __func__);
1012 } 1011 }
1013 /* DVB-T Init */ 1012 /* MFE frontend 2 */
1014 fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2); 1013 fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2);
1015 if (fe1) { 1014 if (!fe1)
1016 dev->frontends.gate = 2; 1015 goto frontend_detach;
1017 mfe_shared = 1; 1016 /* DVB-T Init */
1018 fe1->dvb.frontend = dvb_attach(cx22702_attach, 1017 fe1->dvb.frontend = dvb_attach(cx22702_attach,
1019 &hauppauge_hvr_config, 1018 &hauppauge_hvr_config,
1020 &dev->core->i2c_adap); 1019 &dev->core->i2c_adap);
1021 if (fe1->dvb.frontend) { 1020 if (fe1->dvb.frontend) {
1022 fe1->dvb.frontend->id = 1; 1021 fe1->dvb.frontend->id = 1;
1023 if(!dvb_attach(simple_tuner_attach, fe1->dvb.frontend, 1022 if (!dvb_attach(simple_tuner_attach,
1024 &dev->core->i2c_adap, 0x61, 1023 fe1->dvb.frontend,
1025 TUNER_PHILIPS_FMD1216ME_MK3)) { 1024 &dev->core->i2c_adap,
1026 dprintk( 1, "%s(): HVR4000 - DVB-T misc Init: failed\n", __func__); 1025 0x61, TUNER_PHILIPS_FMD1216ME_MK3))
1027 } 1026 goto frontend_detach;
1028 } else {
1029 dprintk( 1, "%s(): HVR4000 - DVB-T Init: failed\n", __func__);
1030 }
1031 } else {
1032 dprintk( 1, "%s(): HVR4000 - DVB-T Init: can't find frontend 2.\n", __func__);
1033 } 1027 }
1034 break; 1028 break;
1035 case CX88_BOARD_HAUPPAUGE_HVR4000LITE: 1029 case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
1036 fe0->dvb.frontend = dvb_attach(cx24116_attach, 1030 fe0->dvb.frontend = dvb_attach(cx24116_attach,
1037 &hauppauge_hvr4000_config, 1031 &hauppauge_hvr4000_config,
1038 &dev->core->i2c_adap); 1032 &dev->core->i2c_adap);
1039 if (fe0->dvb.frontend) { 1033 if (fe0->dvb.frontend) {
1040 dvb_attach(isl6421_attach, fe0->dvb.frontend, 1034 if (!dvb_attach(isl6421_attach,
1041 &dev->core->i2c_adap, 1035 fe0->dvb.frontend,
1042 0x08, ISL6421_DCL, 0x00); 1036 &dev->core->i2c_adap,
1037 0x08, ISL6421_DCL, 0x00))
1038 goto frontend_detach;
1043 } 1039 }
1044 break; 1040 break;
1041 case CX88_BOARD_PROF_6200:
1042 case CX88_BOARD_TBS_8910:
1045 case CX88_BOARD_TEVII_S420: 1043 case CX88_BOARD_TEVII_S420:
1046 fe0->dvb.frontend = dvb_attach(stv0299_attach, 1044 fe0->dvb.frontend = dvb_attach(stv0299_attach,
1047 &tevii_tuner_sharp_config, 1045 &tevii_tuner_sharp_config,
@@ -1070,21 +1068,18 @@ static int dvb_register(struct cx8802_dev *dev)
1070 fe0->dvb.frontend = dvb_attach(cx24116_attach, 1068 fe0->dvb.frontend = dvb_attach(cx24116_attach,
1071 &tevii_s460_config, 1069 &tevii_s460_config,
1072 &core->i2c_adap); 1070 &core->i2c_adap);
1073 if (fe0->dvb.frontend != NULL) { 1071 if (fe0->dvb.frontend != NULL)
1074 core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
1075 fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; 1072 fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
1076 }
1077 break; 1073 break;
1078 case CX88_BOARD_OMICOM_SS4_PCI: 1074 case CX88_BOARD_OMICOM_SS4_PCI:
1079 case CX88_BOARD_TBS_8920: 1075 case CX88_BOARD_TBS_8920:
1080 case CX88_BOARD_PROF_7300: 1076 case CX88_BOARD_PROF_7300:
1077 case CX88_BOARD_SATTRADE_ST4200:
1081 fe0->dvb.frontend = dvb_attach(cx24116_attach, 1078 fe0->dvb.frontend = dvb_attach(cx24116_attach,
1082 &hauppauge_hvr4000_config, 1079 &hauppauge_hvr4000_config,
1083 &core->i2c_adap); 1080 &core->i2c_adap);
1084 if (fe0->dvb.frontend != NULL) { 1081 if (fe0->dvb.frontend != NULL)
1085 core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
1086 fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; 1082 fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
1087 }
1088 break; 1083 break;
1089 default: 1084 default:
1090 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n", 1085 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
@@ -1092,11 +1087,11 @@ static int dvb_register(struct cx8802_dev *dev)
1092 break; 1087 break;
1093 } 1088 }
1094 1089
1095 if ( (NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend) ) { 1090 if ( (NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend) ) {
1096 printk(KERN_ERR 1091 printk(KERN_ERR
1097 "%s/2: frontend initialization failed\n", 1092 "%s/2: frontend initialization failed\n",
1098 core->name); 1093 core->name);
1099 return -EINVAL; 1094 goto frontend_detach;
1100 } 1095 }
1101 /* define general-purpose callback pointer */ 1096 /* define general-purpose callback pointer */
1102 fe0->dvb.frontend->callback = cx88_tuner_callback; 1097 fe0->dvb.frontend->callback = cx88_tuner_callback;
@@ -1133,40 +1128,44 @@ static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
1133 * on the bus. Take the bus from the cx23416 and enable the 1128 * on the bus. Take the bus from the cx23416 and enable the
1134 * cx22702 demod 1129 * cx22702 demod
1135 */ 1130 */
1136 cx_set(MO_GP0_IO, 0x00000080); /* cx22702 out of reset and enable */ 1131 /* Toggle reset on cx22702 leaving i2c active */
1132 cx_set(MO_GP0_IO, 0x00000080);
1133 udelay(1000);
1134 cx_clear(MO_GP0_IO, 0x00000080);
1135 udelay(50);
1136 cx_set(MO_GP0_IO, 0x00000080);
1137 udelay(1000);
1138 /* enable the cx22702 pins */
1137 cx_clear(MO_GP0_IO, 0x00000004); 1139 cx_clear(MO_GP0_IO, 0x00000004);
1138 udelay(1000); 1140 udelay(1000);
1139 break; 1141 break;
1140 1142
1141 case CX88_BOARD_HAUPPAUGE_HVR3000: 1143 case CX88_BOARD_HAUPPAUGE_HVR3000:
1142 case CX88_BOARD_HAUPPAUGE_HVR4000: 1144 case CX88_BOARD_HAUPPAUGE_HVR4000:
1143 if(core->dvbdev->frontends.active_fe_id == 1) { 1145 /* Toggle reset on cx22702 leaving i2c active */
1144 /* DVB-S/S2 Enabled */ 1146 cx_set(MO_GP0_IO, 0x00000080);
1145 1147 udelay(1000);
1146 /* Toggle reset on cx22702 leaving i2c active */ 1148 cx_clear(MO_GP0_IO, 0x00000080);
1147 cx_write(MO_GP0_IO, (core->board.input[0].gpio0 & 0x0000ff00) | 0x00000080); 1149 udelay(50);
1148 udelay(1000); 1150 cx_set(MO_GP0_IO, 0x00000080);
1149 cx_clear(MO_GP0_IO, 0x00000080); 1151 udelay(1000);
1150 udelay(50); 1152 switch (core->dvbdev->frontends.active_fe_id) {
1151 cx_set(MO_GP0_IO, 0x00000080); /* cx22702 out of reset */ 1153 case 1: /* DVB-S/S2 Enabled */
1152 cx_set(MO_GP0_IO, 0x00000004); /* tri-state the cx22702 pins */ 1154 /* tri-state the cx22702 pins */
1153 udelay(1000); 1155 cx_set(MO_GP0_IO, 0x00000004);
1154 1156 /* Take the cx24116/cx24123 out of reset */
1155 cx_write(MO_SRST_IO, 1); /* Take the cx24116/cx24123 out of reset */ 1157 cx_write(MO_SRST_IO, 1);
1156 core->dvbdev->ts_gen_cntrl = 0x02; /* Parallel IO */ 1158 core->dvbdev->ts_gen_cntrl = 0x02; /* Parallel IO */
1157 } else 1159 break;
1158 if (core->dvbdev->frontends.active_fe_id == 2) { 1160 case 2: /* DVB-T Enabled */
1159 /* DVB-T Enabled */
1160
1161 /* Put the cx24116/cx24123 into reset */ 1161 /* Put the cx24116/cx24123 into reset */
1162 cx_write(MO_SRST_IO, 0); 1162 cx_write(MO_SRST_IO, 0);
1163 1163 /* enable the cx22702 pins */
1164 /* cx22702 out of reset and enable it */
1165 cx_set(MO_GP0_IO, 0x00000080);
1166 cx_clear(MO_GP0_IO, 0x00000004); 1164 cx_clear(MO_GP0_IO, 0x00000004);
1167 core->dvbdev->ts_gen_cntrl = 0x0c; /* Serial IO */ 1165 core->dvbdev->ts_gen_cntrl = 0x0c; /* Serial IO */
1168 udelay(1000); 1166 break;
1169 } 1167 }
1168 udelay(1000);
1170 break; 1169 break;
1171 1170
1172 default: 1171 default:
@@ -1199,8 +1198,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
1199{ 1198{
1200 struct cx88_core *core = drv->core; 1199 struct cx88_core *core = drv->core;
1201 struct cx8802_dev *dev = drv->core->dvbdev; 1200 struct cx8802_dev *dev = drv->core->dvbdev;
1202 int err, i; 1201 int err;
1203 struct videobuf_dvb_frontend *fe;
1204 1202
1205 dprintk( 1, "%s\n", __func__); 1203 dprintk( 1, "%s\n", __func__);
1206 dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n", 1204 dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
@@ -1216,31 +1214,47 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
1216 /* If vp3054 isn't enabled, a stub will just return 0 */ 1214 /* If vp3054 isn't enabled, a stub will just return 0 */
1217 err = vp3054_i2c_probe(dev); 1215 err = vp3054_i2c_probe(dev);
1218 if (0 != err) 1216 if (0 != err)
1219 goto fail_core; 1217 goto fail_probe;
1220 1218
1221 /* dvb stuff */ 1219 /* dvb stuff */
1222 printk(KERN_INFO "%s/2: cx2388x based DVB/ATSC card\n", core->name); 1220 printk(KERN_INFO "%s/2: cx2388x based DVB/ATSC card\n", core->name);
1223 dev->ts_gen_cntrl = 0x0c; 1221 dev->ts_gen_cntrl = 0x0c;
1224 1222
1225 for (i = 1; i <= core->board.num_frontends; i++) { 1223 err = -ENODEV;
1226 fe = videobuf_dvb_get_frontend(&core->dvbdev->frontends, i); 1224 if (core->board.num_frontends) {
1227 if (!fe) { 1225 struct videobuf_dvb_frontend *fe;
1228 printk(KERN_ERR "%s() failed to get frontend(%d)\n", __func__, i); 1226 int i;
1229 continue; 1227
1228 for (i = 1; i <= core->board.num_frontends; i++) {
1229 fe = videobuf_dvb_get_frontend(&core->dvbdev->frontends, i);
1230 if (fe == NULL) {
1231 printk(KERN_ERR "%s() failed to get frontend(%d)\n",
1232 __func__, i);
1233 goto fail_probe;
1234 }
1235 videobuf_queue_sg_init(&fe->dvb.dvbq, &dvb_qops,
1236 &dev->pci->dev, &dev->slock,
1237 V4L2_BUF_TYPE_VIDEO_CAPTURE,
1238 V4L2_FIELD_TOP,
1239 sizeof(struct cx88_buffer),
1240 dev);
1241 /* init struct videobuf_dvb */
1242 fe->dvb.name = dev->core->name;
1230 } 1243 }
1231 videobuf_queue_sg_init(&fe->dvb.dvbq, &dvb_qops, 1244 } else {
1232 &dev->pci->dev, &dev->slock, 1245 /* no frontends allocated */
1233 V4L2_BUF_TYPE_VIDEO_CAPTURE, 1246 printk(KERN_ERR "%s/2 .num_frontends should be non-zero\n",
1234 V4L2_FIELD_TOP, 1247 core->name);
1235 sizeof(struct cx88_buffer), 1248 goto fail_core;
1236 dev);
1237 /* init struct videobuf_dvb */
1238 fe->dvb.name = dev->core->name;
1239 } 1249 }
1240 err = dvb_register(dev); 1250 err = dvb_register(dev);
1241 if (err != 0) 1251 if (err)
1252 /* frontends/adapter de-allocated in dvb_register */
1242 printk(KERN_ERR "%s/2: dvb_register failed (err = %d)\n", 1253 printk(KERN_ERR "%s/2: dvb_register failed (err = %d)\n",
1243 core->name, err); 1254 core->name, err);
1255 return err;
1256fail_probe:
1257 videobuf_dvb_dealloc_frontends(&core->dvbdev->frontends);
1244fail_core: 1258fail_core:
1245 return err; 1259 return err;
1246} 1260}
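
[Editor's note] The reworked probe path above validates every frontend before registering and unwinds all of them on a partial failure. A minimal sketch of that control flow, using hypothetical alloc_frontend()/dealloc_all_frontends() helpers rather than the real videobuf_dvb API:

#include <stdio.h>

/* Hypothetical helpers standing in for videobuf_dvb_get_frontend() and
 * videobuf_dvb_dealloc_frontends(); only the error-path shape mirrors the hunk. */
static int alloc_frontend(int i) { return i <= 2 ? 0 : -1; /* pretend #3 fails */ }
static void dealloc_all_frontends(void) { puts("freeing every frontend"); }

static int probe(int num_frontends)
{
	int err = -1;	/* stands in for -ENODEV */
	int i;

	if (!num_frontends)
		goto fail_core;	/* nothing was allocated, nothing to free */

	for (i = 1; i <= num_frontends; i++) {
		if (alloc_frontend(i) != 0)
			goto fail_probe;	/* one bad frontend invalidates the set */
	}
	return 0;	/* dvb_register() would run here */

fail_probe:
	dealloc_all_frontends();
fail_core:
	return err;
}

int main(void) { return probe(3) ? 1 : 0; }

The design point of the hunk is the split between the two labels: fail_probe frees partially allocated frontends, while fail_core is for failures before anything was allocated (and dvb_register() cleans up after itself on error).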
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 3ebdcd1d83f8..a04fee235db6 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -3,7 +3,7 @@
3 * Support for the mpeg transport stream transfers 3 * Support for the mpeg transport stream transfers
4 * PCI function #2 of the cx2388x. 4 * PCI function #2 of the cx2388x.
5 * 5 *
6 * (c) 2004 Jelle Foks <jelle@foks.8m.com> 6 * (c) 2004 Jelle Foks <jelle@foks.us>
7 * (c) 2004 Chris Pascoe <c.pascoe@itee.uq.edu.au> 7 * (c) 2004 Chris Pascoe <c.pascoe@itee.uq.edu.au>
8 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> 8 * (c) 2004 Gerd Knorr <kraxel@bytesex.org>
9 * 9 *
@@ -34,7 +34,7 @@
34/* ------------------------------------------------------------------ */ 34/* ------------------------------------------------------------------ */
35 35
36MODULE_DESCRIPTION("mpeg driver for cx2388x based TV cards"); 36MODULE_DESCRIPTION("mpeg driver for cx2388x based TV cards");
37MODULE_AUTHOR("Jelle Foks <jelle@foks.8m.com>"); 37MODULE_AUTHOR("Jelle Foks <jelle@foks.us>");
38MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); 38MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
39MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 39MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
40MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index f4240965be32..20649b25f7ba 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -53,12 +53,11 @@
53/* ----------------------------------------------------------- */ 53/* ----------------------------------------------------------- */
54/* defines and enums */ 54/* defines and enums */
55 55
56/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM B/G/H/LC */ 56/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM/LC */
57#define CX88_NORMS (\ 57#define CX88_NORMS (V4L2_STD_ALL \
58 V4L2_STD_NTSC_M| V4L2_STD_NTSC_M_JP| V4L2_STD_NTSC_443 | \ 58 & ~V4L2_STD_PAL_H \
59 V4L2_STD_PAL_BG| V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \ 59 & ~V4L2_STD_NTSC_M_KR \
60 V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \ 60 & ~V4L2_STD_SECAM_LC)
61 V4L2_STD_PAL_60| V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK )
62 61
63#define FORMAT_FLAGS_PACKED 0x01 62#define FORMAT_FLAGS_PACKED 0x01
64#define FORMAT_FLAGS_PLANAR 0x02 63#define FORMAT_FLAGS_PLANAR 0x02
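
[Editor's note] The CX88_NORMS change above switches from listing every supported standard to starting from V4L2_STD_ALL and masking out the unsupported ones. A small sketch of why the two forms are equivalent, using made-up bit values instead of the real V4L2_STD_* constants:

#include <assert.h>

/* Hypothetical 4-standard universe; the real V4L2_STD_* bits differ. */
#define STD_A	(1 << 0)
#define STD_B	(1 << 1)
#define STD_C	(1 << 2)	/* pretend this one is unsupported */
#define STD_D	(1 << 3)
#define STD_ALL	(STD_A | STD_B | STD_C | STD_D)

/* Old style: enumerate what the driver supports. */
#define NORMS_ADDITIVE		(STD_A | STD_B | STD_D)
/* New style: everything minus what it does not support. */
#define NORMS_SUBTRACTIVE	(STD_ALL & ~STD_C)

int main(void)
{
	/* Same mask either way; the subtractive form only tracks the
	 * exceptions, so newly added standards are included by default. */
	assert(NORMS_ADDITIVE == NORMS_SUBTRACTIVE);
	return 0;
}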
@@ -229,6 +228,9 @@ extern struct sram_channel cx88_sram_channels[];
229#define CX88_BOARD_TEVII_S420 73 228#define CX88_BOARD_TEVII_S420 73
230#define CX88_BOARD_PROLINK_PV_GLOBAL_XTREME 74 229#define CX88_BOARD_PROLINK_PV_GLOBAL_XTREME 74
231#define CX88_BOARD_PROF_7300 75 230#define CX88_BOARD_PROF_7300 75
231#define CX88_BOARD_SATTRADE_ST4200 76
232#define CX88_BOARD_TBS_8910 77
233#define CX88_BOARD_PROF_6200 78
232 234
233enum cx88_itype { 235enum cx88_itype {
234 CX88_VMUX_COMPOSITE1 = 1, 236 CX88_VMUX_COMPOSITE1 = 1,
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 7a8d49ef646e..15c03f0e69ad 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -424,11 +424,12 @@ static int em28xx_audio_init(struct em28xx *dev)
424 struct snd_pcm *pcm; 424 struct snd_pcm *pcm;
425 struct snd_card *card; 425 struct snd_card *card;
426 static int devnr; 426 static int devnr;
427 int ret, err; 427 int err;
428 428
429 if (dev->has_audio_class) { 429 if (dev->has_alsa_audio != 1) {
430 /* This device does not support the extension (in this case 430 /* This device does not support the extension (in this case
431 the device is expecting the snd-usb-audio module */ 431 the device is expecting the snd-usb-audio module or
432 doesn't have analog audio support at all) */
432 return 0; 433 return 0;
433 } 434 }
434 435
@@ -449,7 +450,12 @@ static int em28xx_audio_init(struct em28xx *dev)
449 } 450 }
450 451
451 spin_lock_init(&adev->slock); 452 spin_lock_init(&adev->slock);
452 ret = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm); 453 err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm);
454 if (err < 0) {
455 snd_card_free(card);
456 return err;
457 }
458
453 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture); 459 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture);
454 pcm->info_flags = 0; 460 pcm->info_flags = 0;
455 pcm->private_data = dev; 461 pcm->private_data = dev;
@@ -461,7 +467,7 @@ static int em28xx_audio_init(struct em28xx *dev)
461 err = snd_card_register(card); 467 err = snd_card_register(card);
462 if (err < 0) { 468 if (err < 0) {
463 snd_card_free(card); 469 snd_card_free(card);
464 return -ENOMEM; 470 return err;
465 } 471 }
466 adev->sndcard = card; 472 adev->sndcard = card;
467 adev->udev = dev->udev; 473 adev->udev = dev->udev;
@@ -475,9 +481,10 @@ static int em28xx_audio_fini(struct em28xx *dev)
475 if (dev == NULL) 481 if (dev == NULL)
476 return 0; 482 return 0;
477 483
478 if (dev->has_audio_class) { 484 if (dev->has_alsa_audio != 1) {
479 /* This device does not support the extension (in this case 485 /* This device does not support the extension (in this case
480 the device is expecting the snd-usb-audio module */ 486 the device is expecting the snd-usb-audio module or
487 doesn't have analog audio support at all) */
481 return 0; 488 return 0;
482 } 489 }
483 490
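
[Editor's note] The em28xx-audio hunks above add a missing check on snd_pcm_new() and return the callee's error code instead of a hard-coded -ENOMEM. A minimal sketch of that propagation pattern, with hypothetical create_card()/create_pcm()/free_card() stand-ins for the ALSA calls:

#include <stdio.h>

/* Hypothetical stand-ins for the ALSA card/PCM calls used in the hunk;
 * only the error-handling shape is meant to match. */
static int create_card(void **card) { *card = (void *)1; return 0; }
static int create_pcm(void *card)   { (void)card; return -5; /* pretend -EIO */ }
static void free_card(void *card)   { printf("card %p freed\n", card); }

static int audio_init(void)
{
	void *card;
	int err;

	err = create_card(&card);
	if (err < 0)
		return err;

	err = create_pcm(card);
	if (err < 0) {
		/* undo the partial init ... */
		free_card(card);
		/* ... and hand the real reason upward, not a generic -ENOMEM */
		return err;
	}
	return 0;
}

int main(void) { return audio_init() < 0; }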
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index d65d0572403b..e776699b62f9 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -37,6 +37,8 @@
37 37
38#include "em28xx.h" 38#include "em28xx.h"
39 39
40#define DRIVER_NAME "em28xx"
41
40static int tuner = -1; 42static int tuner = -1;
41module_param(tuner, int, 0444); 43module_param(tuner, int, 0444);
42MODULE_PARM_DESC(tuner, "tuner type"); 44MODULE_PARM_DESC(tuner, "tuner type");
@@ -45,122 +47,177 @@ static unsigned int disable_ir;
45module_param(disable_ir, int, 0444); 47module_param(disable_ir, int, 0444);
46MODULE_PARM_DESC(disable_ir, "disable infrared remote support"); 48MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
47 49
50static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
51module_param_array(card, int, NULL, 0444);
52MODULE_PARM_DESC(card, "card type");
53
54/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */
55static unsigned long em28xx_devused;
56
48struct em28xx_hash_table { 57struct em28xx_hash_table {
49 unsigned long hash; 58 unsigned long hash;
50 unsigned int model; 59 unsigned int model;
51 unsigned int tuner; 60 unsigned int tuner;
52}; 61};
53 62
63/*
64 * Reset sequences for analog/digital modes
65 */
66
 67/* Reset for most [analog] boards */
68static struct em28xx_reg_seq default_analog[] = {
69 {EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10},
70 { -1, -1, -1, -1},
71};
72
 73/* Reset for most [digital] boards */
74static struct em28xx_reg_seq default_digital[] = {
75 {EM28XX_R08_GPIO, 0x6e, ~EM_GPIO_4, 10},
76 { -1, -1, -1, -1},
77};
78
79/* Board Hauppauge WinTV HVR 900 analog */
80static struct em28xx_reg_seq hauppauge_wintv_hvr_900_analog[] = {
81 {EM28XX_R08_GPIO, 0x2d, ~EM_GPIO_4, 10},
82 {0x05, 0xff, 0x10, 10},
83 { -1, -1, -1, -1},
84};
85
86/* Board Hauppauge WinTV HVR 900 digital */
87static struct em28xx_reg_seq hauppauge_wintv_hvr_900_digital[] = {
88 {EM28XX_R08_GPIO, 0x2e, ~EM_GPIO_4, 10},
89 {EM2880_R04_GPO, 0x04, 0x0f, 10},
90 {EM2880_R04_GPO, 0x0c, 0x0f, 10},
91 { -1, -1, -1, -1},
92};
93
94/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
95static struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = {
96 {EM28XX_R08_GPIO, 0x69, ~EM_GPIO_4, 10},
97 { -1, -1, -1, -1},
98};
99
100/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
101
102/* Board - EM2870 Kworld 355u
103 Analog - No input analog */
104
 105/* Callback for most boards */
106static struct em28xx_reg_seq default_tuner_gpio[] = {
107 {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
108 {EM28XX_R08_GPIO, 0, EM_GPIO_4, 10},
109 {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
110 { -1, -1, -1, -1},
111};
112
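
[Editor's note] The register sequences introduced above are sentinel-terminated tables: each entry is a GPIO register write plus a settle delay, and a row of -1s marks the end. A sketch of how such a table can be walked; the struct layout and apply_seq() helper below are assumptions for illustration, not the driver's real em28xx_gpio_set() code:

#include <stdio.h>

/* Assumed layout: register, value, mask of bits to touch, delay in ms. */
struct reg_seq {
	int reg;
	unsigned char val, mask;
	int sleep;
};

static const struct reg_seq demo_seq[] = {
	{0x08, 0x6d, 0xef, 10},
	{0x08, 0x6e, 0xef, 10},
	{ -1, -1, -1, -1},	/* sentinel: stop here */
};

static void apply_seq(const struct reg_seq *seq)
{
	/* Walk until the sentinel; a real driver would read-modify-write
	 * the register under the mask and then sleep for seq->sleep ms. */
	for (; seq->reg != -1; seq++)
		printf("reg 0x%02x <- 0x%02x (mask 0x%02x), wait %d ms\n",
		       seq->reg, seq->val, seq->mask, seq->sleep);
}

int main(void)
{
	apply_seq(demo_seq);
	return 0;
}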
113/*
114 * Board definitions
115 */
54struct em28xx_board em28xx_boards[] = { 116struct em28xx_board em28xx_boards[] = {
55 [EM2750_BOARD_UNKNOWN] = { 117 [EM2750_BOARD_UNKNOWN] = {
56 .name = "Unknown EM2750/EM2751 webcam grabber", 118 .name = "Unknown EM2750/EM2751 webcam grabber",
57 .vchannels = 1, 119 .xclk = EM28XX_XCLK_FREQUENCY_48MHZ,
120 .tuner_type = TUNER_ABSENT, /* This is a webcam */
58 .input = { { 121 .input = { {
59 .type = EM28XX_VMUX_COMPOSITE1, 122 .type = EM28XX_VMUX_COMPOSITE1,
60 .vmux = 0, 123 .vmux = 0,
61 .amux = 0, 124 .amux = EM28XX_AMUX_VIDEO,
62 } }, 125 } },
63 }, 126 },
64 [EM2800_BOARD_UNKNOWN] = { 127 [EM2800_BOARD_UNKNOWN] = {
65 .name = "Unknown EM2800 video grabber", 128 .name = "Unknown EM2800 video grabber",
66 .is_em2800 = 1, 129 .is_em2800 = 1,
67 .vchannels = 2,
68 .tda9887_conf = TDA9887_PRESENT, 130 .tda9887_conf = TDA9887_PRESENT,
69 .decoder = EM28XX_SAA7113, 131 .decoder = EM28XX_SAA711X,
70 .input = { { 132 .tuner_type = TUNER_ABSENT,
133 .input = { {
71 .type = EM28XX_VMUX_COMPOSITE1, 134 .type = EM28XX_VMUX_COMPOSITE1,
72 .vmux = SAA7115_COMPOSITE0, 135 .vmux = SAA7115_COMPOSITE0,
73 .amux = 1, 136 .amux = EM28XX_AMUX_LINE_IN,
74 }, { 137 }, {
75 .type = EM28XX_VMUX_SVIDEO, 138 .type = EM28XX_VMUX_SVIDEO,
76 .vmux = SAA7115_SVIDEO3, 139 .vmux = SAA7115_SVIDEO3,
77 .amux = 1, 140 .amux = EM28XX_AMUX_LINE_IN,
78 } }, 141 } },
79 }, 142 },
80 [EM2820_BOARD_UNKNOWN] = { 143 [EM2820_BOARD_UNKNOWN] = {
81 .name = "Unknown EM2750/28xx video grabber", 144 .name = "Unknown EM2750/28xx video grabber",
82 .is_em2800 = 0, 145 .tuner_type = TUNER_ABSENT,
83 .tuner_type = TUNER_ABSENT,
84 }, 146 },
85 [EM2750_BOARD_DLCW_130] = { 147 [EM2750_BOARD_DLCW_130] = {
86 /* Beijing Huaqi Information Digital Technology Co., Ltd */ 148 /* Beijing Huaqi Information Digital Technology Co., Ltd */
87 .name = "Huaqi DLCW-130", 149 .name = "Huaqi DLCW-130",
88 .valid = EM28XX_BOARD_NOT_VALIDATED, 150 .valid = EM28XX_BOARD_NOT_VALIDATED,
89 .vchannels = 1, 151 .xclk = EM28XX_XCLK_FREQUENCY_48MHZ,
152 .tuner_type = TUNER_ABSENT, /* This is a webcam */
90 .input = { { 153 .input = { {
91 .type = EM28XX_VMUX_COMPOSITE1, 154 .type = EM28XX_VMUX_COMPOSITE1,
92 .vmux = 0, 155 .vmux = 0,
93 .amux = 0, 156 .amux = EM28XX_AMUX_VIDEO,
94 } }, 157 } },
95 }, 158 },
96 [EM2820_BOARD_KWORLD_PVRTV2800RF] = { 159 [EM2820_BOARD_KWORLD_PVRTV2800RF] = {
97 .name = "Kworld PVR TV 2800 RF", 160 .name = "Kworld PVR TV 2800 RF",
98 .is_em2800 = 0,
99 .vchannels = 2,
100 .tuner_type = TUNER_TEMIC_PAL, 161 .tuner_type = TUNER_TEMIC_PAL,
101 .tda9887_conf = TDA9887_PRESENT, 162 .tda9887_conf = TDA9887_PRESENT,
102 .decoder = EM28XX_SAA7113, 163 .decoder = EM28XX_SAA711X,
103 .input = { { 164 .input = { {
104 .type = EM28XX_VMUX_COMPOSITE1, 165 .type = EM28XX_VMUX_COMPOSITE1,
105 .vmux = SAA7115_COMPOSITE0, 166 .vmux = SAA7115_COMPOSITE0,
106 .amux = 1, 167 .amux = EM28XX_AMUX_LINE_IN,
107 }, { 168 }, {
108 .type = EM28XX_VMUX_SVIDEO, 169 .type = EM28XX_VMUX_SVIDEO,
109 .vmux = SAA7115_SVIDEO3, 170 .vmux = SAA7115_SVIDEO3,
110 .amux = 1, 171 .amux = EM28XX_AMUX_LINE_IN,
111 } }, 172 } },
112 }, 173 },
113 [EM2820_BOARD_TERRATEC_CINERGY_250] = { 174 [EM2820_BOARD_TERRATEC_CINERGY_250] = {
114 .name = "Terratec Cinergy 250 USB", 175 .name = "Terratec Cinergy 250 USB",
115 .vchannels = 3,
116 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 176 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
117 .tda9887_conf = TDA9887_PRESENT, 177 .tda9887_conf = TDA9887_PRESENT,
118 .decoder = EM28XX_SAA7113, 178 .decoder = EM28XX_SAA711X,
119 .input = { { 179 .input = { {
120 .type = EM28XX_VMUX_TELEVISION, 180 .type = EM28XX_VMUX_TELEVISION,
121 .vmux = SAA7115_COMPOSITE2, 181 .vmux = SAA7115_COMPOSITE2,
122 .amux = 1, 182 .amux = EM28XX_AMUX_LINE_IN,
123 }, { 183 }, {
124 .type = EM28XX_VMUX_COMPOSITE1, 184 .type = EM28XX_VMUX_COMPOSITE1,
125 .vmux = SAA7115_COMPOSITE0, 185 .vmux = SAA7115_COMPOSITE0,
126 .amux = 1, 186 .amux = EM28XX_AMUX_LINE_IN,
127 }, { 187 }, {
128 .type = EM28XX_VMUX_SVIDEO, 188 .type = EM28XX_VMUX_SVIDEO,
129 .vmux = SAA7115_SVIDEO3, 189 .vmux = SAA7115_SVIDEO3,
130 .amux = 1, 190 .amux = EM28XX_AMUX_LINE_IN,
131 } }, 191 } },
132 }, 192 },
133 [EM2820_BOARD_PINNACLE_USB_2] = { 193 [EM2820_BOARD_PINNACLE_USB_2] = {
134 .name = "Pinnacle PCTV USB 2", 194 .name = "Pinnacle PCTV USB 2",
135 .vchannels = 3,
136 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 195 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
137 .tda9887_conf = TDA9887_PRESENT, 196 .tda9887_conf = TDA9887_PRESENT,
138 .decoder = EM28XX_SAA7113, 197 .decoder = EM28XX_SAA711X,
139 .input = { { 198 .input = { {
140 .type = EM28XX_VMUX_TELEVISION, 199 .type = EM28XX_VMUX_TELEVISION,
141 .vmux = SAA7115_COMPOSITE2, 200 .vmux = SAA7115_COMPOSITE2,
142 .amux = 0, 201 .amux = EM28XX_AMUX_VIDEO,
143 }, { 202 }, {
144 .type = EM28XX_VMUX_COMPOSITE1, 203 .type = EM28XX_VMUX_COMPOSITE1,
145 .vmux = SAA7115_COMPOSITE0, 204 .vmux = SAA7115_COMPOSITE0,
146 .amux = 1, 205 .amux = EM28XX_AMUX_LINE_IN,
147 }, { 206 }, {
148 .type = EM28XX_VMUX_SVIDEO, 207 .type = EM28XX_VMUX_SVIDEO,
149 .vmux = SAA7115_SVIDEO3, 208 .vmux = SAA7115_SVIDEO3,
150 .amux = 1, 209 .amux = EM28XX_AMUX_LINE_IN,
151 } }, 210 } },
152 }, 211 },
153 [EM2820_BOARD_HAUPPAUGE_WINTV_USB_2] = { 212 [EM2820_BOARD_HAUPPAUGE_WINTV_USB_2] = {
154 .name = "Hauppauge WinTV USB 2", 213 .name = "Hauppauge WinTV USB 2",
155 .vchannels = 3,
156 .tuner_type = TUNER_PHILIPS_FM1236_MK3, 214 .tuner_type = TUNER_PHILIPS_FM1236_MK3,
157 .tda9887_conf = TDA9887_PRESENT | 215 .tda9887_conf = TDA9887_PRESENT |
158 TDA9887_PORT1_ACTIVE| 216 TDA9887_PORT1_ACTIVE|
159 TDA9887_PORT2_ACTIVE, 217 TDA9887_PORT2_ACTIVE,
160 .decoder = EM28XX_TVP5150, 218 .decoder = EM28XX_TVP5150,
161 .has_msp34xx = 1, 219 .has_msp34xx = 1,
162 /*FIXME: S-Video not tested */ 220 .input = { {
163 .input = { {
164 .type = EM28XX_VMUX_TELEVISION, 221 .type = EM28XX_VMUX_TELEVISION,
165 .vmux = TVP5150_COMPOSITE0, 222 .vmux = TVP5150_COMPOSITE0,
166 .amux = MSP_INPUT_DEFAULT, 223 .amux = MSP_INPUT_DEFAULT,
@@ -174,327 +231,305 @@ struct em28xx_board em28xx_boards[] = {
174 [EM2820_BOARD_DLINK_USB_TV] = { 231 [EM2820_BOARD_DLINK_USB_TV] = {
175 .name = "D-Link DUB-T210 TV Tuner", 232 .name = "D-Link DUB-T210 TV Tuner",
176 .valid = EM28XX_BOARD_NOT_VALIDATED, 233 .valid = EM28XX_BOARD_NOT_VALIDATED,
177 .vchannels = 3,
178 .is_em2800 = 0,
179 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 234 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
180 .tda9887_conf = TDA9887_PRESENT, 235 .tda9887_conf = TDA9887_PRESENT,
181 .decoder = EM28XX_SAA7113, 236 .decoder = EM28XX_SAA711X,
182 .input = { { 237 .input = { {
183 .type = EM28XX_VMUX_TELEVISION, 238 .type = EM28XX_VMUX_TELEVISION,
184 .vmux = SAA7115_COMPOSITE2, 239 .vmux = SAA7115_COMPOSITE2,
185 .amux = 1, 240 .amux = EM28XX_AMUX_LINE_IN,
186 }, { 241 }, {
187 .type = EM28XX_VMUX_COMPOSITE1, 242 .type = EM28XX_VMUX_COMPOSITE1,
188 .vmux = SAA7115_COMPOSITE0, 243 .vmux = SAA7115_COMPOSITE0,
189 .amux = 1, 244 .amux = EM28XX_AMUX_LINE_IN,
190 }, { 245 }, {
191 .type = EM28XX_VMUX_SVIDEO, 246 .type = EM28XX_VMUX_SVIDEO,
192 .vmux = SAA7115_SVIDEO3, 247 .vmux = SAA7115_SVIDEO3,
193 .amux = 1, 248 .amux = EM28XX_AMUX_LINE_IN,
194 } }, 249 } },
195 }, 250 },
196 [EM2820_BOARD_HERCULES_SMART_TV_USB2] = { 251 [EM2820_BOARD_HERCULES_SMART_TV_USB2] = {
197 .name = "Hercules Smart TV USB 2.0", 252 .name = "Hercules Smart TV USB 2.0",
198 .valid = EM28XX_BOARD_NOT_VALIDATED, 253 .valid = EM28XX_BOARD_NOT_VALIDATED,
199 .vchannels = 3,
200 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 254 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
201 .tda9887_conf = TDA9887_PRESENT, 255 .tda9887_conf = TDA9887_PRESENT,
202 .decoder = EM28XX_SAA7113, 256 .decoder = EM28XX_SAA711X,
203 .input = { { 257 .input = { {
204 .type = EM28XX_VMUX_TELEVISION, 258 .type = EM28XX_VMUX_TELEVISION,
205 .vmux = SAA7115_COMPOSITE2, 259 .vmux = SAA7115_COMPOSITE2,
206 .amux = 1, 260 .amux = EM28XX_AMUX_LINE_IN,
207 }, { 261 }, {
208 .type = EM28XX_VMUX_COMPOSITE1, 262 .type = EM28XX_VMUX_COMPOSITE1,
209 .vmux = SAA7115_COMPOSITE0, 263 .vmux = SAA7115_COMPOSITE0,
210 .amux = 1, 264 .amux = EM28XX_AMUX_LINE_IN,
211 }, { 265 }, {
212 .type = EM28XX_VMUX_SVIDEO, 266 .type = EM28XX_VMUX_SVIDEO,
213 .vmux = SAA7115_SVIDEO3, 267 .vmux = SAA7115_SVIDEO3,
214 .amux = 1, 268 .amux = EM28XX_AMUX_LINE_IN,
215 } }, 269 } },
216 }, 270 },
217 [EM2820_BOARD_PINNACLE_USB_2_FM1216ME] = { 271 [EM2820_BOARD_PINNACLE_USB_2_FM1216ME] = {
218 .name = "Pinnacle PCTV USB 2 (Philips FM1216ME)", 272 .name = "Pinnacle PCTV USB 2 (Philips FM1216ME)",
219 .valid = EM28XX_BOARD_NOT_VALIDATED, 273 .valid = EM28XX_BOARD_NOT_VALIDATED,
220 .vchannels = 3,
221 .is_em2800 = 0,
222 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, 274 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
223 .tda9887_conf = TDA9887_PRESENT, 275 .tda9887_conf = TDA9887_PRESENT,
224 .decoder = EM28XX_SAA7113, 276 .decoder = EM28XX_SAA711X,
225 .input = { { 277 .input = { {
226 .type = EM28XX_VMUX_TELEVISION, 278 .type = EM28XX_VMUX_TELEVISION,
227 .vmux = SAA7115_COMPOSITE2, 279 .vmux = SAA7115_COMPOSITE2,
228 .amux = 0, 280 .amux = EM28XX_AMUX_VIDEO,
229 }, { 281 }, {
230 .type = EM28XX_VMUX_COMPOSITE1, 282 .type = EM28XX_VMUX_COMPOSITE1,
231 .vmux = SAA7115_COMPOSITE0, 283 .vmux = SAA7115_COMPOSITE0,
232 .amux = 1, 284 .amux = EM28XX_AMUX_LINE_IN,
233 }, { 285 }, {
234 .type = EM28XX_VMUX_SVIDEO, 286 .type = EM28XX_VMUX_SVIDEO,
235 .vmux = SAA7115_SVIDEO3, 287 .vmux = SAA7115_SVIDEO3,
236 .amux = 1, 288 .amux = EM28XX_AMUX_LINE_IN,
237 } }, 289 } },
238 }, 290 },
239 [EM2820_BOARD_GADMEI_UTV310] = { 291 [EM2820_BOARD_GADMEI_UTV310] = {
240 .name = "Gadmei UTV310", 292 .name = "Gadmei UTV310",
241 .valid = EM28XX_BOARD_NOT_VALIDATED, 293 .valid = EM28XX_BOARD_NOT_VALIDATED,
242 .vchannels = 3,
243 .tuner_type = TUNER_TNF_5335MF, 294 .tuner_type = TUNER_TNF_5335MF,
244 .tda9887_conf = TDA9887_PRESENT, 295 .tda9887_conf = TDA9887_PRESENT,
245 .decoder = EM28XX_SAA7113, 296 .decoder = EM28XX_SAA711X,
246 .input = { { 297 .input = { {
247 .type = EM28XX_VMUX_TELEVISION, 298 .type = EM28XX_VMUX_TELEVISION,
248 .vmux = SAA7115_COMPOSITE1, 299 .vmux = SAA7115_COMPOSITE1,
249 .amux = 1, 300 .amux = EM28XX_AMUX_LINE_IN,
250 }, { 301 }, {
251 .type = EM28XX_VMUX_COMPOSITE1, 302 .type = EM28XX_VMUX_COMPOSITE1,
252 .vmux = SAA7115_COMPOSITE0, 303 .vmux = SAA7115_COMPOSITE0,
253 .amux = 1, 304 .amux = EM28XX_AMUX_LINE_IN,
254 }, { 305 }, {
255 .type = EM28XX_VMUX_SVIDEO, 306 .type = EM28XX_VMUX_SVIDEO,
256 .vmux = SAA7115_SVIDEO3, 307 .vmux = SAA7115_SVIDEO3,
257 .amux = 1, 308 .amux = EM28XX_AMUX_LINE_IN,
258 } }, 309 } },
259 }, 310 },
260 [EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE] = { 311 [EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE] = {
261 .name = "Leadtek Winfast USB II Deluxe", 312 .name = "Leadtek Winfast USB II Deluxe",
262 .valid = EM28XX_BOARD_NOT_VALIDATED, 313 .valid = EM28XX_BOARD_NOT_VALIDATED,
263 .vchannels = 3,
264 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, 314 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
265 .tda9887_conf = TDA9887_PRESENT, 315 .tda9887_conf = TDA9887_PRESENT,
266 .decoder = EM28XX_SAA7114, 316 .decoder = EM28XX_SAA711X,
267 .input = { { 317 .input = { {
268 .type = EM28XX_VMUX_TELEVISION, 318 .type = EM28XX_VMUX_TELEVISION,
269 .vmux = 2, 319 .vmux = SAA7115_COMPOSITE2,
270 .amux = 0, 320 .amux = EM28XX_AMUX_VIDEO,
271 }, { 321 }, {
272 .type = EM28XX_VMUX_COMPOSITE1, 322 .type = EM28XX_VMUX_COMPOSITE1,
273 .vmux = 0,
274 .amux = 1,
275 }, {
276 .type = EM28XX_VMUX_SVIDEO,
277 .vmux = 9,
278 .amux = 1,
279 } },
280 },
281 [EM2820_BOARD_PINNACLE_DVC_100] = {
282 .name = "Pinnacle Dazzle DVC 100",
283 .valid = EM28XX_BOARD_NOT_VALIDATED,
284 .vchannels = 3,
285 .decoder = EM28XX_SAA7113,
286 .input = { {
287 .type = EM28XX_VMUX_COMPOSITE1,
288 .vmux = SAA7115_COMPOSITE0, 323 .vmux = SAA7115_COMPOSITE0,
289 .amux = 1, 324 .amux = EM28XX_AMUX_LINE_IN,
290 }, { 325 }, {
291 .type = EM28XX_VMUX_SVIDEO, 326 .type = EM28XX_VMUX_SVIDEO,
292 .vmux = SAA7115_SVIDEO3, 327 .vmux = SAA7115_COMPOSITE0,
293 .amux = 1, 328 .amux = EM28XX_AMUX_LINE_IN,
294 } }, 329 } },
295 }, 330 },
296 [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = { 331 [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = {
297 .name = "Videology 20K14XUSB USB2.0", 332 .name = "Videology 20K14XUSB USB2.0",
298 .valid = EM28XX_BOARD_NOT_VALIDATED, 333 .valid = EM28XX_BOARD_NOT_VALIDATED,
299 .vchannels = 1, 334 .tuner_type = TUNER_ABSENT, /* This is a webcam */
300 .input = { { 335 .input = { {
301 .type = EM28XX_VMUX_COMPOSITE1, 336 .type = EM28XX_VMUX_COMPOSITE1,
302 .vmux = 0, 337 .vmux = 0,
303 .amux = 0, 338 .amux = EM28XX_AMUX_VIDEO,
304 } }, 339 } },
305 }, 340 },
306 [EM2821_BOARD_PROLINK_PLAYTV_USB2] = { 341 [EM2821_BOARD_PROLINK_PLAYTV_USB2] = {
307 .name = "SIIG AVTuner-PVR/Prolink PlayTV USB 2.0", 342 .name = "SIIG AVTuner-PVR/Prolink PlayTV USB 2.0",
308 .valid = EM28XX_BOARD_NOT_VALIDATED, 343 .valid = EM28XX_BOARD_NOT_VALIDATED,
309 .vchannels = 3,
310 .is_em2800 = 0,
311 .tuner_type = TUNER_LG_PAL_NEW_TAPC, /* unknown? */ 344 .tuner_type = TUNER_LG_PAL_NEW_TAPC, /* unknown? */
312 .tda9887_conf = TDA9887_PRESENT, /* unknown? */ 345 .tda9887_conf = TDA9887_PRESENT, /* unknown? */
313 .decoder = EM28XX_SAA7113, 346 .decoder = EM28XX_SAA711X,
314 .input = { { 347 .input = { {
315 .type = EM28XX_VMUX_TELEVISION, 348 .type = EM28XX_VMUX_TELEVISION,
316 .vmux = SAA7115_COMPOSITE2, 349 .vmux = SAA7115_COMPOSITE2,
317 .amux = 1, 350 .amux = EM28XX_AMUX_LINE_IN,
318 }, { 351 }, {
319 .type = EM28XX_VMUX_COMPOSITE1, 352 .type = EM28XX_VMUX_COMPOSITE1,
320 .vmux = SAA7115_COMPOSITE0, 353 .vmux = SAA7115_COMPOSITE0,
321 .amux = 1, 354 .amux = EM28XX_AMUX_LINE_IN,
322 }, { 355 }, {
323 .type = EM28XX_VMUX_SVIDEO, 356 .type = EM28XX_VMUX_SVIDEO,
324 .vmux = SAA7115_SVIDEO3, 357 .vmux = SAA7115_SVIDEO3,
325 .amux = 1, 358 .amux = EM28XX_AMUX_LINE_IN,
326 } }, 359 } },
327 }, 360 },
328 [EM2821_BOARD_SUPERCOMP_USB_2] = { 361 [EM2821_BOARD_SUPERCOMP_USB_2] = {
329 .name = "Supercomp USB 2.0 TV", 362 .name = "Supercomp USB 2.0 TV",
330 .valid = EM28XX_BOARD_NOT_VALIDATED, 363 .valid = EM28XX_BOARD_NOT_VALIDATED,
331 .vchannels = 3,
332 .is_em2800 = 0,
333 .tuner_type = TUNER_PHILIPS_FM1236_MK3, 364 .tuner_type = TUNER_PHILIPS_FM1236_MK3,
334 .tda9887_conf = TDA9887_PRESENT | 365 .tda9887_conf = TDA9887_PRESENT |
335 TDA9887_PORT1_ACTIVE | 366 TDA9887_PORT1_ACTIVE |
336 TDA9887_PORT2_ACTIVE, 367 TDA9887_PORT2_ACTIVE,
337 .decoder = EM28XX_SAA7113, 368 .decoder = EM28XX_SAA711X,
338 .input = { { 369 .input = { {
339 .type = EM28XX_VMUX_TELEVISION, 370 .type = EM28XX_VMUX_TELEVISION,
340 .vmux = SAA7115_COMPOSITE2, 371 .vmux = SAA7115_COMPOSITE2,
341 .amux = 1, 372 .amux = EM28XX_AMUX_LINE_IN,
342 }, { 373 }, {
343 .type = EM28XX_VMUX_COMPOSITE1, 374 .type = EM28XX_VMUX_COMPOSITE1,
344 .vmux = SAA7115_COMPOSITE0, 375 .vmux = SAA7115_COMPOSITE0,
345 .amux = 0, 376 .amux = EM28XX_AMUX_VIDEO,
346 }, { 377 }, {
347 .type = EM28XX_VMUX_SVIDEO, 378 .type = EM28XX_VMUX_SVIDEO,
348 .vmux = SAA7115_SVIDEO3, 379 .vmux = SAA7115_SVIDEO3,
349 .amux = 1, 380 .amux = EM28XX_AMUX_LINE_IN,
350 } }, 381 } },
351 }, 382 },
352 [EM2821_BOARD_USBGEAR_VD204] = { 383 [EM2821_BOARD_USBGEAR_VD204] = {
353 .name = "Usbgear VD204v9", 384 .name = "Usbgear VD204v9",
354 .valid = EM28XX_BOARD_NOT_VALIDATED, 385 .valid = EM28XX_BOARD_NOT_VALIDATED,
355 .vchannels = 2, 386 .tuner_type = TUNER_ABSENT, /* Capture only device */
356 .decoder = EM28XX_SAA7113, 387 .decoder = EM28XX_SAA711X,
357 .input = { { 388 .input = { {
358 .type = EM28XX_VMUX_COMPOSITE1, 389 .type = EM28XX_VMUX_COMPOSITE1,
359 .vmux = SAA7115_COMPOSITE0, 390 .vmux = SAA7115_COMPOSITE0,
360 .amux = 1, 391 .amux = EM28XX_AMUX_LINE_IN,
361 }, { 392 }, {
362 .type = EM28XX_VMUX_SVIDEO, 393 .type = EM28XX_VMUX_SVIDEO,
363 .vmux = SAA7115_SVIDEO3, 394 .vmux = SAA7115_SVIDEO3,
364 .amux = 1, 395 .amux = EM28XX_AMUX_LINE_IN,
365 } }, 396 } },
366 }, 397 },
367 [EM2860_BOARD_NETGMBH_CAM] = { 398 [EM2860_BOARD_NETGMBH_CAM] = {
368 /* Beijing Huaqi Information Digital Technology Co., Ltd */ 399 /* Beijing Huaqi Information Digital Technology Co., Ltd */
369 .name = "NetGMBH Cam", 400 .name = "NetGMBH Cam",
370 .valid = EM28XX_BOARD_NOT_VALIDATED, 401 .valid = EM28XX_BOARD_NOT_VALIDATED,
371 .vchannels = 1, 402 .tuner_type = TUNER_ABSENT, /* This is a webcam */
372 .input = { { 403 .input = { {
373 .type = EM28XX_VMUX_COMPOSITE1, 404 .type = EM28XX_VMUX_COMPOSITE1,
374 .vmux = 0, 405 .vmux = 0,
375 .amux = 0, 406 .amux = EM28XX_AMUX_VIDEO,
376 } }, 407 } },
377 }, 408 },
378 [EM2860_BOARD_TYPHOON_DVD_MAKER] = { 409 [EM2860_BOARD_TYPHOON_DVD_MAKER] = {
379 .name = "Typhoon DVD Maker", 410 .name = "Typhoon DVD Maker",
380 .valid = EM28XX_BOARD_NOT_VALIDATED, 411 .decoder = EM28XX_SAA711X,
381 .vchannels = 2, 412 .tuner_type = TUNER_ABSENT, /* Capture only device */
382 .decoder = EM28XX_SAA7113, 413 .input = { {
383 .input = { {
384 .type = EM28XX_VMUX_COMPOSITE1, 414 .type = EM28XX_VMUX_COMPOSITE1,
385 .vmux = SAA7115_COMPOSITE0, 415 .vmux = SAA7115_COMPOSITE0,
386 .amux = 1, 416 .amux = EM28XX_AMUX_LINE_IN,
387 }, { 417 }, {
388 .type = EM28XX_VMUX_SVIDEO, 418 .type = EM28XX_VMUX_SVIDEO,
389 .vmux = SAA7115_SVIDEO3, 419 .vmux = SAA7115_SVIDEO3,
390 .amux = 1, 420 .amux = EM28XX_AMUX_LINE_IN,
391 } }, 421 } },
392 }, 422 },
393 [EM2860_BOARD_GADMEI_UTV330] = { 423 [EM2860_BOARD_GADMEI_UTV330] = {
394 .name = "Gadmei UTV330", 424 .name = "Gadmei UTV330",
395 .valid = EM28XX_BOARD_NOT_VALIDATED, 425 .valid = EM28XX_BOARD_NOT_VALIDATED,
396 .vchannels = 3,
397 .tuner_type = TUNER_TNF_5335MF, 426 .tuner_type = TUNER_TNF_5335MF,
398 .tda9887_conf = TDA9887_PRESENT, 427 .tda9887_conf = TDA9887_PRESENT,
399 .decoder = EM28XX_SAA7113, 428 .decoder = EM28XX_SAA711X,
400 .input = { { 429 .input = { {
401 .type = EM28XX_VMUX_TELEVISION, 430 .type = EM28XX_VMUX_TELEVISION,
402 .vmux = SAA7115_COMPOSITE2, 431 .vmux = SAA7115_COMPOSITE2,
403 .amux = 0, 432 .amux = EM28XX_AMUX_VIDEO,
404 }, { 433 }, {
405 .type = EM28XX_VMUX_COMPOSITE1, 434 .type = EM28XX_VMUX_COMPOSITE1,
406 .vmux = SAA7115_COMPOSITE0, 435 .vmux = SAA7115_COMPOSITE0,
407 .amux = 1, 436 .amux = EM28XX_AMUX_LINE_IN,
408 }, { 437 }, {
409 .type = EM28XX_VMUX_SVIDEO, 438 .type = EM28XX_VMUX_SVIDEO,
410 .vmux = SAA7115_SVIDEO3, 439 .vmux = SAA7115_SVIDEO3,
411 .amux = 1, 440 .amux = EM28XX_AMUX_LINE_IN,
412 } }, 441 } },
413 }, 442 },
414 [EM2860_BOARD_TERRATEC_HYBRID_XS] = { 443 [EM2860_BOARD_TERRATEC_HYBRID_XS] = {
415 .name = "Terratec Cinergy A Hybrid XS", 444 .name = "Terratec Cinergy A Hybrid XS",
416 .valid = EM28XX_BOARD_NOT_VALIDATED, 445 .valid = EM28XX_BOARD_NOT_VALIDATED,
417 .vchannels = 3,
418 .tuner_type = TUNER_XC2028, 446 .tuner_type = TUNER_XC2028,
447 .tuner_gpio = default_tuner_gpio,
419 .decoder = EM28XX_TVP5150, 448 .decoder = EM28XX_TVP5150,
420 .input = { { 449
450 .input = { {
421 .type = EM28XX_VMUX_TELEVISION, 451 .type = EM28XX_VMUX_TELEVISION,
422 .vmux = TVP5150_COMPOSITE0, 452 .vmux = TVP5150_COMPOSITE0,
423 .amux = 0, 453 .amux = EM28XX_AMUX_VIDEO,
454 .gpio = hauppauge_wintv_hvr_900_analog,
424 }, { 455 }, {
425 .type = EM28XX_VMUX_COMPOSITE1, 456 .type = EM28XX_VMUX_COMPOSITE1,
426 .vmux = TVP5150_COMPOSITE1, 457 .vmux = TVP5150_COMPOSITE1,
427 .amux = 1, 458 .amux = EM28XX_AMUX_LINE_IN,
459 .gpio = hauppauge_wintv_hvr_900_analog,
428 }, { 460 }, {
429 .type = EM28XX_VMUX_SVIDEO, 461 .type = EM28XX_VMUX_SVIDEO,
430 .vmux = TVP5150_SVIDEO, 462 .vmux = TVP5150_SVIDEO,
431 .amux = 1, 463 .amux = EM28XX_AMUX_LINE_IN,
464 .gpio = hauppauge_wintv_hvr_900_analog,
432 } }, 465 } },
433 }, 466 },
434 [EM2861_BOARD_KWORLD_PVRTV_300U] = { 467 [EM2861_BOARD_KWORLD_PVRTV_300U] = {
435 .name = "KWorld PVRTV 300U", 468 .name = "KWorld PVRTV 300U",
436 .valid = EM28XX_BOARD_NOT_VALIDATED, 469 .valid = EM28XX_BOARD_NOT_VALIDATED,
437 .vchannels = 3,
438 .tuner_type = TUNER_XC2028, 470 .tuner_type = TUNER_XC2028,
471 .tuner_gpio = default_tuner_gpio,
439 .decoder = EM28XX_TVP5150, 472 .decoder = EM28XX_TVP5150,
440 .input = { { 473 .input = { {
441 .type = EM28XX_VMUX_TELEVISION, 474 .type = EM28XX_VMUX_TELEVISION,
442 .vmux = TVP5150_COMPOSITE0, 475 .vmux = TVP5150_COMPOSITE0,
443 .amux = 0, 476 .amux = EM28XX_AMUX_VIDEO,
444 }, { 477 }, {
445 .type = EM28XX_VMUX_COMPOSITE1, 478 .type = EM28XX_VMUX_COMPOSITE1,
446 .vmux = TVP5150_COMPOSITE1, 479 .vmux = TVP5150_COMPOSITE1,
447 .amux = 1, 480 .amux = EM28XX_AMUX_LINE_IN,
448 }, { 481 }, {
449 .type = EM28XX_VMUX_SVIDEO, 482 .type = EM28XX_VMUX_SVIDEO,
450 .vmux = TVP5150_SVIDEO, 483 .vmux = TVP5150_SVIDEO,
451 .amux = 1, 484 .amux = EM28XX_AMUX_LINE_IN,
452 } }, 485 } },
453 }, 486 },
454 [EM2861_BOARD_YAKUMO_MOVIE_MIXER] = { 487 [EM2861_BOARD_YAKUMO_MOVIE_MIXER] = {
455 .name = "Yakumo MovieMixer", 488 .name = "Yakumo MovieMixer",
456 .valid = EM28XX_BOARD_NOT_VALIDATED, 489 .tuner_type = TUNER_ABSENT, /* Capture only device */
457 .vchannels = 1,
458 .decoder = EM28XX_TVP5150, 490 .decoder = EM28XX_TVP5150,
459 .input = { { 491 .input = { {
460 .type = EM28XX_VMUX_TELEVISION, 492 .type = EM28XX_VMUX_TELEVISION,
461 .vmux = TVP5150_COMPOSITE0, 493 .vmux = TVP5150_COMPOSITE0,
462 .amux = 0, 494 .amux = EM28XX_AMUX_VIDEO,
463 }, { 495 }, {
464 .type = EM28XX_VMUX_COMPOSITE1, 496 .type = EM28XX_VMUX_COMPOSITE1,
465 .vmux = TVP5150_COMPOSITE1, 497 .vmux = TVP5150_COMPOSITE1,
466 .amux = 1, 498 .amux = EM28XX_AMUX_LINE_IN,
467 }, { 499 }, {
468 .type = EM28XX_VMUX_SVIDEO, 500 .type = EM28XX_VMUX_SVIDEO,
469 .vmux = TVP5150_SVIDEO, 501 .vmux = TVP5150_SVIDEO,
470 .amux = 1, 502 .amux = EM28XX_AMUX_LINE_IN,
471 } }, 503 } },
472 }, 504 },
473 [EM2861_BOARD_PLEXTOR_PX_TV100U] = { 505 [EM2861_BOARD_PLEXTOR_PX_TV100U] = {
474 .name = "Plextor ConvertX PX-TV100U", 506 .name = "Plextor ConvertX PX-TV100U",
475 .valid = EM28XX_BOARD_NOT_VALIDATED, 507 .valid = EM28XX_BOARD_NOT_VALIDATED,
476 .vchannels = 3,
477 .tuner_type = TUNER_TNF_5335MF, 508 .tuner_type = TUNER_TNF_5335MF,
478 .tda9887_conf = TDA9887_PRESENT, 509 .tda9887_conf = TDA9887_PRESENT,
479 .decoder = EM28XX_TVP5150, 510 .decoder = EM28XX_TVP5150,
480 .input = { { 511 .input = { {
481 .type = EM28XX_VMUX_TELEVISION, 512 .type = EM28XX_VMUX_TELEVISION,
482 .vmux = TVP5150_COMPOSITE0, 513 .vmux = TVP5150_COMPOSITE0,
483 .amux = 1, 514 .amux = EM28XX_AMUX_LINE_IN,
484 }, { 515 }, {
485 .type = EM28XX_VMUX_COMPOSITE1, 516 .type = EM28XX_VMUX_COMPOSITE1,
486 .vmux = TVP5150_COMPOSITE1, 517 .vmux = TVP5150_COMPOSITE1,
487 .amux = 1, 518 .amux = EM28XX_AMUX_LINE_IN,
488 }, { 519 }, {
489 .type = EM28XX_VMUX_SVIDEO, 520 .type = EM28XX_VMUX_SVIDEO,
490 .vmux = TVP5150_SVIDEO, 521 .vmux = TVP5150_SVIDEO,
491 .amux = 1, 522 .amux = EM28XX_AMUX_LINE_IN,
492 } }, 523 } },
493 }, 524 },
525
526 /* Those boards with em2870 are DVB Only*/
527
494 [EM2870_BOARD_TERRATEC_XS] = { 528 [EM2870_BOARD_TERRATEC_XS] = {
495 .name = "Terratec Cinergy T XS", 529 .name = "Terratec Cinergy T XS",
496 .valid = EM28XX_BOARD_NOT_VALIDATED, 530 .valid = EM28XX_BOARD_NOT_VALIDATED,
497 .tuner_type = TUNER_XC2028, 531 .tuner_type = TUNER_XC2028,
532 .tuner_gpio = default_tuner_gpio,
498 }, 533 },
499 [EM2870_BOARD_TERRATEC_XS_MT2060] = { 534 [EM2870_BOARD_TERRATEC_XS_MT2060] = {
500 .name = "Terratec Cinergy T XS (MT2060)", 535 .name = "Terratec Cinergy T XS (MT2060)",
@@ -505,6 +540,7 @@ struct em28xx_board em28xx_boards[] = {
505 .name = "Kworld 350 U DVB-T", 540 .name = "Kworld 350 U DVB-T",
506 .valid = EM28XX_BOARD_NOT_VALIDATED, 541 .valid = EM28XX_BOARD_NOT_VALIDATED,
507 .tuner_type = TUNER_XC2028, 542 .tuner_type = TUNER_XC2028,
543 .tuner_gpio = default_tuner_gpio,
508 }, 544 },
509 [EM2870_BOARD_KWORLD_355U] = { 545 [EM2870_BOARD_KWORLD_355U] = {
510 .name = "Kworld 355 U DVB-T", 546 .name = "Kworld 355 U DVB-T",
@@ -514,164 +550,216 @@ struct em28xx_board em28xx_boards[] = {
514 .name = "Pinnacle PCTV DVB-T", 550 .name = "Pinnacle PCTV DVB-T",
515 .valid = EM28XX_BOARD_NOT_VALIDATED, 551 .valid = EM28XX_BOARD_NOT_VALIDATED,
516 .tuner_type = TUNER_ABSENT, /* MT2060 */ 552 .tuner_type = TUNER_ABSENT, /* MT2060 */
553 /* djh - I have serious doubts this is right... */
554 .xclk = EM28XX_XCLK_IR_RC5_MODE |
555 EM28XX_XCLK_FREQUENCY_10MHZ,
517 }, 556 },
518 [EM2870_BOARD_COMPRO_VIDEOMATE] = { 557 [EM2870_BOARD_COMPRO_VIDEOMATE] = {
519 .name = "Compro, VideoMate U3", 558 .name = "Compro, VideoMate U3",
520 .valid = EM28XX_BOARD_NOT_VALIDATED, 559 .valid = EM28XX_BOARD_NOT_VALIDATED,
521 .tuner_type = TUNER_ABSENT, /* MT2060 */ 560 .tuner_type = TUNER_ABSENT, /* MT2060 */
522 }, 561 },
562
523 [EM2880_BOARD_TERRATEC_HYBRID_XS_FR] = { 563 [EM2880_BOARD_TERRATEC_HYBRID_XS_FR] = {
524 .name = "Terratec Hybrid XS Secam", 564 .name = "Terratec Hybrid XS Secam",
525 .valid = EM28XX_BOARD_NOT_VALIDATED, 565 .valid = EM28XX_BOARD_NOT_VALIDATED,
526 .vchannels = 3,
527 .has_msp34xx = 1, 566 .has_msp34xx = 1,
528 .tuner_type = TUNER_XC2028, 567 .tuner_type = TUNER_XC2028,
568 .tuner_gpio = default_tuner_gpio,
529 .decoder = EM28XX_TVP5150, 569 .decoder = EM28XX_TVP5150,
530 .input = { { 570 .input = { {
531 .type = EM28XX_VMUX_TELEVISION, 571 .type = EM28XX_VMUX_TELEVISION,
532 .vmux = TVP5150_COMPOSITE0, 572 .vmux = TVP5150_COMPOSITE0,
533 .amux = 0, 573 .amux = EM28XX_AMUX_VIDEO,
574 .gpio = default_analog,
534 }, { 575 }, {
535 .type = EM28XX_VMUX_COMPOSITE1, 576 .type = EM28XX_VMUX_COMPOSITE1,
536 .vmux = TVP5150_COMPOSITE1, 577 .vmux = TVP5150_COMPOSITE1,
537 .amux = 1, 578 .amux = EM28XX_AMUX_LINE_IN,
579 .gpio = default_analog,
538 }, { 580 }, {
539 .type = EM28XX_VMUX_SVIDEO, 581 .type = EM28XX_VMUX_SVIDEO,
540 .vmux = TVP5150_SVIDEO, 582 .vmux = TVP5150_SVIDEO,
541 .amux = 1, 583 .amux = EM28XX_AMUX_LINE_IN,
584 .gpio = default_analog,
542 } }, 585 } },
543 }, 586 },
544 [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = { 587 [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = {
545 .name = "Hauppauge WinTV HVR 900", 588 .name = "Hauppauge WinTV HVR 900",
546 .vchannels = 3,
547 .tda9887_conf = TDA9887_PRESENT, 589 .tda9887_conf = TDA9887_PRESENT,
548 .tuner_type = TUNER_XC2028, 590 .tuner_type = TUNER_XC2028,
591 .tuner_gpio = default_tuner_gpio,
549 .mts_firmware = 1, 592 .mts_firmware = 1,
550 .has_dvb = 1, 593 .has_dvb = 1,
594 .dvb_gpio = hauppauge_wintv_hvr_900_digital,
551 .decoder = EM28XX_TVP5150, 595 .decoder = EM28XX_TVP5150,
552 .input = { { 596 .input = { {
553 .type = EM28XX_VMUX_TELEVISION, 597 .type = EM28XX_VMUX_TELEVISION,
554 .vmux = TVP5150_COMPOSITE0, 598 .vmux = TVP5150_COMPOSITE0,
555 .amux = 0, 599 .amux = EM28XX_AMUX_VIDEO,
600 .gpio = hauppauge_wintv_hvr_900_analog,
556 }, { 601 }, {
557 .type = EM28XX_VMUX_COMPOSITE1, 602 .type = EM28XX_VMUX_COMPOSITE1,
558 .vmux = TVP5150_COMPOSITE1, 603 .vmux = TVP5150_COMPOSITE1,
559 .amux = 1, 604 .amux = EM28XX_AMUX_LINE_IN,
605 .gpio = hauppauge_wintv_hvr_900_analog,
560 }, { 606 }, {
561 .type = EM28XX_VMUX_SVIDEO, 607 .type = EM28XX_VMUX_SVIDEO,
562 .vmux = TVP5150_SVIDEO, 608 .vmux = TVP5150_SVIDEO,
563 .amux = 1, 609 .amux = EM28XX_AMUX_LINE_IN,
610 .gpio = hauppauge_wintv_hvr_900_analog,
564 } }, 611 } },
565 }, 612 },
566 [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2] = { 613 [EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2] = {
567 .name = "Hauppauge WinTV HVR 900 (R2)", 614 .name = "Hauppauge WinTV HVR 900 (R2)",
568 .vchannels = 3,
569 .tda9887_conf = TDA9887_PRESENT, 615 .tda9887_conf = TDA9887_PRESENT,
570 .tuner_type = TUNER_XC2028, 616 .tuner_type = TUNER_XC2028,
617 .tuner_gpio = default_tuner_gpio,
571 .mts_firmware = 1, 618 .mts_firmware = 1,
572 .decoder = EM28XX_TVP5150, 619 .decoder = EM28XX_TVP5150,
620 .input = { {
621 .type = EM28XX_VMUX_TELEVISION,
622 .vmux = TVP5150_COMPOSITE0,
623 .amux = EM28XX_AMUX_VIDEO,
624 .gpio = hauppauge_wintv_hvr_900_analog,
625 }, {
626 .type = EM28XX_VMUX_COMPOSITE1,
627 .vmux = TVP5150_COMPOSITE1,
628 .amux = EM28XX_AMUX_LINE_IN,
629 .gpio = hauppauge_wintv_hvr_900_analog,
630 }, {
631 .type = EM28XX_VMUX_SVIDEO,
632 .vmux = TVP5150_SVIDEO,
633 .amux = EM28XX_AMUX_LINE_IN,
634 .gpio = hauppauge_wintv_hvr_900_analog,
635 } },
636 },
637 [EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850] = {
638 .name = "Hauppauge WinTV HVR 850",
639 .tuner_type = TUNER_XC2028,
640 .tuner_gpio = default_tuner_gpio,
641 .mts_firmware = 1,
642 .has_dvb = 1,
643 .dvb_gpio = hauppauge_wintv_hvr_900_digital,
644 .ir_codes = ir_codes_hauppauge_new,
645 .decoder = EM28XX_TVP5150,
573 .input = { { 646 .input = { {
574 .type = EM28XX_VMUX_TELEVISION, 647 .type = EM28XX_VMUX_TELEVISION,
575 .vmux = TVP5150_COMPOSITE0, 648 .vmux = TVP5150_COMPOSITE0,
576 .amux = 0, 649 .amux = EM28XX_AMUX_VIDEO,
650 .gpio = hauppauge_wintv_hvr_900_analog,
577 }, { 651 }, {
578 .type = EM28XX_VMUX_COMPOSITE1, 652 .type = EM28XX_VMUX_COMPOSITE1,
579 .vmux = TVP5150_COMPOSITE1, 653 .vmux = TVP5150_COMPOSITE1,
580 .amux = 3, 654 .amux = EM28XX_AMUX_LINE_IN,
655 .gpio = hauppauge_wintv_hvr_900_analog,
581 }, { 656 }, {
582 .type = EM28XX_VMUX_SVIDEO, 657 .type = EM28XX_VMUX_SVIDEO,
583 .vmux = TVP5150_SVIDEO, 658 .vmux = TVP5150_SVIDEO,
584 .amux = 1, 659 .amux = EM28XX_AMUX_LINE_IN,
660 .gpio = hauppauge_wintv_hvr_900_analog,
585 } }, 661 } },
586 }, 662 },
587 [EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950] = { 663 [EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950] = {
588 .name = "Hauppauge WinTV HVR 950", 664 .name = "Hauppauge WinTV HVR 950",
589 .vchannels = 3,
590 .tda9887_conf = TDA9887_PRESENT,
591 .tuner_type = TUNER_XC2028, 665 .tuner_type = TUNER_XC2028,
666 .tuner_gpio = default_tuner_gpio,
592 .mts_firmware = 1, 667 .mts_firmware = 1,
593 .has_12mhz_i2s = 1,
594 .has_dvb = 1, 668 .has_dvb = 1,
669 .dvb_gpio = hauppauge_wintv_hvr_900_digital,
670 .ir_codes = ir_codes_hauppauge_new,
595 .decoder = EM28XX_TVP5150, 671 .decoder = EM28XX_TVP5150,
596 .input = { { 672 .input = { {
597 .type = EM28XX_VMUX_TELEVISION, 673 .type = EM28XX_VMUX_TELEVISION,
598 .vmux = TVP5150_COMPOSITE0, 674 .vmux = TVP5150_COMPOSITE0,
599 .amux = 0, 675 .amux = EM28XX_AMUX_VIDEO,
676 .gpio = hauppauge_wintv_hvr_900_analog,
600 }, { 677 }, {
601 .type = EM28XX_VMUX_COMPOSITE1, 678 .type = EM28XX_VMUX_COMPOSITE1,
602 .vmux = TVP5150_COMPOSITE1, 679 .vmux = TVP5150_COMPOSITE1,
603 .amux = 1, 680 .amux = EM28XX_AMUX_LINE_IN,
681 .gpio = hauppauge_wintv_hvr_900_analog,
604 }, { 682 }, {
605 .type = EM28XX_VMUX_SVIDEO, 683 .type = EM28XX_VMUX_SVIDEO,
606 .vmux = TVP5150_SVIDEO, 684 .vmux = TVP5150_SVIDEO,
607 .amux = 1, 685 .amux = EM28XX_AMUX_LINE_IN,
686 .gpio = hauppauge_wintv_hvr_900_analog,
608 } }, 687 } },
609 }, 688 },
610 [EM2880_BOARD_PINNACLE_PCTV_HD_PRO] = { 689 [EM2880_BOARD_PINNACLE_PCTV_HD_PRO] = {
611 .name = "Pinnacle PCTV HD Pro Stick", 690 .name = "Pinnacle PCTV HD Pro Stick",
612 .vchannels = 3,
613 .tda9887_conf = TDA9887_PRESENT,
614 .tuner_type = TUNER_XC2028, 691 .tuner_type = TUNER_XC2028,
692 .tuner_gpio = default_tuner_gpio,
615 .mts_firmware = 1, 693 .mts_firmware = 1,
616 .has_12mhz_i2s = 1,
617 .has_dvb = 1, 694 .has_dvb = 1,
695 .dvb_gpio = hauppauge_wintv_hvr_900_digital,
696 .ir_codes = ir_codes_pinnacle_pctv_hd,
618 .decoder = EM28XX_TVP5150, 697 .decoder = EM28XX_TVP5150,
619 .input = { { 698 .input = { {
620 .type = EM28XX_VMUX_TELEVISION, 699 .type = EM28XX_VMUX_TELEVISION,
621 .vmux = TVP5150_COMPOSITE0, 700 .vmux = TVP5150_COMPOSITE0,
622 .amux = 0, 701 .amux = EM28XX_AMUX_VIDEO,
702 .gpio = hauppauge_wintv_hvr_900_analog,
623 }, { 703 }, {
624 .type = EM28XX_VMUX_COMPOSITE1, 704 .type = EM28XX_VMUX_COMPOSITE1,
625 .vmux = TVP5150_COMPOSITE1, 705 .vmux = TVP5150_COMPOSITE1,
626 .amux = 1, 706 .amux = EM28XX_AMUX_LINE_IN,
707 .gpio = hauppauge_wintv_hvr_900_analog,
627 }, { 708 }, {
628 .type = EM28XX_VMUX_SVIDEO, 709 .type = EM28XX_VMUX_SVIDEO,
629 .vmux = TVP5150_SVIDEO, 710 .vmux = TVP5150_SVIDEO,
630 .amux = 1, 711 .amux = EM28XX_AMUX_LINE_IN,
712 .gpio = hauppauge_wintv_hvr_900_analog,
631 } }, 713 } },
632 }, 714 },
633 [EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600] = { 715 [EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600] = {
634 .name = "AMD ATI TV Wonder HD 600", 716 .name = "AMD ATI TV Wonder HD 600",
635 .vchannels = 3,
636 .tda9887_conf = TDA9887_PRESENT,
637 .tuner_type = TUNER_XC2028, 717 .tuner_type = TUNER_XC2028,
718 .tuner_gpio = default_tuner_gpio,
638 .mts_firmware = 1, 719 .mts_firmware = 1,
639 .has_12mhz_i2s = 1,
640 .has_dvb = 1, 720 .has_dvb = 1,
721 .dvb_gpio = hauppauge_wintv_hvr_900_digital,
722 .ir_codes = ir_codes_ati_tv_wonder_hd_600,
641 .decoder = EM28XX_TVP5150, 723 .decoder = EM28XX_TVP5150,
642 .input = { { 724 .input = { {
643 .type = EM28XX_VMUX_TELEVISION, 725 .type = EM28XX_VMUX_TELEVISION,
644 .vmux = TVP5150_COMPOSITE0, 726 .vmux = TVP5150_COMPOSITE0,
645 .amux = 0, 727 .amux = EM28XX_AMUX_VIDEO,
728 .gpio = hauppauge_wintv_hvr_900_analog,
646 }, { 729 }, {
647 .type = EM28XX_VMUX_COMPOSITE1, 730 .type = EM28XX_VMUX_COMPOSITE1,
648 .vmux = TVP5150_COMPOSITE1, 731 .vmux = TVP5150_COMPOSITE1,
649 .amux = 1, 732 .amux = EM28XX_AMUX_LINE_IN,
733 .gpio = hauppauge_wintv_hvr_900_analog,
650 }, { 734 }, {
651 .type = EM28XX_VMUX_SVIDEO, 735 .type = EM28XX_VMUX_SVIDEO,
652 .vmux = TVP5150_SVIDEO, 736 .vmux = TVP5150_SVIDEO,
653 .amux = 1, 737 .amux = EM28XX_AMUX_LINE_IN,
738 .gpio = hauppauge_wintv_hvr_900_analog,
654 } }, 739 } },
655 }, 740 },
656 [EM2880_BOARD_TERRATEC_HYBRID_XS] = { 741 [EM2880_BOARD_TERRATEC_HYBRID_XS] = {
657 .name = "Terratec Hybrid XS", 742 .name = "Terratec Hybrid XS",
658 .vchannels = 3, 743 .tuner_type = TUNER_XC2028,
659 .tda9887_conf = TDA9887_PRESENT, 744 .tuner_gpio = default_tuner_gpio,
660 .tuner_type = TUNER_XC2028, 745 .decoder = EM28XX_TVP5150,
661 .decoder = EM28XX_TVP5150,
662 .has_dvb = 1, 746 .has_dvb = 1,
747 .dvb_gpio = default_analog,
663 .input = { { 748 .input = { {
664 .type = EM28XX_VMUX_TELEVISION, 749 .type = EM28XX_VMUX_TELEVISION,
665 .vmux = TVP5150_COMPOSITE0, 750 .vmux = TVP5150_COMPOSITE0,
666 .amux = 0, 751 .amux = EM28XX_AMUX_VIDEO,
752 .gpio = default_analog,
667 }, { 753 }, {
668 .type = EM28XX_VMUX_COMPOSITE1, 754 .type = EM28XX_VMUX_COMPOSITE1,
669 .vmux = TVP5150_COMPOSITE1, 755 .vmux = TVP5150_COMPOSITE1,
670 .amux = 1, 756 .amux = EM28XX_AMUX_LINE_IN,
757 .gpio = default_analog,
671 }, { 758 }, {
672 .type = EM28XX_VMUX_SVIDEO, 759 .type = EM28XX_VMUX_SVIDEO,
673 .vmux = TVP5150_SVIDEO, 760 .vmux = TVP5150_SVIDEO,
674 .amux = 1, 761 .amux = EM28XX_AMUX_LINE_IN,
762 .gpio = default_analog,
675 } }, 763 } },
676 }, 764 },
 677 /* maybe there's a reason why Terratec sells the Hybrid XS 765 /* maybe there's a reason why Terratec sells the Hybrid XS
@@ -679,172 +767,190 @@ struct em28xx_board em28xx_boards[] = {
 679 maybe we'll need it later on */ 767 maybe we'll need it later on */
680 [EM2880_BOARD_TERRATEC_PRODIGY_XS] = { 768 [EM2880_BOARD_TERRATEC_PRODIGY_XS] = {
681 .name = "Terratec Prodigy XS", 769 .name = "Terratec Prodigy XS",
682 .vchannels = 3,
683 .tda9887_conf = TDA9887_PRESENT,
684 .tuner_type = TUNER_XC2028, 770 .tuner_type = TUNER_XC2028,
771 .tuner_gpio = default_tuner_gpio,
685 .decoder = EM28XX_TVP5150, 772 .decoder = EM28XX_TVP5150,
686 .input = { { 773 .input = { {
687 .type = EM28XX_VMUX_TELEVISION, 774 .type = EM28XX_VMUX_TELEVISION,
688 .vmux = TVP5150_COMPOSITE0, 775 .vmux = TVP5150_COMPOSITE0,
689 .amux = 0, 776 .amux = EM28XX_AMUX_VIDEO,
777 .gpio = hauppauge_wintv_hvr_900_analog,
690 }, { 778 }, {
691 .type = EM28XX_VMUX_COMPOSITE1, 779 .type = EM28XX_VMUX_COMPOSITE1,
692 .vmux = TVP5150_COMPOSITE1, 780 .vmux = TVP5150_COMPOSITE1,
693 .amux = 1, 781 .amux = EM28XX_AMUX_LINE_IN,
782 .gpio = hauppauge_wintv_hvr_900_analog,
694 }, { 783 }, {
695 .type = EM28XX_VMUX_SVIDEO, 784 .type = EM28XX_VMUX_SVIDEO,
696 .vmux = TVP5150_SVIDEO, 785 .vmux = TVP5150_SVIDEO,
697 .amux = 1, 786 .amux = EM28XX_AMUX_LINE_IN,
787 .gpio = hauppauge_wintv_hvr_900_analog,
698 } }, 788 } },
699 }, 789 },
700 [EM2820_BOARD_MSI_VOX_USB_2] = { 790 [EM2820_BOARD_MSI_VOX_USB_2] = {
701 .name = "MSI VOX USB 2.0", 791 .name = "MSI VOX USB 2.0",
702 .vchannels = 3,
703 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 792 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
704 .tda9887_conf = TDA9887_PRESENT | 793 .tda9887_conf = TDA9887_PRESENT |
705 TDA9887_PORT1_ACTIVE | 794 TDA9887_PORT1_ACTIVE |
706 TDA9887_PORT2_ACTIVE, 795 TDA9887_PORT2_ACTIVE,
707 .max_range_640_480 = 1, 796 .max_range_640_480 = 1,
708 797 .decoder = EM28XX_SAA711X,
709 .decoder = EM28XX_SAA7114,
710 .input = { { 798 .input = { {
711 .type = EM28XX_VMUX_TELEVISION, 799 .type = EM28XX_VMUX_TELEVISION,
712 .vmux = SAA7115_COMPOSITE4, 800 .vmux = SAA7115_COMPOSITE4,
713 .amux = 0, 801 .amux = EM28XX_AMUX_VIDEO,
714 }, { 802 }, {
715 .type = EM28XX_VMUX_COMPOSITE1, 803 .type = EM28XX_VMUX_COMPOSITE1,
716 .vmux = SAA7115_COMPOSITE0, 804 .vmux = SAA7115_COMPOSITE0,
717 .amux = 1, 805 .amux = EM28XX_AMUX_LINE_IN,
718 }, { 806 }, {
719 .type = EM28XX_VMUX_SVIDEO, 807 .type = EM28XX_VMUX_SVIDEO,
720 .vmux = SAA7115_SVIDEO3, 808 .vmux = SAA7115_SVIDEO3,
721 .amux = 1, 809 .amux = EM28XX_AMUX_LINE_IN,
722 } }, 810 } },
723 }, 811 },
724 [EM2800_BOARD_TERRATEC_CINERGY_200] = { 812 [EM2800_BOARD_TERRATEC_CINERGY_200] = {
725 .name = "Terratec Cinergy 200 USB", 813 .name = "Terratec Cinergy 200 USB",
726 .is_em2800 = 1, 814 .is_em2800 = 1,
727 .vchannels = 3,
728 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 815 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
729 .tda9887_conf = TDA9887_PRESENT, 816 .tda9887_conf = TDA9887_PRESENT,
730 .decoder = EM28XX_SAA7113, 817 .decoder = EM28XX_SAA711X,
731 .input = { { 818 .input = { {
732 .type = EM28XX_VMUX_TELEVISION, 819 .type = EM28XX_VMUX_TELEVISION,
733 .vmux = SAA7115_COMPOSITE2, 820 .vmux = SAA7115_COMPOSITE2,
734 .amux = 0, 821 .amux = EM28XX_AMUX_VIDEO,
735 }, { 822 }, {
736 .type = EM28XX_VMUX_COMPOSITE1, 823 .type = EM28XX_VMUX_COMPOSITE1,
737 .vmux = SAA7115_COMPOSITE0, 824 .vmux = SAA7115_COMPOSITE0,
738 .amux = 1, 825 .amux = EM28XX_AMUX_LINE_IN,
739 }, { 826 }, {
740 .type = EM28XX_VMUX_SVIDEO, 827 .type = EM28XX_VMUX_SVIDEO,
741 .vmux = SAA7115_SVIDEO3, 828 .vmux = SAA7115_SVIDEO3,
742 .amux = 1, 829 .amux = EM28XX_AMUX_LINE_IN,
743 } }, 830 } },
744 }, 831 },
745 [EM2800_BOARD_GRABBEEX_USB2800] = { 832 [EM2800_BOARD_GRABBEEX_USB2800] = {
746 .name = "eMPIA Technology, Inc. GrabBeeX+ Video Encoder", 833 .name = "eMPIA Technology, Inc. GrabBeeX+ Video Encoder",
747 .is_em2800 = 1, 834 .is_em2800 = 1,
748 .vchannels = 2, 835 .decoder = EM28XX_SAA711X,
749 .decoder = EM28XX_SAA7113, 836 .tuner_type = TUNER_ABSENT, /* capture only board */
750 .input = { { 837 .input = { {
751 .type = EM28XX_VMUX_COMPOSITE1, 838 .type = EM28XX_VMUX_COMPOSITE1,
752 .vmux = SAA7115_COMPOSITE0, 839 .vmux = SAA7115_COMPOSITE0,
753 .amux = 1, 840 .amux = EM28XX_AMUX_LINE_IN,
754 }, { 841 }, {
755 .type = EM28XX_VMUX_SVIDEO, 842 .type = EM28XX_VMUX_SVIDEO,
756 .vmux = SAA7115_SVIDEO3, 843 .vmux = SAA7115_SVIDEO3,
757 .amux = 1, 844 .amux = EM28XX_AMUX_LINE_IN,
758 } }, 845 } },
759 }, 846 },
760 [EM2800_BOARD_LEADTEK_WINFAST_USBII] = { 847 [EM2800_BOARD_LEADTEK_WINFAST_USBII] = {
761 .name = "Leadtek Winfast USB II", 848 .name = "Leadtek Winfast USB II",
762 .is_em2800 = 1, 849 .is_em2800 = 1,
763 .vchannels = 3,
764 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 850 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
765 .tda9887_conf = TDA9887_PRESENT, 851 .tda9887_conf = TDA9887_PRESENT,
766 .decoder = EM28XX_SAA7113, 852 .decoder = EM28XX_SAA711X,
767 .input = { { 853 .input = { {
768 .type = EM28XX_VMUX_TELEVISION, 854 .type = EM28XX_VMUX_TELEVISION,
769 .vmux = SAA7115_COMPOSITE2, 855 .vmux = SAA7115_COMPOSITE2,
770 .amux = 0, 856 .amux = EM28XX_AMUX_VIDEO,
771 }, { 857 }, {
772 .type = EM28XX_VMUX_COMPOSITE1, 858 .type = EM28XX_VMUX_COMPOSITE1,
773 .vmux = SAA7115_COMPOSITE0, 859 .vmux = SAA7115_COMPOSITE0,
774 .amux = 1, 860 .amux = EM28XX_AMUX_LINE_IN,
775 }, { 861 }, {
776 .type = EM28XX_VMUX_SVIDEO, 862 .type = EM28XX_VMUX_SVIDEO,
777 .vmux = SAA7115_SVIDEO3, 863 .vmux = SAA7115_SVIDEO3,
778 .amux = 1, 864 .amux = EM28XX_AMUX_LINE_IN,
779 } }, 865 } },
780 }, 866 },
781 [EM2800_BOARD_KWORLD_USB2800] = { 867 [EM2800_BOARD_KWORLD_USB2800] = {
782 .name = "Kworld USB2800", 868 .name = "Kworld USB2800",
783 .is_em2800 = 1, 869 .is_em2800 = 1,
784 .vchannels = 3,
785 .tuner_type = TUNER_PHILIPS_FCV1236D, 870 .tuner_type = TUNER_PHILIPS_FCV1236D,
786 .tda9887_conf = TDA9887_PRESENT, 871 .tda9887_conf = TDA9887_PRESENT,
787 .decoder = EM28XX_SAA7113, 872 .decoder = EM28XX_SAA711X,
788 .input = { { 873 .input = { {
789 .type = EM28XX_VMUX_TELEVISION, 874 .type = EM28XX_VMUX_TELEVISION,
790 .vmux = SAA7115_COMPOSITE2, 875 .vmux = SAA7115_COMPOSITE2,
791 .amux = 0, 876 .amux = EM28XX_AMUX_VIDEO,
792 }, { 877 }, {
793 .type = EM28XX_VMUX_COMPOSITE1, 878 .type = EM28XX_VMUX_COMPOSITE1,
794 .vmux = SAA7115_COMPOSITE0, 879 .vmux = SAA7115_COMPOSITE0,
795 .amux = 1, 880 .amux = EM28XX_AMUX_LINE_IN,
796 }, { 881 }, {
797 .type = EM28XX_VMUX_SVIDEO, 882 .type = EM28XX_VMUX_SVIDEO,
798 .vmux = SAA7115_SVIDEO3, 883 .vmux = SAA7115_SVIDEO3,
799 .amux = 1, 884 .amux = EM28XX_AMUX_LINE_IN,
800 } }, 885 } },
801 }, 886 },
802 [EM2820_BOARD_PINNACLE_DVC_90] = { 887 [EM2820_BOARD_PINNACLE_DVC_90] = {
803 .name = "Pinnacle Dazzle DVC 90/DVC 100", 888 .name = "Pinnacle Dazzle DVC 90/DVC 100",
804 .vchannels = 3, 889 .tuner_type = TUNER_ABSENT, /* capture only board */
805 .tuner_type = TUNER_ABSENT, 890 .decoder = EM28XX_SAA711X,
806 .decoder = EM28XX_SAA7113, 891 .input = { {
807 .input = { {
808 .type = EM28XX_VMUX_COMPOSITE1, 892 .type = EM28XX_VMUX_COMPOSITE1,
809 .vmux = SAA7115_COMPOSITE0, 893 .vmux = SAA7115_COMPOSITE0,
810 .amux = 1, 894 .amux = EM28XX_AMUX_LINE_IN,
811 }, { 895 }, {
812 .type = EM28XX_VMUX_SVIDEO, 896 .type = EM28XX_VMUX_SVIDEO,
813 .vmux = SAA7115_SVIDEO3, 897 .vmux = SAA7115_SVIDEO3,
814 .amux = 1, 898 .amux = EM28XX_AMUX_LINE_IN,
815 } }, 899 } },
816 }, 900 },
817 [EM2800_BOARD_VGEAR_POCKETTV] = { 901 [EM2800_BOARD_VGEAR_POCKETTV] = {
818 .name = "V-Gear PocketTV", 902 .name = "V-Gear PocketTV",
819 .is_em2800 = 1, 903 .is_em2800 = 1,
820 .vchannels = 3,
821 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 904 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
822 .tda9887_conf = TDA9887_PRESENT, 905 .tda9887_conf = TDA9887_PRESENT,
823 .decoder = EM28XX_SAA7113, 906 .decoder = EM28XX_SAA711X,
824 .input = { { 907 .input = { {
825 .type = EM28XX_VMUX_TELEVISION, 908 .type = EM28XX_VMUX_TELEVISION,
826 .vmux = SAA7115_COMPOSITE2, 909 .vmux = SAA7115_COMPOSITE2,
827 .amux = 0, 910 .amux = EM28XX_AMUX_VIDEO,
828 }, { 911 }, {
829 .type = EM28XX_VMUX_COMPOSITE1, 912 .type = EM28XX_VMUX_COMPOSITE1,
830 .vmux = SAA7115_COMPOSITE0, 913 .vmux = SAA7115_COMPOSITE0,
831 .amux = 1, 914 .amux = EM28XX_AMUX_LINE_IN,
915 }, {
916 .type = EM28XX_VMUX_SVIDEO,
917 .vmux = SAA7115_SVIDEO3,
918 .amux = EM28XX_AMUX_LINE_IN,
919 } },
920 },
921 [EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2] = {
922 .name = "Pixelview PlayTV Box 4 USB 2.0",
923 .tda9887_conf = TDA9887_PRESENT,
924 .tuner_type = TUNER_YMEC_TVF_5533MF,
925 .decoder = EM28XX_SAA711X,
926 .input = { {
927 .type = EM28XX_VMUX_TELEVISION,
928 .vmux = SAA7115_COMPOSITE2,
929 .amux = EM28XX_AMUX_VIDEO,
930 .aout = EM28XX_AOUT_MONO | /* I2S */
931 EM28XX_AOUT_MASTER, /* Line out pin */
932 }, {
933 .type = EM28XX_VMUX_COMPOSITE1,
934 .vmux = SAA7115_COMPOSITE0,
935 .amux = EM28XX_AMUX_LINE_IN,
832 }, { 936 }, {
833 .type = EM28XX_VMUX_SVIDEO, 937 .type = EM28XX_VMUX_SVIDEO,
834 .vmux = SAA7115_SVIDEO3, 938 .vmux = SAA7115_SVIDEO3,
835 .amux = 1, 939 .amux = EM28XX_AMUX_LINE_IN,
836 } }, 940 } },
837 }, 941 },
838 [EM2820_BOARD_PROLINK_PLAYTV_USB2] = { 942 [EM2820_BOARD_PROLINK_PLAYTV_USB2] = {
839 .name = "Pixelview Prolink PlayTV USB 2.0", 943 .name = "Pixelview Prolink PlayTV USB 2.0",
840 .vchannels = 3, 944 .has_snapshot_button = 1,
841 .tda9887_conf = TDA9887_PRESENT, 945 .tda9887_conf = TDA9887_PRESENT,
842 .tuner_type = TUNER_YMEC_TVF_5533MF, 946 .tuner_type = TUNER_YMEC_TVF_5533MF,
843 .decoder = EM28XX_SAA7113, 947 .decoder = EM28XX_SAA711X,
844 .input = { { 948 .input = { {
845 .type = EM28XX_VMUX_TELEVISION, 949 .type = EM28XX_VMUX_TELEVISION,
846 .vmux = SAA7115_COMPOSITE2, 950 .vmux = SAA7115_COMPOSITE2,
847 .amux = EM28XX_AMUX_LINE_IN, 951 .amux = EM28XX_AMUX_VIDEO,
952 .aout = EM28XX_AOUT_MONO | /* I2S */
953 EM28XX_AOUT_MASTER, /* Line out pin */
848 }, { 954 }, {
849 .type = EM28XX_VMUX_COMPOSITE1, 955 .type = EM28XX_VMUX_COMPOSITE1,
850 .vmux = SAA7115_COMPOSITE0, 956 .vmux = SAA7115_COMPOSITE0,
@@ -856,228 +962,252 @@ struct em28xx_board em28xx_boards[] = {
856 } }, 962 } },
857 }, 963 },
858 [EM2860_BOARD_POINTNIX_INTRAORAL_CAMERA] = { 964 [EM2860_BOARD_POINTNIX_INTRAORAL_CAMERA] = {
859 .name = "PointNix Intra-Oral Camera", 965 .name = "PointNix Intra-Oral Camera",
860 .has_snapshot_button = 1, 966 .has_snapshot_button = 1,
861 .vchannels = 1, 967 .tda9887_conf = TDA9887_PRESENT,
862 .tda9887_conf = TDA9887_PRESENT, 968 .tuner_type = TUNER_ABSENT,
863 .tuner_type = TUNER_ABSENT, 969 .decoder = EM28XX_SAA711X,
864 .decoder = EM28XX_SAA7113, 970 .input = { {
865 .input = { {
866 .type = EM28XX_VMUX_SVIDEO, 971 .type = EM28XX_VMUX_SVIDEO,
867 .vmux = SAA7115_SVIDEO3, 972 .vmux = SAA7115_SVIDEO3,
868 .amux = 0, 973 .amux = EM28XX_AMUX_VIDEO,
869 } }, 974 } },
870 }, 975 },
871 [EM2880_BOARD_MSI_DIGIVOX_AD] = { 976 [EM2880_BOARD_MSI_DIGIVOX_AD] = {
872 .name = "MSI DigiVox A/D", 977 .name = "MSI DigiVox A/D",
873 .valid = EM28XX_BOARD_NOT_VALIDATED, 978 .valid = EM28XX_BOARD_NOT_VALIDATED,
874 .vchannels = 3,
875 .tuner_type = TUNER_XC2028, 979 .tuner_type = TUNER_XC2028,
980 .tuner_gpio = default_tuner_gpio,
876 .decoder = EM28XX_TVP5150, 981 .decoder = EM28XX_TVP5150,
877 .input = { { 982 .input = { {
878 .type = EM28XX_VMUX_TELEVISION, 983 .type = EM28XX_VMUX_TELEVISION,
879 .vmux = TVP5150_COMPOSITE0, 984 .vmux = TVP5150_COMPOSITE0,
880 .amux = 0, 985 .amux = EM28XX_AMUX_VIDEO,
986 .gpio = em2880_msi_digivox_ad_analog,
881 }, { 987 }, {
882 .type = EM28XX_VMUX_COMPOSITE1, 988 .type = EM28XX_VMUX_COMPOSITE1,
883 .vmux = TVP5150_COMPOSITE1, 989 .vmux = TVP5150_COMPOSITE1,
884 .amux = 1, 990 .amux = EM28XX_AMUX_LINE_IN,
991 .gpio = em2880_msi_digivox_ad_analog,
885 }, { 992 }, {
886 .type = EM28XX_VMUX_SVIDEO, 993 .type = EM28XX_VMUX_SVIDEO,
887 .vmux = TVP5150_SVIDEO, 994 .vmux = TVP5150_SVIDEO,
888 .amux = 1, 995 .amux = EM28XX_AMUX_LINE_IN,
996 .gpio = em2880_msi_digivox_ad_analog,
889 } }, 997 } },
890 }, 998 },
891 [EM2880_BOARD_MSI_DIGIVOX_AD_II] = { 999 [EM2880_BOARD_MSI_DIGIVOX_AD_II] = {
892 .name = "MSI DigiVox A/D II", 1000 .name = "MSI DigiVox A/D II",
893 .valid = EM28XX_BOARD_NOT_VALIDATED, 1001 .valid = EM28XX_BOARD_NOT_VALIDATED,
894 .vchannels = 3,
895 .tuner_type = TUNER_XC2028, 1002 .tuner_type = TUNER_XC2028,
1003 .tuner_gpio = default_tuner_gpio,
896 .decoder = EM28XX_TVP5150, 1004 .decoder = EM28XX_TVP5150,
897 .input = { { 1005 .input = { {
898 .type = EM28XX_VMUX_TELEVISION, 1006 .type = EM28XX_VMUX_TELEVISION,
899 .vmux = TVP5150_COMPOSITE0, 1007 .vmux = TVP5150_COMPOSITE0,
900 .amux = 0, 1008 .amux = EM28XX_AMUX_VIDEO,
1009 .gpio = em2880_msi_digivox_ad_analog,
901 }, { 1010 }, {
902 .type = EM28XX_VMUX_COMPOSITE1, 1011 .type = EM28XX_VMUX_COMPOSITE1,
903 .vmux = TVP5150_COMPOSITE1, 1012 .vmux = TVP5150_COMPOSITE1,
904 .amux = 1, 1013 .amux = EM28XX_AMUX_LINE_IN,
1014 .gpio = em2880_msi_digivox_ad_analog,
905 }, { 1015 }, {
906 .type = EM28XX_VMUX_SVIDEO, 1016 .type = EM28XX_VMUX_SVIDEO,
907 .vmux = TVP5150_SVIDEO, 1017 .vmux = TVP5150_SVIDEO,
908 .amux = 1, 1018 .amux = EM28XX_AMUX_LINE_IN,
1019 .gpio = em2880_msi_digivox_ad_analog,
909 } }, 1020 } },
910 }, 1021 },
911 [EM2880_BOARD_KWORLD_DVB_305U] = { 1022 [EM2880_BOARD_KWORLD_DVB_305U] = {
912 .name = "KWorld DVB-T 305U", 1023 .name = "KWorld DVB-T 305U",
913 .valid = EM28XX_BOARD_NOT_VALIDATED, 1024 .valid = EM28XX_BOARD_NOT_VALIDATED,
914 .vchannels = 3,
915 .tuner_type = TUNER_XC2028, 1025 .tuner_type = TUNER_XC2028,
1026 .tuner_gpio = default_tuner_gpio,
916 .decoder = EM28XX_TVP5150, 1027 .decoder = EM28XX_TVP5150,
917 .input = { { 1028 .input = { {
918 .type = EM28XX_VMUX_TELEVISION, 1029 .type = EM28XX_VMUX_TELEVISION,
919 .vmux = TVP5150_COMPOSITE0, 1030 .vmux = TVP5150_COMPOSITE0,
920 .amux = 0, 1031 .amux = EM28XX_AMUX_VIDEO,
921 }, { 1032 }, {
922 .type = EM28XX_VMUX_COMPOSITE1, 1033 .type = EM28XX_VMUX_COMPOSITE1,
923 .vmux = TVP5150_COMPOSITE1, 1034 .vmux = TVP5150_COMPOSITE1,
924 .amux = 1, 1035 .amux = EM28XX_AMUX_LINE_IN,
925 }, { 1036 }, {
926 .type = EM28XX_VMUX_SVIDEO, 1037 .type = EM28XX_VMUX_SVIDEO,
927 .vmux = TVP5150_SVIDEO, 1038 .vmux = TVP5150_SVIDEO,
928 .amux = 1, 1039 .amux = EM28XX_AMUX_LINE_IN,
929 } }, 1040 } },
930 }, 1041 },
931 [EM2880_BOARD_KWORLD_DVB_310U] = { 1042 [EM2880_BOARD_KWORLD_DVB_310U] = {
932 .name = "KWorld DVB-T 310U", 1043 .name = "KWorld DVB-T 310U",
933 .vchannels = 3,
934 .tuner_type = TUNER_XC2028, 1044 .tuner_type = TUNER_XC2028,
1045 .tuner_gpio = default_tuner_gpio,
935 .has_dvb = 1, 1046 .has_dvb = 1,
1047 .dvb_gpio = default_digital,
936 .mts_firmware = 1, 1048 .mts_firmware = 1,
937 .decoder = EM28XX_TVP5150, 1049 .decoder = EM28XX_TVP5150,
938 .input = { { 1050 .input = { {
939 .type = EM28XX_VMUX_TELEVISION, 1051 .type = EM28XX_VMUX_TELEVISION,
940 .vmux = TVP5150_COMPOSITE0, 1052 .vmux = TVP5150_COMPOSITE0,
941 .amux = EM28XX_AMUX_VIDEO, 1053 .amux = EM28XX_AMUX_VIDEO,
1054 .gpio = default_analog,
942 }, { 1055 }, {
943 .type = EM28XX_VMUX_COMPOSITE1, 1056 .type = EM28XX_VMUX_COMPOSITE1,
944 .vmux = TVP5150_COMPOSITE1, 1057 .vmux = TVP5150_COMPOSITE1,
945 .amux = EM28XX_AMUX_AC97_LINE_IN, 1058 .amux = EM28XX_AMUX_LINE_IN,
1059 .gpio = default_analog,
946 }, { /* S-video has not been tested yet */ 1060 }, { /* S-video has not been tested yet */
947 .type = EM28XX_VMUX_SVIDEO, 1061 .type = EM28XX_VMUX_SVIDEO,
948 .vmux = TVP5150_SVIDEO, 1062 .vmux = TVP5150_SVIDEO,
949 .amux = EM28XX_AMUX_AC97_LINE_IN, 1063 .amux = EM28XX_AMUX_LINE_IN,
1064 .gpio = default_analog,
950 } }, 1065 } },
951 }, 1066 },
952 [EM2881_BOARD_DNT_DA2_HYBRID] = { 1067 [EM2881_BOARD_DNT_DA2_HYBRID] = {
953 .name = "DNT DA2 Hybrid", 1068 .name = "DNT DA2 Hybrid",
954 .valid = EM28XX_BOARD_NOT_VALIDATED, 1069 .valid = EM28XX_BOARD_NOT_VALIDATED,
955 .vchannels = 3,
956 .tuner_type = TUNER_XC2028, 1070 .tuner_type = TUNER_XC2028,
1071 .tuner_gpio = default_tuner_gpio,
957 .decoder = EM28XX_TVP5150, 1072 .decoder = EM28XX_TVP5150,
958 .input = { { 1073 .input = { {
959 .type = EM28XX_VMUX_TELEVISION, 1074 .type = EM28XX_VMUX_TELEVISION,
960 .vmux = TVP5150_COMPOSITE0, 1075 .vmux = TVP5150_COMPOSITE0,
961 .amux = 0, 1076 .amux = EM28XX_AMUX_VIDEO,
1077 .gpio = default_analog,
962 }, { 1078 }, {
963 .type = EM28XX_VMUX_COMPOSITE1, 1079 .type = EM28XX_VMUX_COMPOSITE1,
964 .vmux = TVP5150_COMPOSITE1, 1080 .vmux = TVP5150_COMPOSITE1,
965 .amux = 1, 1081 .amux = EM28XX_AMUX_LINE_IN,
1082 .gpio = default_analog,
966 }, { 1083 }, {
967 .type = EM28XX_VMUX_SVIDEO, 1084 .type = EM28XX_VMUX_SVIDEO,
968 .vmux = TVP5150_SVIDEO, 1085 .vmux = TVP5150_SVIDEO,
969 .amux = 1, 1086 .amux = EM28XX_AMUX_LINE_IN,
1087 .gpio = default_analog,
970 } }, 1088 } },
971 }, 1089 },
972 [EM2881_BOARD_PINNACLE_HYBRID_PRO] = { 1090 [EM2881_BOARD_PINNACLE_HYBRID_PRO] = {
973 .name = "Pinnacle Hybrid Pro", 1091 .name = "Pinnacle Hybrid Pro",
974 .valid = EM28XX_BOARD_NOT_VALIDATED, 1092 .valid = EM28XX_BOARD_NOT_VALIDATED,
975 .vchannels = 3,
976 .tuner_type = TUNER_XC2028, 1093 .tuner_type = TUNER_XC2028,
1094 .tuner_gpio = default_tuner_gpio,
977 .decoder = EM28XX_TVP5150, 1095 .decoder = EM28XX_TVP5150,
978 .input = { { 1096 .input = { {
979 .type = EM28XX_VMUX_TELEVISION, 1097 .type = EM28XX_VMUX_TELEVISION,
980 .vmux = TVP5150_COMPOSITE0, 1098 .vmux = TVP5150_COMPOSITE0,
981 .amux = 0, 1099 .amux = EM28XX_AMUX_VIDEO,
1100 .gpio = default_analog,
982 }, { 1101 }, {
983 .type = EM28XX_VMUX_COMPOSITE1, 1102 .type = EM28XX_VMUX_COMPOSITE1,
984 .vmux = TVP5150_COMPOSITE1, 1103 .vmux = TVP5150_COMPOSITE1,
985 .amux = 1, 1104 .amux = EM28XX_AMUX_LINE_IN,
1105 .gpio = default_analog,
986 }, { 1106 }, {
987 .type = EM28XX_VMUX_SVIDEO, 1107 .type = EM28XX_VMUX_SVIDEO,
988 .vmux = TVP5150_SVIDEO, 1108 .vmux = TVP5150_SVIDEO,
989 .amux = 1, 1109 .amux = EM28XX_AMUX_LINE_IN,
1110 .gpio = default_analog,
990 } }, 1111 } },
991 }, 1112 },
992 [EM2882_BOARD_PINNACLE_HYBRID_PRO] = { 1113 [EM2882_BOARD_PINNACLE_HYBRID_PRO] = {
993 .name = "Pinnacle Hybrid Pro (2)", 1114 .name = "Pinnacle Hybrid Pro (2)",
994 .valid = EM28XX_BOARD_NOT_VALIDATED, 1115 .valid = EM28XX_BOARD_NOT_VALIDATED,
995 .vchannels = 3,
996 .tuner_type = TUNER_XC2028, 1116 .tuner_type = TUNER_XC2028,
1117 .tuner_gpio = default_tuner_gpio,
997 .mts_firmware = 1, 1118 .mts_firmware = 1,
998 .decoder = EM28XX_TVP5150, 1119 .decoder = EM28XX_TVP5150,
999 .input = { { 1120 .input = { {
1000 .type = EM28XX_VMUX_TELEVISION, 1121 .type = EM28XX_VMUX_TELEVISION,
1001 .vmux = TVP5150_COMPOSITE0, 1122 .vmux = TVP5150_COMPOSITE0,
1002 .amux = 0, 1123 .amux = EM28XX_AMUX_VIDEO,
1124 .gpio = hauppauge_wintv_hvr_900_analog,
1003 }, { 1125 }, {
1004 .type = EM28XX_VMUX_COMPOSITE1, 1126 .type = EM28XX_VMUX_COMPOSITE1,
1005 .vmux = TVP5150_COMPOSITE1, 1127 .vmux = TVP5150_COMPOSITE1,
1006 .amux = 1, 1128 .amux = EM28XX_AMUX_LINE_IN,
1129 .gpio = hauppauge_wintv_hvr_900_analog,
1007 }, { 1130 }, {
1008 .type = EM28XX_VMUX_SVIDEO, 1131 .type = EM28XX_VMUX_SVIDEO,
1009 .vmux = TVP5150_SVIDEO, 1132 .vmux = TVP5150_SVIDEO,
1010 .amux = 1, 1133 .amux = EM28XX_AMUX_LINE_IN,
1134 .gpio = hauppauge_wintv_hvr_900_analog,
1011 } }, 1135 } },
1012 }, 1136 },
1013 [EM2882_BOARD_KWORLD_VS_DVBT] = { 1137 [EM2882_BOARD_KWORLD_VS_DVBT] = {
1014 .name = "Kworld VS-DVB-T 323UR", 1138 .name = "Kworld VS-DVB-T 323UR",
1015 .valid = EM28XX_BOARD_NOT_VALIDATED, 1139 .valid = EM28XX_BOARD_NOT_VALIDATED,
1016 .vchannels = 3,
1017 .tuner_type = TUNER_XC2028, 1140 .tuner_type = TUNER_XC2028,
1141 .tuner_gpio = default_tuner_gpio,
1018 .decoder = EM28XX_TVP5150, 1142 .decoder = EM28XX_TVP5150,
1019 .input = { { 1143 .input = { {
1020 .type = EM28XX_VMUX_TELEVISION, 1144 .type = EM28XX_VMUX_TELEVISION,
1021 .vmux = TVP5150_COMPOSITE0, 1145 .vmux = TVP5150_COMPOSITE0,
1022 .amux = 0, 1146 .amux = EM28XX_AMUX_VIDEO,
1023 }, { 1147 }, {
1024 .type = EM28XX_VMUX_COMPOSITE1, 1148 .type = EM28XX_VMUX_COMPOSITE1,
1025 .vmux = TVP5150_COMPOSITE1, 1149 .vmux = TVP5150_COMPOSITE1,
1026 .amux = 1, 1150 .amux = EM28XX_AMUX_LINE_IN,
1027 }, { 1151 }, {
1028 .type = EM28XX_VMUX_SVIDEO, 1152 .type = EM28XX_VMUX_SVIDEO,
1029 .vmux = TVP5150_SVIDEO, 1153 .vmux = TVP5150_SVIDEO,
1030 .amux = 1, 1154 .amux = EM28XX_AMUX_LINE_IN,
1031 } }, 1155 } },
1032 }, 1156 },
1033 [EM2882_BOARD_TERRATEC_HYBRID_XS] = { 1157 [EM2882_BOARD_TERRATEC_HYBRID_XS] = {
1034 .name = "Terratec Hybrid XS (em2882)", 1158 .name = "Terratec Hybrid XS (em2882)",
1035 .valid = EM28XX_BOARD_NOT_VALIDATED, 1159 .valid = EM28XX_BOARD_NOT_VALIDATED,
1036 .vchannels = 3,
1037 .tuner_type = TUNER_XC2028, 1160 .tuner_type = TUNER_XC2028,
1161 .tuner_gpio = default_tuner_gpio,
1038 .decoder = EM28XX_TVP5150, 1162 .decoder = EM28XX_TVP5150,
1039 .input = { { 1163 .input = { {
1040 .type = EM28XX_VMUX_TELEVISION, 1164 .type = EM28XX_VMUX_TELEVISION,
1041 .vmux = TVP5150_COMPOSITE0, 1165 .vmux = TVP5150_COMPOSITE0,
1042 .amux = 0, 1166 .amux = EM28XX_AMUX_VIDEO,
1167 .gpio = hauppauge_wintv_hvr_900_analog,
1043 }, { 1168 }, {
1044 .type = EM28XX_VMUX_COMPOSITE1, 1169 .type = EM28XX_VMUX_COMPOSITE1,
1045 .vmux = TVP5150_COMPOSITE1, 1170 .vmux = TVP5150_COMPOSITE1,
1046 .amux = 1, 1171 .amux = EM28XX_AMUX_LINE_IN,
1172 .gpio = hauppauge_wintv_hvr_900_analog,
1047 }, { 1173 }, {
1048 .type = EM28XX_VMUX_SVIDEO, 1174 .type = EM28XX_VMUX_SVIDEO,
1049 .vmux = TVP5150_SVIDEO, 1175 .vmux = TVP5150_SVIDEO,
1050 .amux = 1, 1176 .amux = EM28XX_AMUX_LINE_IN,
1177 .gpio = hauppauge_wintv_hvr_900_analog,
1051 } }, 1178 } },
1052 }, 1179 },
1053 [EM2883_BOARD_KWORLD_HYBRID_A316] = { 1180 [EM2883_BOARD_KWORLD_HYBRID_A316] = {
1054 .name = "Kworld PlusTV HD Hybrid 330", 1181 .name = "Kworld PlusTV HD Hybrid 330",
1055 .valid = EM28XX_BOARD_NOT_VALIDATED,
1056 .vchannels = 3,
1057 .is_em2800 = 0,
1058 .tuner_type = TUNER_XC2028, 1182 .tuner_type = TUNER_XC2028,
1183 .tuner_gpio = default_tuner_gpio,
1059 .decoder = EM28XX_TVP5150, 1184 .decoder = EM28XX_TVP5150,
1060 .input = { { 1185 .mts_firmware = 1,
1186 .has_dvb = 1,
1187 .dvb_gpio = default_digital,
1188 .input = { {
1061 .type = EM28XX_VMUX_TELEVISION, 1189 .type = EM28XX_VMUX_TELEVISION,
1062 .vmux = TVP5150_COMPOSITE0, 1190 .vmux = TVP5150_COMPOSITE0,
1063 .amux = 0, 1191 .amux = EM28XX_AMUX_VIDEO,
1192 .gpio = default_analog,
1064 }, { 1193 }, {
1065 .type = EM28XX_VMUX_COMPOSITE1, 1194 .type = EM28XX_VMUX_COMPOSITE1,
1066 .vmux = TVP5150_COMPOSITE1, 1195 .vmux = TVP5150_COMPOSITE1,
1067 .amux = 1, 1196 .amux = EM28XX_AMUX_LINE_IN,
1197 .gpio = hauppauge_wintv_hvr_900_analog,
1068 }, { 1198 }, {
1069 .type = EM28XX_VMUX_SVIDEO, 1199 .type = EM28XX_VMUX_SVIDEO,
1070 .vmux = TVP5150_SVIDEO, 1200 .vmux = TVP5150_SVIDEO,
1071 .amux = 1, 1201 .amux = EM28XX_AMUX_LINE_IN,
1202 .gpio = hauppauge_wintv_hvr_900_analog,
1072 } }, 1203 } },
1073 }, 1204 },
1074 [EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU] = { 1205 [EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU] = {
1075 .name = "Compro VideoMate ForYou/Stereo", 1206 .name = "Compro VideoMate ForYou/Stereo",
1076 .vchannels = 2,
1077 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 1207 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
1078 .tda9887_conf = TDA9887_PRESENT, 1208 .tda9887_conf = TDA9887_PRESENT,
1079 .decoder = EM28XX_TVP5150, 1209 .decoder = EM28XX_TVP5150,
1080 .input = { { 1210 .input = { {
1081 .type = EM28XX_VMUX_TELEVISION, 1211 .type = EM28XX_VMUX_TELEVISION,
1082 .vmux = TVP5150_COMPOSITE0, 1212 .vmux = TVP5150_COMPOSITE0,
1083 .amux = EM28XX_AMUX_LINE_IN, 1213 .amux = EM28XX_AMUX_LINE_IN,
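The hunks above convert the board table from magic numbers to named configuration: the .amux values become enum constants (EM28XX_AMUX_VIDEO, EM28XX_AMUX_LINE_IN), the unused .vchannels counters are dropped, the SAA7113/SAA7114 decoders collapse into a single EM28XX_SAA711X, and each input (plus the tuner and DVB paths) now carries a pointer to a GPIO register sequence. A minimal sketch of one entry in the converted style; the field names follow the diff, while the stand-in types and values are purely illustrative:

enum amux { AMUX_VIDEO, AMUX_LINE_IN };
enum vmux_type { VMUX_TELEVISION, VMUX_SVIDEO };

struct reg_seq { int reg, val, mask, sleep_ms; };

static const struct reg_seq demo_analog[] = {
	{ 0x08, 0x6d, ~0x10, 10 },	/* illustrative values only */
	{ -1, -1, -1, -1 },		/* terminator */
};

struct board_input {
	enum vmux_type type;
	int vmux;
	enum amux amux;
	const struct reg_seq *gpio;	/* per-input GPIO sequence */
};

struct board {
	const char *name;
	int tuner_type;
	const struct reg_seq *tuner_gpio;	/* replaces tun_*_gpio pointers */
	struct board_input input[3];
};

static const struct board demo_board = {
	.name       = "Demo hybrid board",
	.tuner_type = 1,
	.tuner_gpio = demo_analog,
	.input = { {
		.type = VMUX_TELEVISION,
		.vmux = 0,
		.amux = AMUX_VIDEO,	/* was a bare 0 before the conversion */
		.gpio = demo_analog,
	} },
};

int main(void) { return demo_board.input[0].amux; }

Keeping the GPIO sequences inside the board entry is what later lets em28xx_pre_card_setup() and the tuner callback drop their per-board switch statements.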
@@ -1101,7 +1231,7 @@ struct usb_device_id em28xx_id_table [] = {
1101 { USB_DEVICE(0xeb1a, 0x2820), 1231 { USB_DEVICE(0xeb1a, 0x2820),
1102 .driver_info = EM2820_BOARD_UNKNOWN }, 1232 .driver_info = EM2820_BOARD_UNKNOWN },
1103 { USB_DEVICE(0xeb1a, 0x2821), 1233 { USB_DEVICE(0xeb1a, 0x2821),
1104 .driver_info = EM2820_BOARD_PROLINK_PLAYTV_USB2 }, 1234 .driver_info = EM2820_BOARD_UNKNOWN },
1105 { USB_DEVICE(0xeb1a, 0x2860), 1235 { USB_DEVICE(0xeb1a, 0x2860),
1106 .driver_info = EM2820_BOARD_UNKNOWN }, 1236 .driver_info = EM2820_BOARD_UNKNOWN },
1107 { USB_DEVICE(0xeb1a, 0x2861), 1237 { USB_DEVICE(0xeb1a, 0x2861),
@@ -1164,8 +1294,8 @@ struct usb_device_id em28xx_id_table [] = {
1164 .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, 1294 .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 },
1165 { USB_DEVICE(0x2040, 0x651b), /* RP HVR-950 */ 1295 { USB_DEVICE(0x2040, 0x651b), /* RP HVR-950 */
1166 .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, 1296 .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 },
1167 { USB_DEVICE(0x2040, 0x651f), /* HCW HVR-850 */ 1297 { USB_DEVICE(0x2040, 0x651f),
1168 .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 }, 1298 .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 },
1169 { USB_DEVICE(0x0438, 0xb002), 1299 { USB_DEVICE(0x0438, 0xb002),
1170 .driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 }, 1300 .driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 },
1171 { USB_DEVICE(0x2001, 0xf112), 1301 { USB_DEVICE(0x2001, 0xf112),
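In the device-ID hunks, the generic eb1a:2821 ID no longer maps straight to the Prolink board (it falls back to EM2820_BOARD_UNKNOWN and is resolved later through the EEPROM hash table), and 2040:651f gets its own HVR-850 board entry instead of reusing the HVR-950 one. A rough model of how a driver_info table ties a matched USB ID to a board index; the real matching is done by the USB core, so the linear scan here is only for illustration:

#include <stdint.h>
#include <stdio.h>

struct usb_id { uint16_t vendor, product; unsigned long driver_info; };

enum { BOARD_UNKNOWN = 0, BOARD_HVR_950 = 1, BOARD_HVR_850 = 2 };

static const struct usb_id id_table[] = {
	{ 0x2040, 0x651b, BOARD_HVR_950 },
	{ 0x2040, 0x651f, BOARD_HVR_850 },	/* split off in this hunk */
	{ 0xeb1a, 0x2821, BOARD_UNKNOWN },	/* resolved later via EEPROM hash */
};

static unsigned long lookup_board(uint16_t vendor, uint16_t product)
{
	size_t i;

	for (i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++)
		if (id_table[i].vendor == vendor && id_table[i].product == product)
			return id_table[i].driver_info;
	return BOARD_UNKNOWN;
}

int main(void)
{
	printf("0x2040:0x651f -> board %lu\n", lookup_board(0x2040, 0x651f));
	return 0;
}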
@@ -1189,78 +1319,12 @@ struct usb_device_id em28xx_id_table [] = {
1189MODULE_DEVICE_TABLE(usb, em28xx_id_table); 1319MODULE_DEVICE_TABLE(usb, em28xx_id_table);
1190 1320
1191/* 1321/*
1192 * Reset sequences for analog/digital modes
1193 */
1194
1195/* Reset for the most [analog] boards */
1196static struct em28xx_reg_seq default_analog[] = {
1197 {EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10},
1198 { -1, -1, -1, -1},
1199};
1200
1201/* Reset for the most [digital] boards */
1202static struct em28xx_reg_seq default_digital[] = {
1203 {EM28XX_R08_GPIO, 0x6e, ~EM_GPIO_4, 10},
1204 { -1, -1, -1, -1},
1205};
1206
1207/* Board Hauppauge WinTV HVR 900 analog */
1208static struct em28xx_reg_seq hauppauge_wintv_hvr_900_analog[] = {
1209 {EM28XX_R08_GPIO, 0x2d, ~EM_GPIO_4, 10},
1210 {0x05, 0xff, 0x10, 10},
1211 { -1, -1, -1, -1},
1212};
1213
1214/* Board Hauppauge WinTV HVR 900 digital */
1215static struct em28xx_reg_seq hauppauge_wintv_hvr_900_digital[] = {
1216 {EM28XX_R08_GPIO, 0x2e, ~EM_GPIO_4, 10},
1217 {EM2880_R04_GPO, 0x04, 0x0f, 10},
1218 {EM2880_R04_GPO, 0x0c, 0x0f, 10},
1219 { -1, -1, -1, -1},
1220};
1221
1222/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
1223static struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = {
1224 {EM28XX_R08_GPIO, 0x69, ~EM_GPIO_4, 10},
1225 { -1, -1, -1, -1},
1226};
1227
1228/* Boards - EM2880 MSI DIGIVOX AD and EM2880_BOARD_MSI_DIGIVOX_AD_II */
1229static struct em28xx_reg_seq em2880_msi_digivox_ad_digital[] = {
1230 {EM28XX_R08_GPIO, 0x6a, ~EM_GPIO_4, 10},
1231 { -1, -1, -1, -1},
1232};
1233
1234/* Board - EM2870 Kworld 355u
1235 Analog - No input analog */
1236static struct em28xx_reg_seq em2870_kworld_355u_digital[] = {
1237 {EM2880_R04_GPO, 0x01, 0xff, 10},
1238 { -1, -1, -1, -1},
1239};
1240
1241/* Callback for the most boards */
1242static struct em28xx_reg_seq default_callback[] = {
1243 {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
1244 {EM28XX_R08_GPIO, 0, EM_GPIO_4, 10},
1245 {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
1246 { -1, -1, -1, -1},
1247};
1248
1249/* Callback for EM2882 TERRATEC HYBRID XS */
1250static struct em28xx_reg_seq em2882_terratec_hybrid_xs_digital[] = {
1251 {EM28XX_R08_GPIO, 0x2e, 0xff, 6},
1252 {EM28XX_R08_GPIO, 0x3e, ~EM_GPIO_4, 6},
1253 {EM2880_R04_GPO, 0x04, 0xff, 10},
1254 {EM2880_R04_GPO, 0x0c, 0xff, 10},
1255 { -1, -1, -1, -1},
1256};
1257
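The register sequences deleted here are not dropped from the driver: the converted board entries reference default_analog, default_digital, em2880_msi_digivox_ad_analog and the Hauppauge sequences by name, so the tables have presumably been moved ahead of the board array (they must be defined before use). Each sequence is a list of (register, value, mask, delay) tuples terminated by { -1, -1, -1, -1 }. A small user-space model of how em28xx_gpio_set() can walk such a table; the write and sleep primitives are stubs, and the exact termination and mask handling is an assumption based on the table layout:

#include <stdio.h>
#include <unistd.h>

struct reg_seq { int reg; int val; int mask; int sleep_ms; };

/* Stubs standing in for em28xx_write_reg_bits() and msleep(). */
static void write_reg_bits(int reg, int val, int mask)
{
	printf("reg 0x%02x <= 0x%02x (mask 0x%02x)\n", reg, val & 0xff, mask & 0xff);
}

static int gpio_set(const struct reg_seq *seq)
{
	if (!seq)
		return 0;		/* boards without a sequence are a no-op */

	for (; seq->reg >= 0; seq++) {	/* { -1, -1, -1, -1 } terminates */
		write_reg_bits(seq->reg, seq->val, seq->mask);
		if (seq->sleep_ms > 0)
			usleep(seq->sleep_ms * 1000);
	}
	return 0;
}

static const struct reg_seq demo_digital[] = {
	{ 0x08, 0x6e, ~0x10, 10 },
	{ -1, -1, -1, -1 },
};

int main(void)
{
	return gpio_set(demo_digital);
}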
1258/*
1259 * EEPROM hash table for devices with generic USB IDs 1322 * EEPROM hash table for devices with generic USB IDs
1260 */ 1323 */
1261static struct em28xx_hash_table em28xx_eeprom_hash [] = { 1324static struct em28xx_hash_table em28xx_eeprom_hash [] = {
1262 /* P/N: SA 60002070465 Tuner: TVF7533-MF */ 1325 /* P/N: SA 60002070465 Tuner: TVF7533-MF */
1263 {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF}, 1326 {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF},
1327 {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF},
1264 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, 1328 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
1265}; 1329};
1266 1330
@@ -1282,27 +1346,26 @@ int em28xx_tuner_callback(void *ptr, int component, int command, int arg)
1282 if (command != XC2028_TUNER_RESET) 1346 if (command != XC2028_TUNER_RESET)
1283 return 0; 1347 return 0;
1284 1348
1285 if (dev->mode == EM28XX_ANALOG_MODE) 1349 rc = em28xx_gpio_set(dev, dev->board.tuner_gpio);
1286 rc = em28xx_gpio_set(dev, dev->tun_analog_gpio);
1287 else
1288 rc = em28xx_gpio_set(dev, dev->tun_digital_gpio);
1289 1350
1290 return rc; 1351 return rc;
1291} 1352}
1292EXPORT_SYMBOL_GPL(em28xx_tuner_callback); 1353EXPORT_SYMBOL_GPL(em28xx_tuner_callback);
1293 1354
1294static void em28xx_set_model(struct em28xx *dev) 1355static void inline em28xx_set_model(struct em28xx *dev)
1295{ 1356{
1296 dev->is_em2800 = em28xx_boards[dev->model].is_em2800; 1357 memcpy(&dev->board, &em28xx_boards[dev->model], sizeof(dev->board));
1297 dev->has_msp34xx = em28xx_boards[dev->model].has_msp34xx; 1358
1298 dev->tda9887_conf = em28xx_boards[dev->model].tda9887_conf; 1359 /* Those are the default values for the majority of boards
1299 dev->decoder = em28xx_boards[dev->model].decoder; 1360 Use those values if not specified otherwise at boards entry
1300 dev->video_inputs = em28xx_boards[dev->model].vchannels; 1361 */
1301 dev->has_12mhz_i2s = em28xx_boards[dev->model].has_12mhz_i2s; 1362 if (!dev->board.xclk)
1302 dev->max_range_640_480 = em28xx_boards[dev->model].max_range_640_480; 1363 dev->board.xclk = EM28XX_XCLK_IR_RC5_MODE |
1303 dev->has_dvb = em28xx_boards[dev->model].has_dvb; 1364 EM28XX_XCLK_FREQUENCY_12MHZ;
1304 dev->has_snapshot_button = em28xx_boards[dev->model].has_snapshot_button; 1365
1305 dev->valid = em28xx_boards[dev->model].valid; 1366 if (!dev->board.i2c_speed)
1367 dev->board.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
1368 EM28XX_I2C_FREQ_100_KHZ;
1306} 1369}
1307 1370
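em28xx_set_model() now copies the whole board template into dev->board with a single memcpy() and then fills in driver-wide defaults for fields the entry left at zero (XCLK and I2C clock), instead of hand-copying a fixed list of members; the tuner callback above shrinks for the same reason, since one dev->board.tuner_gpio pointer replaces the analog/digital pair. A compact model of the copy-then-default pattern; the constants and field names here are illustrative, not the driver's real bit values:

#include <string.h>

struct board_cfg {
	int xclk;		/* 0 means "use the driver default" */
	int i2c_speed;
	int tuner_type;
};

#define DEFAULT_XCLK		0x27	/* illustrative defaults */
#define DEFAULT_I2C_SPEED	0x40

static const struct board_cfg boards[] = {
	{ .tuner_type = 1 },			/* relies on the defaults */
	{ .xclk = 0x07, .tuner_type = 2 },	/* overrides only what it needs */
};

struct device_state { int model; struct board_cfg board; };

static void set_model(struct device_state *dev)
{
	memcpy(&dev->board, &boards[dev->model], sizeof(dev->board));

	if (!dev->board.xclk)
		dev->board.xclk = DEFAULT_XCLK;
	if (!dev->board.i2c_speed)
		dev->board.i2c_speed = DEFAULT_I2C_SPEED;
}

int main(void)
{
	struct device_state dev = { .model = 0 };

	set_model(&dev);
	return dev.board.xclk == DEFAULT_XCLK ? 0 : 1;
}

The benefit is that adding a new per-board field only requires touching the struct and the table, not the copy routine.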
1308/* Since em28xx_pre_card_setup() requires a proper dev->model, 1371/* Since em28xx_pre_card_setup() requires a proper dev->model,
@@ -1312,205 +1375,126 @@ void em28xx_pre_card_setup(struct em28xx *dev)
1312{ 1375{
1313 int rc; 1376 int rc;
1314 1377
1315 rc = em28xx_read_reg(dev, EM2880_R04_GPO); 1378 em28xx_set_model(dev);
1316 if (rc >= 0) 1379
1317 dev->reg_gpo = rc; 1380 em28xx_info("Identified as %s (card=%d)\n",
1381 dev->board.name, dev->model);
1382
1383 /* Set the default GPO/GPIO for legacy devices */
1384 dev->reg_gpo_num = EM2880_R04_GPO;
1385 dev->reg_gpio_num = EM28XX_R08_GPIO;
1318 1386
1319 dev->wait_after_write = 5; 1387 dev->wait_after_write = 5;
1388
1389 /* Based on the Chip ID, set the device configuration */
1320 rc = em28xx_read_reg(dev, EM28XX_R0A_CHIPID); 1390 rc = em28xx_read_reg(dev, EM28XX_R0A_CHIPID);
1321 if (rc > 0) { 1391 if (rc > 0) {
1322 switch (rc) { 1392 dev->chip_id = rc;
1393
1394 switch (dev->chip_id) {
1395 case CHIP_ID_EM2750:
1396 em28xx_info("chip ID is em2750\n");
1397 break;
1398 case CHIP_ID_EM2820:
1399 em28xx_info("chip ID is em2820\n");
1400 break;
1401 case CHIP_ID_EM2840:
1402 em28xx_info("chip ID is em2840\n");
1403 break;
1323 case CHIP_ID_EM2860: 1404 case CHIP_ID_EM2860:
1324 em28xx_info("chip ID is em2860\n"); 1405 em28xx_info("chip ID is em2860\n");
1325 break; 1406 break;
1407 case CHIP_ID_EM2870:
1408 em28xx_info("chip ID is em2870\n");
1409 dev->wait_after_write = 0;
1410 break;
1411 case CHIP_ID_EM2874:
1412 em28xx_info("chip ID is em2874\n");
1413 dev->reg_gpio_num = EM2874_R80_GPIO;
1414 dev->wait_after_write = 0;
1415 break;
1326 case CHIP_ID_EM2883: 1416 case CHIP_ID_EM2883:
1327 em28xx_info("chip ID is em2882/em2883\n"); 1417 em28xx_info("chip ID is em2882/em2883\n");
1328 dev->wait_after_write = 0; 1418 dev->wait_after_write = 0;
1329 break; 1419 break;
1330 default: 1420 default:
1331 em28xx_info("em28xx chip ID = %d\n", rc); 1421 em28xx_info("em28xx chip ID = %d\n", dev->chip_id);
1332 } 1422 }
1333 } 1423 }
1334 em28xx_set_model(dev);
1335
1336 /* request some modules */
1337 switch (dev->model) {
1338 case EM2880_BOARD_TERRATEC_PRODIGY_XS:
1339 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
1340 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
1341 case EM2860_BOARD_TERRATEC_HYBRID_XS:
1342 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
1343 case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
1344 case EM2882_BOARD_PINNACLE_HYBRID_PRO:
1345 case EM2883_BOARD_KWORLD_HYBRID_A316:
1346 case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
1347 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1);
1348 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1349 msleep(50);
1350
1351 /* Sets GPO/GPIO sequences for this device */
1352 dev->analog_gpio = hauppauge_wintv_hvr_900_analog;
1353 dev->digital_gpio = hauppauge_wintv_hvr_900_digital;
1354 dev->tun_analog_gpio = default_callback;
1355 dev->tun_digital_gpio = default_callback;
1356 break;
1357
1358 case EM2882_BOARD_TERRATEC_HYBRID_XS:
1359 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1);
1360 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1361 msleep(50);
1362
1363 /* should be added ir_codes here */
1364
1365 /* Sets GPO/GPIO sequences for this device */
1366 dev->analog_gpio = hauppauge_wintv_hvr_900_analog;
1367 dev->digital_gpio = hauppauge_wintv_hvr_900_digital;
1368 dev->tun_analog_gpio = default_callback;
1369 dev->tun_digital_gpio = em2882_terratec_hybrid_xs_digital;
1370 break;
1371
1372 case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
1373 case EM2880_BOARD_TERRATEC_HYBRID_XS:
1374 case EM2870_BOARD_TERRATEC_XS:
1375 case EM2881_BOARD_PINNACLE_HYBRID_PRO:
1376 case EM2880_BOARD_KWORLD_DVB_310U:
1377 case EM2870_BOARD_KWORLD_350U:
1378 case EM2881_BOARD_DNT_DA2_HYBRID:
1379 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1);
1380 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1381 msleep(50);
1382
1383 /* NOTE: EM2881_DNT_DA2_HYBRID spend 140 msleep for digital
1384 and analog commands. If this commands doesn't work,
1385 add this timer. */
1386
1387 /* Sets GPO/GPIO sequences for this device */
1388 dev->analog_gpio = default_analog;
1389 dev->digital_gpio = default_digital;
1390 dev->tun_analog_gpio = default_callback;
1391 dev->tun_digital_gpio = default_callback;
1392 break;
1393 1424
1394 case EM2880_BOARD_MSI_DIGIVOX_AD: 1425 /* Prepopulate cached GPO register content */
1395 case EM2880_BOARD_MSI_DIGIVOX_AD_II: 1426 rc = em28xx_read_reg(dev, dev->reg_gpo_num);
1396 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1); 1427 if (rc >= 0)
1397 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1); 1428 dev->reg_gpo = rc;
1398 msleep(50);
1399
1400 /* Sets GPO/GPIO sequences for this device */
1401 dev->analog_gpio = em2880_msi_digivox_ad_analog;
1402 dev->digital_gpio = em2880_msi_digivox_ad_digital;
1403 dev->tun_analog_gpio = default_callback;
1404 dev->tun_digital_gpio = default_callback;
1405 break;
1406 1429
1407 case EM2750_BOARD_UNKNOWN: 1430 /* Set the initial XCLK and I2C clock values based on the board
1408 case EM2750_BOARD_DLCW_130: 1431 definition */
1409 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x0a", 1); 1432 em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk & 0x7f);
1410 break; 1433 em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
1434 msleep(50);
1411 1435
1436 /* request some modules */
1437 switch (dev->model) {
1412 case EM2861_BOARD_PLEXTOR_PX_TV100U: 1438 case EM2861_BOARD_PLEXTOR_PX_TV100U:
1413 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1);
1414 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1415 /* FIXME guess */ 1439 /* FIXME guess */
1416 /* Turn on analog audio output */ 1440 /* Turn on analog audio output */
1417 em28xx_write_regs_req(dev, 0x00, 0x08, "\xfd", 1); 1441 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd);
1418 break; 1442 break;
1419
1420 case EM2861_BOARD_KWORLD_PVRTV_300U: 1443 case EM2861_BOARD_KWORLD_PVRTV_300U:
1421 case EM2880_BOARD_KWORLD_DVB_305U: 1444 case EM2880_BOARD_KWORLD_DVB_305U:
1422 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1); 1445 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0x6d);
1423 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x4c", 1);
1424 msleep(10);
1425 em28xx_write_regs(dev, 0x08, "\x6d", 1);
1426 msleep(10); 1446 msleep(10);
1427 em28xx_write_regs(dev, 0x08, "\x7d", 1); 1447 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0x7d);
1428 msleep(10); 1448 msleep(10);
1429 break; 1449 break;
1430
1431 case EM2870_BOARD_KWORLD_355U:
1432 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1);
1433 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1434 msleep(50);
1435
1436 /* Sets GPO/GPIO sequences for this device */
1437 dev->digital_gpio = em2870_kworld_355u_digital;
1438 break;
1439
1440 case EM2870_BOARD_COMPRO_VIDEOMATE: 1450 case EM2870_BOARD_COMPRO_VIDEOMATE:
1441 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1);
1442 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1443 /* TODO: someone can do some cleanup here... 1451 /* TODO: someone can do some cleanup here...
1444 not everything's needed */ 1452 not everything's needed */
1445 em28xx_write_regs(dev, 0x04, "\x00", 1); 1453 em28xx_write_reg(dev, EM2880_R04_GPO, 0x00);
1446 msleep(10); 1454 msleep(10);
1447 em28xx_write_regs(dev, 0x04, "\x01", 1); 1455 em28xx_write_reg(dev, EM2880_R04_GPO, 0x01);
1448 msleep(10); 1456 msleep(10);
1449 em28xx_write_regs(dev, 0x08, "\xfd", 1); 1457 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd);
1450 mdelay(70); 1458 mdelay(70);
1451 em28xx_write_regs(dev, 0x08, "\xfc", 1); 1459 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfc);
1452 mdelay(70); 1460 mdelay(70);
1453 em28xx_write_regs(dev, 0x08, "\xdc", 1); 1461 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xdc);
1454 mdelay(70); 1462 mdelay(70);
1455 em28xx_write_regs(dev, 0x08, "\xfc", 1); 1463 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfc);
1456 mdelay(70); 1464 mdelay(70);
1457 break; 1465 break;
1458
1459 case EM2870_BOARD_TERRATEC_XS_MT2060: 1466 case EM2870_BOARD_TERRATEC_XS_MT2060:
1460 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1);
1461 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1462 /* this device needs some gpio writes to get the DVB-T 1467 /* this device needs some gpio writes to get the DVB-T
1463 demod work */ 1468 demod work */
1464 em28xx_write_regs(dev, 0x08, "\xfe", 1); 1469 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfe);
1465 mdelay(70); 1470 mdelay(70);
1466 em28xx_write_regs(dev, 0x08, "\xde", 1); 1471 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xde);
1467 mdelay(70); 1472 mdelay(70);
1468 dev->em28xx_write_regs(dev, 0x08, "\xfe", 1); 1473 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfe);
1469 mdelay(70); 1474 mdelay(70);
1470 break; 1475 break;
1471
1472 case EM2870_BOARD_PINNACLE_PCTV_DVB: 1476 case EM2870_BOARD_PINNACLE_PCTV_DVB:
1473 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1474 /* this device needs some gpio writes to get the 1477 /* this device needs some gpio writes to get the
1475 DVB-T demod work */ 1478 DVB-T demod work */
1476 em28xx_write_regs(dev, 0x08, "\xfe", 1); 1479 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfe);
1477 mdelay(70); 1480 mdelay(70);
1478 em28xx_write_regs(dev, 0x08, "\xde", 1); 1481 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xde);
1479 mdelay(70); 1482 mdelay(70);
1480 em28xx_write_regs(dev, 0x08, "\xfe", 1); 1483 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfe);
1481 mdelay(70); 1484 mdelay(70);
1482 /* switch em2880 rc protocol */
1483 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x22", 1);
1484 /* should be added ir_codes here */
1485 break; 1485 break;
1486
1487 case EM2820_BOARD_GADMEI_UTV310: 1486 case EM2820_BOARD_GADMEI_UTV310:
1488 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1);
1489 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1490 /* Turn on analog audio output */
1491 em28xx_write_regs_req(dev, 0x00, 0x08, "\xfd", 1);
1492 break;
1493
1494 case EM2860_BOARD_GADMEI_UTV330:
1495 /* Turn on IR */
1496 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x07", 1);
1497 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
1498 /* should be added ir_codes here */
1499 break;
1500
1501 case EM2820_BOARD_MSI_VOX_USB_2: 1487 case EM2820_BOARD_MSI_VOX_USB_2:
1502 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x27", 1); 1488 /* enables audio for that devices */
1503 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1); 1489 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd);
1504 /* enables audio for that device */
1505 em28xx_write_regs_req(dev, 0x00, 0x08, "\xfd", 1);
1506 break; 1490 break;
1507 } 1491 }
1508 1492
1509 em28xx_gpio_set(dev, dev->tun_analog_gpio); 1493 em28xx_gpio_set(dev, dev->board.tuner_gpio);
1510 em28xx_set_mode(dev, EM28XX_ANALOG_MODE); 1494 em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
1511 1495
1512 /* Unlock device */ 1496 /* Unlock device */
1513 em28xx_set_mode(dev, EM28XX_MODE_UNDEFINED); 1497 em28xx_set_mode(dev, EM28XX_SUSPEND);
1514} 1498}
1515 1499
1516static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) 1500static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
@@ -1536,6 +1520,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
1536 ctl->demod = XC3028_FE_DEFAULT; 1520 ctl->demod = XC3028_FE_DEFAULT;
1537 ctl->fname = XC3028L_DEFAULT_FIRMWARE; 1521 ctl->fname = XC3028L_DEFAULT_FIRMWARE;
1538 break; 1522 break;
1523 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
1539 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: 1524 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
1540 case EM2880_BOARD_PINNACLE_PCTV_HD_PRO: 1525 case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
1541 /* FIXME: Better to specify the needed IF */ 1526 /* FIXME: Better to specify the needed IF */
@@ -1712,12 +1697,15 @@ void em28xx_card_setup(struct em28xx *dev)
1712 em28xx_set_model(dev); 1697 em28xx_set_model(dev);
1713 1698
1714 dev->tuner_type = em28xx_boards[dev->model].tuner_type; 1699 dev->tuner_type = em28xx_boards[dev->model].tuner_type;
1700 if (em28xx_boards[dev->model].tuner_addr)
1701 dev->tuner_addr = em28xx_boards[dev->model].tuner_addr;
1715 1702
1716 /* request some modules */ 1703 /* request some modules */
1717 switch (dev->model) { 1704 switch (dev->model) {
1718 case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2: 1705 case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
1719 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: 1706 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
1720 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: 1707 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
1708 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
1721 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: 1709 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
1722 { 1710 {
1723 struct tveeprom tv; 1711 struct tveeprom tv;
@@ -1733,7 +1721,7 @@ void em28xx_card_setup(struct em28xx *dev)
1733 1721
1734 if (tv.audio_processor == V4L2_IDENT_MSPX4XX) { 1722 if (tv.audio_processor == V4L2_IDENT_MSPX4XX) {
1735 dev->i2s_speed = 2048000; 1723 dev->i2s_speed = 2048000;
1736 dev->has_msp34xx = 1; 1724 dev->board.has_msp34xx = 1;
1737 } 1725 }
1738#ifdef CONFIG_MODULES 1726#ifdef CONFIG_MODULES
1739 if (tv.has_ir) 1727 if (tv.has_ir)
@@ -1743,7 +1731,7 @@ void em28xx_card_setup(struct em28xx *dev)
1743 } 1731 }
1744 case EM2820_BOARD_KWORLD_PVRTV2800RF: 1732 case EM2820_BOARD_KWORLD_PVRTV2800RF:
1745 /* GPIO enables sound on KWORLD PVR TV 2800RF */ 1733 /* GPIO enables sound on KWORLD PVR TV 2800RF */
1746 em28xx_write_regs_req(dev, 0x00, 0x08, "\xf9", 1); 1734 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf9);
1747 break; 1735 break;
1748 case EM2820_BOARD_UNKNOWN: 1736 case EM2820_BOARD_UNKNOWN:
1749 case EM2800_BOARD_UNKNOWN: 1737 case EM2800_BOARD_UNKNOWN:
@@ -1766,10 +1754,10 @@ void em28xx_card_setup(struct em28xx *dev)
1766 break; 1754 break;
1767 } 1755 }
1768 1756
1769 if (dev->has_snapshot_button) 1757 if (dev->board.has_snapshot_button)
1770 em28xx_register_snapshot_button(dev); 1758 em28xx_register_snapshot_button(dev);
1771 1759
1772 if (dev->valid == EM28XX_BOARD_NOT_VALIDATED) { 1760 if (dev->board.valid == EM28XX_BOARD_NOT_VALIDATED) {
1773 em28xx_errdev("\n\n"); 1761 em28xx_errdev("\n\n");
1774 em28xx_errdev("The support for this board weren't " 1762 em28xx_errdev("The support for this board weren't "
1775 "valid yet.\n"); 1763 "valid yet.\n");
@@ -1784,15 +1772,433 @@ void em28xx_card_setup(struct em28xx *dev)
1784 1772
1785#ifdef CONFIG_MODULES 1773#ifdef CONFIG_MODULES
1786 /* request some modules */ 1774 /* request some modules */
1787 if (dev->has_msp34xx) 1775 if (dev->board.has_msp34xx)
1788 request_module("msp3400"); 1776 request_module("msp3400");
1789 if (dev->decoder == EM28XX_SAA7113 || dev->decoder == EM28XX_SAA7114) 1777 if (dev->board.decoder == EM28XX_SAA711X)
1790 request_module("saa7115"); 1778 request_module("saa7115");
1791 if (dev->decoder == EM28XX_TVP5150) 1779 if (dev->board.decoder == EM28XX_TVP5150)
1792 request_module("tvp5150"); 1780 request_module("tvp5150");
1793 if (dev->tuner_type != TUNER_ABSENT) 1781 if (dev->board.tuner_type != TUNER_ABSENT)
1794 request_module("tuner"); 1782 request_module("tuner");
1795#endif 1783#endif
1796 1784
1797 em28xx_config_tuner(dev); 1785 em28xx_config_tuner(dev);
1786
1787 em28xx_ir_init(dev);
1788}
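After the copy, em28xx_card_setup() keys its module requests off dev->board: msp3400 when the EEPROM reports an MSP34xx, saa7115 for the merged EM28XX_SAA711X decoder, tvp5150 for TVP5150 boards, and tuner whenever a tuner is present. A table-driven sketch of that decoder-to-module mapping; the driver itself uses an if chain, and the enum values here are illustrative:

#include <stdio.h>

enum decoder { DEC_NONE, DEC_SAA711X, DEC_TVP5150 };

static const struct { enum decoder dec; const char *module; } decoder_modules[] = {
	{ DEC_SAA711X, "saa7115" },
	{ DEC_TVP5150, "tvp5150" },
};

/* Stand-in for request_module(); the real call loads a kernel module. */
static void request_module_stub(const char *name)
{
	printf("request_module(\"%s\")\n", name);
}

static void request_decoder_module(enum decoder dec)
{
	size_t i;

	for (i = 0; i < sizeof(decoder_modules) / sizeof(decoder_modules[0]); i++)
		if (decoder_modules[i].dec == dec)
			request_module_stub(decoder_modules[i].module);
}

int main(void)
{
	request_decoder_module(DEC_SAA711X);
	return 0;
}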
1789
1790
1791#if defined(CONFIG_MODULES) && defined(MODULE)
1792static void request_module_async(struct work_struct *work)
1793{
1794 struct em28xx *dev = container_of(work,
1795 struct em28xx, request_module_wk);
1796
1797 if (dev->has_audio_class)
1798 request_module("snd-usb-audio");
1799 else if (dev->has_alsa_audio)
1800 request_module("em28xx-alsa");
1801
1802 if (dev->board.has_dvb)
1803 request_module("em28xx-dvb");
1804}
1805
1806static void request_modules(struct em28xx *dev)
1807{
1808 INIT_WORK(&dev->request_module_wk, request_module_async);
1809 schedule_work(&dev->request_module_wk);
1810}
1811#else
1812#define request_modules(dev)
1813#endif /* CONFIG_MODULES */
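request_modules() pushes the request_module() calls onto a workqueue so that probe() returns promptly instead of blocking while modprobe runs, choosing snd-usb-audio or em28xx-alsa depending on whether the device exposes a USB Audio Class interface, and adding em28xx-dvb for DVB-capable boards. A loose user-space analogy of that fire-and-forget pattern using a detached thread (the kernel uses INIT_WORK/schedule_work, not pthreads; build the sketch with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct dev_ctx { int has_audio_class; int has_dvb; };

/* Slow work done off the probe path (stands in for the request_module calls). */
static void *request_modules_work(void *arg)
{
	struct dev_ctx *dev = arg;

	printf("loading %s\n", dev->has_audio_class ? "snd-usb-audio" : "em28xx-alsa");
	if (dev->has_dvb)
		printf("loading em28xx-dvb\n");
	return NULL;
}

static void request_modules(struct dev_ctx *dev)
{
	pthread_t t;

	/* Fire and forget: the caller does not wait for the module loads. */
	if (pthread_create(&t, NULL, request_modules_work, dev) == 0)
		pthread_detach(t);
}

int main(void)
{
	struct dev_ctx dev = { .has_audio_class = 0, .has_dvb = 1 };

	request_modules(&dev);
	sleep(1);		/* give the detached worker time to run */
	return 0;
}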
1814
1815/*
1816 * em28xx_release_resources()
1817 * unregisters the v4l2, i2c and usb devices
1818 * called when the device gets disconnected or at module unload
1819*/
1820void em28xx_release_resources(struct em28xx *dev)
1821{
1822 if (dev->sbutton_input_dev)
1823 em28xx_deregister_snapshot_button(dev);
1824
1825 if (dev->ir)
1826 em28xx_ir_fini(dev);
1827
1828 /*FIXME: I2C IR should be disconnected */
1829
1830 em28xx_release_analog_resources(dev);
1831
1832 em28xx_remove_from_devlist(dev);
1833
1834 em28xx_i2c_unregister(dev);
1835 usb_put_dev(dev->udev);
1836
1837 /* Mark device as unused */
1838 em28xx_devused &= ~(1 << dev->devno);
1839};
1840
1841/*
1842 * em28xx_init_dev()
1843 * allocates and inits the device structs, registers i2c bus and v4l device
1844 */
1845int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1846 int minor)
1847{
1848 struct em28xx *dev = *devhandle;
1849 int retval = -ENOMEM;
1850 int errCode;
1851
1852 dev->udev = udev;
1853 mutex_init(&dev->ctrl_urb_lock);
1854 spin_lock_init(&dev->slock);
1855 init_waitqueue_head(&dev->open);
1856 init_waitqueue_head(&dev->wait_frame);
1857 init_waitqueue_head(&dev->wait_stream);
1858
1859 dev->em28xx_write_regs = em28xx_write_regs;
1860 dev->em28xx_read_reg = em28xx_read_reg;
1861 dev->em28xx_read_reg_req_len = em28xx_read_reg_req_len;
1862 dev->em28xx_write_regs_req = em28xx_write_regs_req;
1863 dev->em28xx_read_reg_req = em28xx_read_reg_req;
1864 dev->board.is_em2800 = em28xx_boards[dev->model].is_em2800;
1865
1866 em28xx_pre_card_setup(dev);
1867
1868 if (!dev->board.is_em2800) {
1869 /* Sets I2C speed to 100 KHz */
1870 retval = em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
1871 if (retval < 0) {
1872 em28xx_errdev("%s: em28xx_write_regs_req failed!"
1873 " retval [%d]\n",
1874 __func__, retval);
1875 return retval;
1876 }
1877 }
1878
1879 /* register i2c bus */
1880 errCode = em28xx_i2c_register(dev);
1881 if (errCode < 0) {
1882 em28xx_errdev("%s: em28xx_i2c_register - errCode [%d]!\n",
1883 __func__, errCode);
1884 return errCode;
1885 }
1886
1887 /* Do board specific init and eeprom reading */
1888 em28xx_card_setup(dev);
1889
1890 /* Configure audio */
1891 errCode = em28xx_audio_setup(dev);
1892 if (errCode < 0) {
1893 em28xx_errdev("%s: Error while setting audio - errCode [%d]!\n",
1894 __func__, errCode);
1895 }
1896
1897 /* wake i2c devices */
1898 em28xx_wake_i2c(dev);
1899
1900 /* init video dma queues */
1901 INIT_LIST_HEAD(&dev->vidq.active);
1902 INIT_LIST_HEAD(&dev->vidq.queued);
1903
1904
1905 if (dev->board.has_msp34xx) {
1906 /* Send a reset to other chips via gpio */
1907 errCode = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf7);
1908 if (errCode < 0) {
1909 em28xx_errdev("%s: em28xx_write_regs_req - "
1910 "msp34xx(1) failed! errCode [%d]\n",
1911 __func__, errCode);
1912 return errCode;
1913 }
1914 msleep(3);
1915
1916 errCode = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xff);
1917 if (errCode < 0) {
1918 em28xx_errdev("%s: em28xx_write_regs_req - "
1919 "msp34xx(2) failed! errCode [%d]\n",
1920 __func__, errCode);
1921 return errCode;
1922 }
1923 msleep(3);
1924 }
1925
1926 em28xx_add_into_devlist(dev);
1927
1928 retval = em28xx_register_analog_devices(dev);
1929 if (retval < 0) {
1930 em28xx_release_resources(dev);
1931 goto fail_reg_devices;
1932 }
1933
1934 em28xx_init_extension(dev);
1935
1936 /* Save some power by putting tuner to sleep */
1937 em28xx_i2c_call_clients(dev, TUNER_SET_STANDBY, NULL);
1938
1939 return 0;
1940
1941fail_reg_devices:
1942 return retval;
1943}
1944
1945/*
1946 * em28xx_usb_probe()
1947 * checks for supported devices
1948 */
1949static int em28xx_usb_probe(struct usb_interface *interface,
1950 const struct usb_device_id *id)
1951{
1952 const struct usb_endpoint_descriptor *endpoint;
1953 struct usb_device *udev;
1954 struct usb_interface *uif;
1955 struct em28xx *dev = NULL;
1956 int retval = -ENODEV;
1957 int i, nr, ifnum, isoc_pipe;
1958 char *speed;
1959 char descr[255] = "";
1960
1961 udev = usb_get_dev(interface_to_usbdev(interface));
1962 ifnum = interface->altsetting[0].desc.bInterfaceNumber;
1963
1964 /* Check to see next free device and mark as used */
1965 nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
1966 em28xx_devused |= 1<<nr;
1967
1968 /* Don't register audio interfaces */
1969 if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
1970 em28xx_err(DRIVER_NAME " audio device (%04x:%04x): "
1971 "interface %i, class %i\n",
1972 le16_to_cpu(udev->descriptor.idVendor),
1973 le16_to_cpu(udev->descriptor.idProduct),
1974 ifnum,
1975 interface->altsetting[0].desc.bInterfaceClass);
1976
1977 em28xx_devused &= ~(1<<nr);
1978 return -ENODEV;
1979 }
1980
1981 endpoint = &interface->cur_altsetting->endpoint[0].desc;
1982
1983 /* check if the device has the iso in endpoint at the correct place */
1984 if (usb_endpoint_xfer_isoc(endpoint)
1985 &&
1986 (interface->altsetting[1].endpoint[0].desc.wMaxPacketSize == 940)) {
1987 /* It's a newer em2874/em2875 device */
1988 isoc_pipe = 0;
1989 } else {
1990 int check_interface = 1;
1991 isoc_pipe = 1;
1992 endpoint = &interface->cur_altsetting->endpoint[1].desc;
1993 if (usb_endpoint_type(endpoint) !=
1994 USB_ENDPOINT_XFER_ISOC)
1995 check_interface = 0;
1996
1997 if (usb_endpoint_dir_out(endpoint))
1998 check_interface = 0;
1999
2000 if (!check_interface) {
2001 em28xx_err(DRIVER_NAME " video device (%04x:%04x): "
2002 "interface %i, class %i found.\n",
2003 le16_to_cpu(udev->descriptor.idVendor),
2004 le16_to_cpu(udev->descriptor.idProduct),
2005 ifnum,
2006 interface->altsetting[0].desc.bInterfaceClass);
2007
2008 em28xx_err(DRIVER_NAME " This is an ancillary "
2009 "interface not used by the driver\n");
2010
2011 em28xx_devused &= ~(1<<nr);
2012 return -ENODEV;
2013 }
2014 }
2015
2016 switch (udev->speed) {
2017 case USB_SPEED_LOW:
2018 speed = "1.5";
2019 break;
2020 case USB_SPEED_UNKNOWN:
2021 case USB_SPEED_FULL:
2022 speed = "12";
2023 break;
2024 case USB_SPEED_HIGH:
2025 speed = "480";
2026 break;
2027 default:
2028 speed = "unknown";
2029 }
2030
2031 if (udev->manufacturer)
2032 strlcpy(descr, udev->manufacturer, sizeof(descr));
2033
2034 if (udev->product) {
2035 if (*descr)
2036 strlcat(descr, " ", sizeof(descr));
2037 strlcat(descr, udev->product, sizeof(descr));
2038 }
2039 if (*descr)
2040 strlcat(descr, " ", sizeof(descr));
2041
2042 printk(DRIVER_NAME ": New device %s@ %s Mbps "
2043 "(%04x:%04x, interface %d, class %d)\n",
2044 descr,
2045 speed,
2046 le16_to_cpu(udev->descriptor.idVendor),
2047 le16_to_cpu(udev->descriptor.idProduct),
2048 ifnum,
2049 interface->altsetting->desc.bInterfaceNumber);
2050
2051 if (nr >= EM28XX_MAXBOARDS) {
2052 printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
2053 EM28XX_MAXBOARDS);
2054 em28xx_devused &= ~(1<<nr);
2055 return -ENOMEM;
2056 }
2057
2058 /* allocate memory for our device state and initialize it */
2059 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2060 if (dev == NULL) {
2061 em28xx_err(DRIVER_NAME ": out of memory!\n");
2062 em28xx_devused &= ~(1<<nr);
2063 return -ENOMEM;
2064 }
2065
2066 snprintf(dev->name, 29, "em28xx #%d", nr);
2067 dev->devno = nr;
2068 dev->model = id->driver_info;
2069 dev->alt = -1;
2070
2071 /* Checks if audio is provided by some interface */
2072 for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
2073 uif = udev->config->interface[i];
2074 if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
2075 dev->has_audio_class = 1;
2076 break;
2077 }
2078 }
2079
2080 /* compute alternate max packet sizes */
2081 uif = udev->actconfig->interface[0];
2082
2083 dev->num_alt = uif->num_altsetting;
2084 dev->alt_max_pkt_size = kmalloc(32 * dev->num_alt, GFP_KERNEL);
2085
2086 if (dev->alt_max_pkt_size == NULL) {
2087 em28xx_errdev("out of memory!\n");
2088 em28xx_devused &= ~(1<<nr);
2089 kfree(dev);
2090 return -ENOMEM;
2091 }
2092
2093 for (i = 0; i < dev->num_alt ; i++) {
2094 u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
2095 dev->alt_max_pkt_size[i] =
2096 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
2097 }
2098
2099 if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
2100 dev->model = card[nr];
2101
2102 /* allocate device struct */
2103 mutex_init(&dev->lock);
2104 mutex_lock(&dev->lock);
2105 retval = em28xx_init_dev(&dev, udev, nr);
2106 if (retval) {
2107 em28xx_devused &= ~(1<<dev->devno);
2108 kfree(dev);
2109
2110 return retval;
2111 }
2112
2113 /* save our data pointer in this interface device */
2114 usb_set_intfdata(interface, dev);
2115
2116 request_modules(dev);
2117
2118 /* Should be the last thing to do, to keep newer udevs from
2119 opening the device before it is fully initialized
2120 */
2121 mutex_unlock(&dev->lock);
2122
2123 return 0;
2124}
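The alt_max_pkt_size loop in the probe above decodes the USB 2.0 high-bandwidth isochronous encoding of wMaxPacketSize: bits 10:0 carry the base packet size and bits 12:11 the number of additional transactions per microframe, so the usable payload is base * (additional + 1). The same field is what the earlier endpoint check compares against 940 to spot the em2874/em2875 layout. A worked example of the arithmetic:

#include <stdio.h>
#include <stdint.h>

static unsigned int isoc_payload(uint16_t w_max_packet_size)
{
	unsigned int base  = w_max_packet_size & 0x07ff;	   /* bits 10:0  */
	unsigned int extra = (w_max_packet_size & 0x1800) >> 11;  /* bits 12:11 */

	return base * (extra + 1);
}

int main(void)
{
	/* 0x1400: 1024-byte packets, 2 additional transactions -> 3072 bytes */
	printf("%u\n", isoc_payload(0x1400));
	/* 0x03ac: 940-byte packets, no extra transactions -> 940 bytes */
	printf("%u\n", isoc_payload(0x03ac));
	return 0;
}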
2125
2126/*
2127 * em28xx_usb_disconnect()
2128 * called when the device gets disconnected
2129 * video device will be unregistered on v4l2_close in case it is still open
2130 */
2131static void em28xx_usb_disconnect(struct usb_interface *interface)
2132{
2133 struct em28xx *dev;
2134
2135 dev = usb_get_intfdata(interface);
2136 usb_set_intfdata(interface, NULL);
2137
2138 if (!dev)
2139 return;
2140
2141 em28xx_info("disconnecting %s\n", dev->vdev->name);
2142
2143 /* wait until all current v4l2 io is finished then deallocate
2144 resources */
2145 mutex_lock(&dev->lock);
2146
2147 wake_up_interruptible_all(&dev->open);
2148
2149 if (dev->users) {
2150 em28xx_warn
2151 ("device /dev/video%d is open! Deregistration and memory "
2152 "deallocation are deferred on close.\n",
2153 dev->vdev->num);
2154
2155 dev->state |= DEV_MISCONFIGURED;
2156 em28xx_uninit_isoc(dev);
2157 dev->state |= DEV_DISCONNECTED;
2158 wake_up_interruptible(&dev->wait_frame);
2159 wake_up_interruptible(&dev->wait_stream);
2160 } else {
2161 dev->state |= DEV_DISCONNECTED;
2162 em28xx_release_resources(dev);
2163 }
2164
2165 em28xx_close_extension(dev);
2166
2167 mutex_unlock(&dev->lock);
2168
2169 if (!dev->users) {
2170 kfree(dev->alt_max_pkt_size);
2171 kfree(dev);
2172 }
1798} 2173}
2174
2175static struct usb_driver em28xx_usb_driver = {
2176 .name = "em28xx",
2177 .probe = em28xx_usb_probe,
2178 .disconnect = em28xx_usb_disconnect,
2179 .id_table = em28xx_id_table,
2180};
2181
2182static int __init em28xx_module_init(void)
2183{
2184 int result;
2185
2186 /* register this driver with the USB subsystem */
2187 result = usb_register(&em28xx_usb_driver);
2188 if (result)
2189 em28xx_err(DRIVER_NAME
2190 " usb_register failed. Error number %d.\n", result);
2191
2192 printk(KERN_INFO DRIVER_NAME " driver loaded\n");
2193
2194 return result;
2195}
2196
2197static void __exit em28xx_module_exit(void)
2198{
2199 /* deregister this driver with the USB subsystem */
2200 usb_deregister(&em28xx_usb_driver);
2201}
2202
2203module_init(em28xx_module_init);
2204module_exit(em28xx_module_exit);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 15e2b525310d..f8504518586a 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -26,6 +26,7 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/usb.h> 27#include <linux/usb.h>
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <media/v4l2-common.h>
29 30
30#include "em28xx.h" 31#include "em28xx.h"
31 32
@@ -66,7 +67,8 @@ MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
66int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg, 67int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
67 char *buf, int len) 68 char *buf, int len)
68{ 69{
69 int ret, byte; 70 int ret;
71 int pipe = usb_rcvctrlpipe(dev->udev, 0);
70 72
71 if (dev->state & DEV_DISCONNECTED) 73 if (dev->state & DEV_DISCONNECTED)
72 return -ENODEV; 74 return -ENODEV;
@@ -74,10 +76,18 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
74 if (len > URB_MAX_CTRL_SIZE) 76 if (len > URB_MAX_CTRL_SIZE)
75 return -EINVAL; 77 return -EINVAL;
76 78
77 em28xx_regdbg("req=%02x, reg=%02x ", req, reg); 79 if (reg_debug) {
80 printk( KERN_DEBUG "(pipe 0x%08x): "
81 "IN: %02x %02x %02x %02x %02x %02x %02x %02x ",
82 pipe,
83 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
84 req, 0, 0,
85 reg & 0xff, reg >> 8,
86 len & 0xff, len >> 8);
87 }
78 88
79 mutex_lock(&dev->ctrl_urb_lock); 89 mutex_lock(&dev->ctrl_urb_lock);
80 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req, 90 ret = usb_control_msg(dev->udev, pipe, req,
81 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 91 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
82 0x0000, reg, dev->urb_buf, len, HZ); 92 0x0000, reg, dev->urb_buf, len, HZ);
83 if (ret < 0) { 93 if (ret < 0) {
@@ -93,7 +103,9 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
93 mutex_unlock(&dev->ctrl_urb_lock); 103 mutex_unlock(&dev->ctrl_urb_lock);
94 104
95 if (reg_debug) { 105 if (reg_debug) {
96 printk("%02x values: ", ret); 106 int byte;
107
108 printk("<<<");
97 for (byte = 0; byte < len; byte++) 109 for (byte = 0; byte < len; byte++)
98 printk(" %02x", (unsigned char)buf[byte]); 110 printk(" %02x", (unsigned char)buf[byte]);
99 printk("\n"); 111 printk("\n");
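With this hunk the reg_debug path prints the raw 8-byte SETUP packet of the vendor control transfer (bmRequestType, bRequest, then wValue, wIndex and wLength as little-endian byte pairs) before calling usb_control_msg(). A small sketch of that packet layout, printed in the same order as the driver's debug line; the register used in the example (0x0a, the chip-ID register read during pre_card_setup) is just for illustration:

#include <stdint.h>
#include <stdio.h>

/* USB SETUP packet as it appears on the wire (8 bytes, little-endian words). */
struct usb_setup_packet {
	uint8_t  bmRequestType;	/* 0xc0 = IN | vendor | device for register reads */
	uint8_t  bRequest;	/* the em28xx "req" argument */
	uint16_t wValue;	/* 0x0000 for these register accesses */
	uint16_t wIndex;	/* the register address */
	uint16_t wLength;	/* number of data bytes */
};

static void dump_setup(const struct usb_setup_packet *s)
{
	printf("IN: %02x %02x %02x %02x %02x %02x %02x %02x\n",
	       s->bmRequestType, s->bRequest,
	       s->wValue & 0xff, s->wValue >> 8,
	       s->wIndex & 0xff, s->wIndex >> 8,
	       s->wLength & 0xff, s->wLength >> 8);
}

int main(void)
{
	struct usb_setup_packet s = {
		.bmRequestType = 0xc0, .bRequest = 0x00,
		.wValue = 0, .wIndex = 0x000a, .wLength = 1,
	};

	dump_setup(&s);
	return 0;
}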
@@ -108,28 +120,12 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
108 */ 120 */
109int em28xx_read_reg_req(struct em28xx *dev, u8 req, u16 reg) 121int em28xx_read_reg_req(struct em28xx *dev, u8 req, u16 reg)
110{ 122{
111 u8 val;
112 int ret; 123 int ret;
124 u8 val;
113 125
114 if (dev->state & DEV_DISCONNECTED) 126 ret = em28xx_read_reg_req_len(dev, req, reg, &val, 1);
115 return(-ENODEV); 127 if (ret < 0)
116
117 em28xx_regdbg("req=%02x, reg=%02x:", req, reg);
118
119 mutex_lock(&dev->ctrl_urb_lock);
120 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req,
121 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
122 0x0000, reg, dev->urb_buf, 1, HZ);
123 val = dev->urb_buf[0];
124 mutex_unlock(&dev->ctrl_urb_lock);
125
126 if (ret < 0) {
127 printk(" failed!\n");
128 return ret; 128 return ret;
129 }
130
131 if (reg_debug)
132 printk("%02x\n", (unsigned char) val);
133 129
134 return val; 130 return val;
135} 131}
@@ -147,6 +143,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
147 int len) 143 int len)
148{ 144{
149 int ret; 145 int ret;
146 int pipe = usb_sndctrlpipe(dev->udev, 0);
150 147
151 if (dev->state & DEV_DISCONNECTED) 148 if (dev->state & DEV_DISCONNECTED)
152 return -ENODEV; 149 return -ENODEV;
@@ -154,17 +151,25 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
154 if ((len < 1) || (len > URB_MAX_CTRL_SIZE)) 151 if ((len < 1) || (len > URB_MAX_CTRL_SIZE))
155 return -EINVAL; 152 return -EINVAL;
156 153
157 em28xx_regdbg("req=%02x reg=%02x:", req, reg);
158 if (reg_debug) { 154 if (reg_debug) {
159 int i; 155 int byte;
160 for (i = 0; i < len; ++i) 156
161 printk(" %02x", (unsigned char)buf[i]); 157 printk( KERN_DEBUG "(pipe 0x%08x): "
158 "OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
159 pipe,
160 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
161 req, 0, 0,
162 reg & 0xff, reg >> 8,
163 len & 0xff, len >> 8);
164
165 for (byte = 0; byte < len; byte++)
166 printk(" %02x", (unsigned char)buf[byte]);
162 printk("\n"); 167 printk("\n");
163 } 168 }
164 169
165 mutex_lock(&dev->ctrl_urb_lock); 170 mutex_lock(&dev->ctrl_urb_lock);
166 memcpy(dev->urb_buf, buf, len); 171 memcpy(dev->urb_buf, buf, len);
167 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), req, 172 ret = usb_control_msg(dev->udev, pipe, req,
168 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 173 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
169 0x0000, reg, dev->urb_buf, len, HZ); 174 0x0000, reg, dev->urb_buf, len, HZ);
170 mutex_unlock(&dev->ctrl_urb_lock); 175 mutex_unlock(&dev->ctrl_urb_lock);
@@ -187,15 +192,21 @@ int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len)
187 Not sure what happens on reading GPO register. 192 Not sure what happens on reading GPO register.
188 */ 193 */
189 if (rc >= 0) { 194 if (rc >= 0) {
190 if (reg == EM2880_R04_GPO) 195 if (reg == dev->reg_gpo_num)
191 dev->reg_gpo = buf[0]; 196 dev->reg_gpo = buf[0];
192 else if (reg == EM28XX_R08_GPIO) 197 else if (reg == dev->reg_gpio_num)
193 dev->reg_gpio = buf[0]; 198 dev->reg_gpio = buf[0];
194 } 199 }
195 200
196 return rc; 201 return rc;
197} 202}
198 203
204/* Write a single register */
205int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
206{
207 return em28xx_write_regs(dev, reg, &val, 1);
208}
209
199/* 210/*
200 * em28xx_write_reg_bits() 211 * em28xx_write_reg_bits()
201 * sets only some bits (specified by bitmask) of a register, by first reading 212 * sets only some bits (specified by bitmask) of a register, by first reading
@@ -208,9 +219,9 @@ static int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
208 u8 newval; 219 u8 newval;
209 220
210 /* Uses cache for gpo/gpio registers */ 221 /* Uses cache for gpo/gpio registers */
211 if (reg == EM2880_R04_GPO) 222 if (reg == dev->reg_gpo_num)
212 oldval = dev->reg_gpo; 223 oldval = dev->reg_gpo;
213 else if (reg == EM28XX_R08_GPIO) 224 else if (reg == dev->reg_gpio_num)
214 oldval = dev->reg_gpio; 225 oldval = dev->reg_gpio;
215 else 226 else
216 oldval = em28xx_read_reg(dev, reg); 227 oldval = em28xx_read_reg(dev, reg);
@@ -224,15 +235,70 @@ static int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
224} 235}
225 236
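em28xx_write_reg_bits() does its read-modify-write against a shadow copy of the GPO/GPIO registers: em28xx_write_regs() refreshes the cache on every write, and partial-bit updates read from that cache instead of from the hardware. This hunk makes the cached addresses come from dev->reg_gpo_num/reg_gpio_num, so chips such as the em2874, whose GPIO lives at EM2874_R80_GPIO, use the same path. A compact model of the cache-backed update:

#include <stdint.h>
#include <stdio.h>

struct dev_model {
	uint16_t reg_gpio_num;	/* which register is shadowed (chip-dependent) */
	uint8_t  reg_gpio;	/* cached last-written value */
};

/* Stand-in for the real USB register write; also refreshes the shadow copy. */
static int write_reg(struct dev_model *dev, uint16_t reg, uint8_t val)
{
	printf("write reg 0x%02x = 0x%02x\n", reg, val);
	if (reg == dev->reg_gpio_num)
		dev->reg_gpio = val;
	return 0;
}

static int write_reg_bits(struct dev_model *dev, uint16_t reg,
			  uint8_t val, uint8_t bitmask)
{
	/* Prefer the cached value for the shadowed register; anything else
	 * would be read back from the hardware in the real driver. */
	uint8_t oldval = (reg == dev->reg_gpio_num) ? dev->reg_gpio : 0;
	uint8_t newval = (oldval & ~bitmask) | (val & bitmask);

	return write_reg(dev, reg, newval);
}

int main(void)
{
	struct dev_model dev = { .reg_gpio_num = 0x08, .reg_gpio = 0xff };

	write_reg_bits(&dev, 0x08, 0x00, 0x10);	/* clear only bit 4 -> 0xef */
	return 0;
}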
226/* 237/*
238 * em28xx_is_ac97_ready()
239 * Checks if ac97 is ready
240 */
241static int em28xx_is_ac97_ready(struct em28xx *dev)
242{
243 int ret, i;
244
245 /* Wait up to 50 ms for AC97 command to complete */
246 for (i = 0; i < 10; i++, msleep(5)) {
247 ret = em28xx_read_reg(dev, EM28XX_R43_AC97BUSY);
248 if (ret < 0)
249 return ret;
250
251 if (!(ret & 0x01))
252 return 0;
253 }
254
255 em28xx_warn("AC97 command still being executed: not handled properly!\n");
256 return -EBUSY;
257}
258
259/*
260 * em28xx_read_ac97()
261 * read a 16 bit value from the specified AC97 address (LSB first!)
262 */
263int em28xx_read_ac97(struct em28xx *dev, u8 reg)
264{
265 int ret;
266 u8 addr = (reg & 0x7f) | 0x80;
267 u16 val;
268
269 ret = em28xx_is_ac97_ready(dev);
270 if (ret < 0)
271 return ret;
272
273 ret = em28xx_write_regs(dev, EM28XX_R42_AC97ADDR, &addr, 1);
274 if (ret < 0)
275 return ret;
276
277 ret = dev->em28xx_read_reg_req_len(dev, 0, EM28XX_R40_AC97LSB,
278 (u8 *)&val, sizeof(val));
279
280 if (ret < 0)
281 return ret;
282 return le16_to_cpu(val);
283}
284
285/*
227 * em28xx_write_ac97() 286 * em28xx_write_ac97()
228 * write a 16 bit value to the specified AC97 address (LSB first!) 287 * write a 16 bit value to the specified AC97 address (LSB first!)
229 */ 288 */
230static int em28xx_write_ac97(struct em28xx *dev, u8 reg, u8 *val) 289int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val)
231{ 290{
232 int ret, i; 291 int ret;
233 u8 addr = reg & 0x7f; 292 u8 addr = reg & 0x7f;
293 __le16 value;
234 294
235 ret = em28xx_write_regs(dev, EM28XX_R40_AC97LSB, val, 2); 295 value = cpu_to_le16(val);
296
297 ret = em28xx_is_ac97_ready(dev);
298 if (ret < 0)
299 return ret;
300
301 ret = em28xx_write_regs(dev, EM28XX_R40_AC97LSB, (u8 *) &value, 2);
236 if (ret < 0) 302 if (ret < 0)
237 return ret; 303 return ret;
238 304
@@ -240,58 +306,74 @@ static int em28xx_write_ac97(struct em28xx *dev, u8 reg, u8 *val)
240 if (ret < 0) 306 if (ret < 0)
241 return ret; 307 return ret;
242 308
243 /* Wait up to 50 ms for AC97 command to complete */ 309 return 0;
244 for (i = 0; i < 10; i++) { 310}
245 ret = em28xx_read_reg(dev, EM28XX_R43_AC97BUSY);
246 if (ret < 0)
247 return ret;
248 311
249 if (!(ret & 0x01)) 312struct em28xx_vol_table {
250 return 0; 313 enum em28xx_amux mux;
251 msleep(5); 314 u8 reg;
315};
316
317static struct em28xx_vol_table inputs[] = {
318 { EM28XX_AMUX_VIDEO, AC97_VIDEO_VOL },
319 { EM28XX_AMUX_LINE_IN, AC97_LINEIN_VOL },
320 { EM28XX_AMUX_PHONE, AC97_PHONE_VOL },
321 { EM28XX_AMUX_MIC, AC97_MIC_VOL },
322 { EM28XX_AMUX_CD, AC97_CD_VOL },
323 { EM28XX_AMUX_AUX, AC97_AUX_VOL },
324 { EM28XX_AMUX_PCM_OUT, AC97_PCM_OUT_VOL },
325};
326
327static int set_ac97_input(struct em28xx *dev)
328{
329 int ret, i;
330 enum em28xx_amux amux = dev->ctl_ainput;
331
332 /* EM28XX_AMUX_VIDEO2 is a special case used to indicate that
333 em28xx should point to LINE IN, while AC97 should use VIDEO
334 */
335 if (amux == EM28XX_AMUX_VIDEO2)
336 amux = EM28XX_AMUX_VIDEO;
337
338 /* Mute all entries but the one that was selected */
339 for (i = 0; i < ARRAY_SIZE(inputs); i++) {
340 if (amux == inputs[i].mux)
341 ret = em28xx_write_ac97(dev, inputs[i].reg, 0x0808);
342 else
343 ret = em28xx_write_ac97(dev, inputs[i].reg, 0x8000);
344
345 if (ret < 0)
346 em28xx_warn("couldn't setup AC97 register %d\n",
347 inputs[i].reg);
252 } 348 }
253 em28xx_warn("AC97 command still being executed: not handled properly!\n");
254 return 0; 349 return 0;
255} 350}
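set_ac97_input() and the volume code later in this file lean on the standard AC'97 mixer word format: bit 15 mutes the register, and the high and low bytes hold the left and right channel attenuation (1.5 dB per step according to the AC'97 spec; the step size itself is not stated in this diff). That is why 0x8000 means "muted", why 0x0808 selects an input at a moderate fixed level, and why the analog volume path builds its word as (0x1f - volume) duplicated into both bytes. A worked example of the encoding:

#include <stdint.h>
#include <stdio.h>

/* Build an AC'97 stereo volume word from the driver's 0..31 volume scale. */
static uint16_t ac97_vol_word(unsigned int volume, int mute)
{
	unsigned int att = 0x1f - (volume & 0x1f);	/* 0 = loudest, 0x1f = quietest */
	uint16_t word = att | (att << 8);		/* same level on both channels */

	if (mute)
		word |= 0x8000;				/* bit 15: mute */
	return word;
}

int main(void)
{
	printf("full volume: 0x%04x\n", ac97_vol_word(31, 0));	/* 0x0000 */
	printf("muted:       0x%04x\n", ac97_vol_word(31, 1));	/* 0x8000 */
	printf("selected in: 0x%04x\n", ac97_vol_word(23, 0));	/* 0x0808 */
	return 0;
}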
256 351
257static int em28xx_set_audio_source(struct em28xx *dev) 352static int em28xx_set_audio_source(struct em28xx *dev)
258{ 353{
259 static char *enable = "\x08\x08";
260 static char *disable = "\x08\x88";
261 char *video = enable, *line = disable;
262 int ret; 354 int ret;
263 u8 input; 355 u8 input;
264 356
265 if (dev->is_em2800) { 357 if (dev->board.is_em2800) {
266 if (dev->ctl_ainput) 358 if (dev->ctl_ainput == EM28XX_AMUX_VIDEO)
267 input = EM2800_AUDIO_SRC_LINE;
268 else
269 input = EM2800_AUDIO_SRC_TUNER; 359 input = EM2800_AUDIO_SRC_TUNER;
360 else
361 input = EM2800_AUDIO_SRC_LINE;
270 362
271 ret = em28xx_write_regs(dev, EM2800_R08_AUDIOSRC, &input, 1); 363 ret = em28xx_write_regs(dev, EM2800_R08_AUDIOSRC, &input, 1);
272 if (ret < 0) 364 if (ret < 0)
273 return ret; 365 return ret;
274 } 366 }
275 367
276 if (dev->has_msp34xx) 368 if (dev->board.has_msp34xx)
277 input = EM28XX_AUDIO_SRC_TUNER; 369 input = EM28XX_AUDIO_SRC_TUNER;
278 else { 370 else {
279 switch (dev->ctl_ainput) { 371 switch (dev->ctl_ainput) {
280 case EM28XX_AMUX_VIDEO: 372 case EM28XX_AMUX_VIDEO:
281 input = EM28XX_AUDIO_SRC_TUNER; 373 input = EM28XX_AUDIO_SRC_TUNER;
282 break; 374 break;
283 case EM28XX_AMUX_LINE_IN: 375 default:
284 input = EM28XX_AUDIO_SRC_LINE; 376 input = EM28XX_AUDIO_SRC_LINE;
285 video = disable;
286 line = enable;
287 break;
288 case EM28XX_AMUX_AC97_VIDEO:
289 input = EM28XX_AUDIO_SRC_LINE;
290 break;
291 case EM28XX_AMUX_AC97_LINE_IN:
292 input = EM28XX_AUDIO_SRC_LINE;
293 video = disable;
294 line = enable;
295 break; 377 break;
296 } 378 }
297 } 379 }
@@ -301,41 +383,50 @@ static int em28xx_set_audio_source(struct em28xx *dev)
301 return ret; 383 return ret;
302 msleep(5); 384 msleep(5);
303 385
304 /* Sets AC97 mixer registers 386 switch (dev->audio_mode.ac97) {
305 This is seems to be needed, even for non-ac97 configs 387 case EM28XX_NO_AC97:
306 */ 388 break;
307 ret = em28xx_write_ac97(dev, EM28XX_R14_VIDEO_AC97, video); 389 default:
308 if (ret < 0) 390 ret = set_ac97_input(dev);
309 return ret; 391 }
310
311 ret = em28xx_write_ac97(dev, EM28XX_R10_LINE_IN_AC97, line);
312 392
313 return ret; 393 return ret;
314} 394}
315 395
396struct em28xx_vol_table outputs[] = {
397 { EM28XX_AOUT_MASTER, AC97_MASTER_VOL },
398 { EM28XX_AOUT_LINE, AC97_LINE_LEVEL_VOL },
399 { EM28XX_AOUT_MONO, AC97_MASTER_MONO_VOL },
400 { EM28XX_AOUT_LFE, AC97_LFE_MASTER_VOL },
401 { EM28XX_AOUT_SURR, AC97_SURR_MASTER_VOL },
402};
403
316int em28xx_audio_analog_set(struct em28xx *dev) 404int em28xx_audio_analog_set(struct em28xx *dev)
317{ 405{
318 int ret; 406 int ret, i;
319 char s[2] = { 0x00, 0x00 }; 407 u8 xclk;
320 u8 xclk = 0x07;
321
322 s[0] |= 0x1f - dev->volume;
323 s[1] |= 0x1f - dev->volume;
324
325 /* Mute */
326 s[1] |= 0x80;
327 ret = em28xx_write_ac97(dev, EM28XX_R02_MASTER_AC97, s);
328 408
329 if (ret < 0) 409 if (!dev->audio_mode.has_audio)
330 return ret; 410 return 0;
331 411
332 if (dev->has_12mhz_i2s) 412 /* It is assumed that all devices use master volume for output.
333 xclk |= 0x20; 413 It would be possible to use also line output.
414 */
415 if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
416 /* Mute all outputs */
417 for (i = 0; i < ARRAY_SIZE(outputs); i++) {
418 ret = em28xx_write_ac97(dev, outputs[i].reg, 0x8000);
419 if (ret < 0)
420 em28xx_warn("couldn't setup AC97 register %d\n",
421 outputs[i].reg);
422 }
423 }
334 424
425 xclk = dev->board.xclk & 0x7f;
335 if (!dev->mute) 426 if (!dev->mute)
336 xclk |= 0x80; 427 xclk |= 0x80;
337 428
338 ret = em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, xclk, 0xa7); 429 ret = em28xx_write_reg(dev, EM28XX_R0F_XCLK, xclk);
339 if (ret < 0) 430 if (ret < 0)
340 return ret; 431 return ret;
341 msleep(10); 432 msleep(10);
@@ -343,36 +434,169 @@ int em28xx_audio_analog_set(struct em28xx *dev)
343 /* Selects the proper audio input */ 434 /* Selects the proper audio input */
344 ret = em28xx_set_audio_source(dev); 435 ret = em28xx_set_audio_source(dev);
345 436
346 /* Unmute device */ 437 /* Sets volume */
347 if (!dev->mute) 438 if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
348 s[1] &= ~0x80; 439 int vol;
349 ret = em28xx_write_ac97(dev, EM28XX_R02_MASTER_AC97, s); 440
441 /* LSB: left channel - both channels with the same level */
442 vol = (0x1f - dev->volume) | ((0x1f - dev->volume) << 8);
443
444 /* Mute device, if needed */
445 if (dev->mute)
446 vol |= 0x8000;
447
448 /* Sets volume */
449 for (i = 0; i < ARRAY_SIZE(outputs); i++) {
450 if (dev->ctl_aoutput & outputs[i].mux)
451 ret = em28xx_write_ac97(dev, outputs[i].reg,
452 vol);
453 if (ret < 0)
 454				em28xx_warn("couldn't set up AC97 register %d\n",
455 outputs[i].reg);
456 }
457 }
350 458
351 return ret; 459 return ret;
352} 460}
353EXPORT_SYMBOL_GPL(em28xx_audio_analog_set); 461EXPORT_SYMBOL_GPL(em28xx_audio_analog_set);
354 462
355int em28xx_colorlevels_set_default(struct em28xx *dev) 463int em28xx_audio_setup(struct em28xx *dev)
356{ 464{
357 em28xx_write_regs(dev, EM28XX_R20_YGAIN, "\x10", 1); /* contrast */ 465 int vid1, vid2, feat, cfg;
358 em28xx_write_regs(dev, EM28XX_R21_YOFFSET, "\x00", 1); /* brightness */ 466 u32 vid;
359 em28xx_write_regs(dev, EM28XX_R22_UVGAIN, "\x10", 1); /* saturation */ 467
360 em28xx_write_regs(dev, EM28XX_R23_UOFFSET, "\x00", 1); 468 if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874) {
361 em28xx_write_regs(dev, EM28XX_R24_VOFFSET, "\x00", 1); 469 /* Digital only device - don't load any alsa module */
362 em28xx_write_regs(dev, EM28XX_R25_SHARPNESS, "\x00", 1); 470 dev->audio_mode.has_audio = 0;
471 dev->has_audio_class = 0;
472 dev->has_alsa_audio = 0;
473 return 0;
474 }
475
 476	/* If the device doesn't support USB Audio Class, use the vendor class */
477 if (!dev->has_audio_class)
478 dev->has_alsa_audio = 1;
479
480 dev->audio_mode.has_audio = 1;
363 481
364 em28xx_write_regs(dev, EM28XX_R14_GAMMA, "\x20", 1); 482 /* See how this device is configured */
365 em28xx_write_regs(dev, EM28XX_R15_RGAIN, "\x20", 1); 483 cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
366 em28xx_write_regs(dev, EM28XX_R16_GGAIN, "\x20", 1); 484 if (cfg < 0)
367 em28xx_write_regs(dev, EM28XX_R17_BGAIN, "\x20", 1); 485 cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */
368 em28xx_write_regs(dev, EM28XX_R18_ROFFSET, "\x00", 1); 486 else
369 em28xx_write_regs(dev, EM28XX_R19_GOFFSET, "\x00", 1); 487 em28xx_info("Config register raw data: 0x%02x\n", cfg);
370 return em28xx_write_regs(dev, EM28XX_R1A_BOFFSET, "\x00", 1); 488
489 if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
490 EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
491 em28xx_info("I2S Audio (3 sample rates)\n");
492 dev->audio_mode.i2s_3rates = 1;
493 }
494 if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
495 EM28XX_CHIPCFG_I2S_5_SAMPRATES) {
496 em28xx_info("I2S Audio (5 sample rates)\n");
497 dev->audio_mode.i2s_5rates = 1;
498 }
499
500 if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
501 /* Skip the code that does AC97 vendor detection */
502 dev->audio_mode.ac97 = EM28XX_NO_AC97;
503 goto init_audio;
504 }
505
506 dev->audio_mode.ac97 = EM28XX_AC97_OTHER;
507
508 vid1 = em28xx_read_ac97(dev, AC97_VENDOR_ID1);
509 if (vid1 < 0) {
510 /* Device likely doesn't support AC97 */
511 em28xx_warn("AC97 chip type couldn't be determined\n");
512 goto init_audio;
513 }
514
515 vid2 = em28xx_read_ac97(dev, AC97_VENDOR_ID2);
516 if (vid2 < 0)
517 goto init_audio;
518
519 vid = vid1 << 16 | vid2;
520
521 dev->audio_mode.ac97_vendor_id = vid;
522 em28xx_warn("AC97 vendor ID = 0x%08x\n", vid);
523
524 feat = em28xx_read_ac97(dev, AC97_RESET);
525 if (feat < 0)
526 goto init_audio;
527
528 dev->audio_mode.ac97_feat = feat;
529 em28xx_warn("AC97 features = 0x%04x\n", feat);
530
531 /* Try to identify what audio processor we have */
532 if ((vid == 0xffffffff) && (feat == 0x6a90))
533 dev->audio_mode.ac97 = EM28XX_AC97_EM202;
534 else if ((vid >> 8) == 0x838476)
535 dev->audio_mode.ac97 = EM28XX_AC97_SIGMATEL;
536
537init_audio:
538 /* Reports detected AC97 processor */
539 switch (dev->audio_mode.ac97) {
540 case EM28XX_NO_AC97:
541 em28xx_info("No AC97 audio processor\n");
542 break;
543 case EM28XX_AC97_EM202:
544 em28xx_info("Empia 202 AC97 audio processor detected\n");
545 break;
546 case EM28XX_AC97_SIGMATEL:
 547		em28xx_info("Sigmatel audio processor detected (stac 97%02x)\n",
548 dev->audio_mode.ac97_vendor_id & 0xff);
549 break;
550 case EM28XX_AC97_OTHER:
551 em28xx_warn("Unknown AC97 audio processor detected!\n");
552 break;
553 default:
554 break;
555 }
556
557 return em28xx_audio_analog_set(dev);
558}
559EXPORT_SYMBOL_GPL(em28xx_audio_setup);
560
561int em28xx_colorlevels_set_default(struct em28xx *dev)
562{
563 em28xx_write_reg(dev, EM28XX_R20_YGAIN, 0x10); /* contrast */
564 em28xx_write_reg(dev, EM28XX_R21_YOFFSET, 0x00); /* brightness */
565 em28xx_write_reg(dev, EM28XX_R22_UVGAIN, 0x10); /* saturation */
566 em28xx_write_reg(dev, EM28XX_R23_UOFFSET, 0x00);
567 em28xx_write_reg(dev, EM28XX_R24_VOFFSET, 0x00);
568 em28xx_write_reg(dev, EM28XX_R25_SHARPNESS, 0x00);
569
570 em28xx_write_reg(dev, EM28XX_R14_GAMMA, 0x20);
571 em28xx_write_reg(dev, EM28XX_R15_RGAIN, 0x20);
572 em28xx_write_reg(dev, EM28XX_R16_GGAIN, 0x20);
573 em28xx_write_reg(dev, EM28XX_R17_BGAIN, 0x20);
574 em28xx_write_reg(dev, EM28XX_R18_ROFFSET, 0x00);
575 em28xx_write_reg(dev, EM28XX_R19_GOFFSET, 0x00);
576 return em28xx_write_reg(dev, EM28XX_R1A_BOFFSET, 0x00);
371} 577}
372 578
373int em28xx_capture_start(struct em28xx *dev, int start) 579int em28xx_capture_start(struct em28xx *dev, int start)
374{ 580{
375 int rc; 581 int rc;
582
583 if (dev->chip_id == CHIP_ID_EM2874) {
584 /* The Transport Stream Enable Register moved in em2874 */
585 if (!start) {
586 rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
587 0x00,
588 EM2874_TS1_CAPTURE_ENABLE);
589 return rc;
590 }
591
592 /* Enable Transport Stream */
593 rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
594 EM2874_TS1_CAPTURE_ENABLE,
595 EM2874_TS1_CAPTURE_ENABLE);
596 return rc;
597 }
598
599
376 /* FIXME: which is the best order? */ 600 /* FIXME: which is the best order? */
377 /* video registers are sampled by VREF */ 601 /* video registers are sampled by VREF */
378 rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP, 602 rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP,
@@ -382,28 +606,37 @@ int em28xx_capture_start(struct em28xx *dev, int start)
382 606
383 if (!start) { 607 if (!start) {
384 /* disable video capture */ 608 /* disable video capture */
385 rc = em28xx_write_regs(dev, EM28XX_R12_VINENABLE, "\x27", 1); 609 rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x27);
386 return rc; 610 return rc;
387 } 611 }
388 612
389 /* enable video capture */ 613 /* enable video capture */
390 rc = em28xx_write_regs_req(dev, 0x00, 0x48, "\x00", 1); 614 rc = em28xx_write_reg(dev, 0x48, 0x00);
391 615
392 if (dev->mode == EM28XX_ANALOG_MODE) 616 if (dev->mode == EM28XX_ANALOG_MODE)
393 rc = em28xx_write_regs(dev, EM28XX_R12_VINENABLE, "\x67", 1); 617 rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x67);
394 else 618 else
395 rc = em28xx_write_regs(dev, EM28XX_R12_VINENABLE, "\x37", 1); 619 rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x37);
396 620
397 msleep(6); 621 msleep(6);
398 622
399 return rc; 623 return rc;
400} 624}
401 625
402int em28xx_outfmt_set_yuv422(struct em28xx *dev) 626int em28xx_set_outfmt(struct em28xx *dev)
403{ 627{
404 em28xx_write_regs(dev, EM28XX_R27_OUTFMT, "\x34", 1); 628 int ret;
405 em28xx_write_regs(dev, EM28XX_R10_VINMODE, "\x10", 1); 629
406 return em28xx_write_regs(dev, EM28XX_R11_VINCTRL, "\x11", 1); 630 ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT,
631 dev->format->reg | 0x20, 0x3f);
632 if (ret < 0)
633 return ret;
634
635 ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, 0x10);
636 if (ret < 0)
637 return ret;
638
639 return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, 0x11);
407} 640}
408 641
409static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax, 642static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
@@ -440,7 +673,7 @@ static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
440{ 673{
441 u8 mode; 674 u8 mode;
442 /* the em2800 scaler only supports scaling down to 50% */ 675 /* the em2800 scaler only supports scaling down to 50% */
443 if (dev->is_em2800) 676 if (dev->board.is_em2800)
444 mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00); 677 mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
445 else { 678 else {
446 u8 buf[2]; 679 u8 buf[2];
@@ -464,7 +697,7 @@ int em28xx_resolution_set(struct em28xx *dev)
464 width = norm_maxw(dev); 697 width = norm_maxw(dev);
465 height = norm_maxh(dev) >> 1; 698 height = norm_maxh(dev) >> 1;
466 699
467 em28xx_outfmt_set_yuv422(dev); 700 em28xx_set_outfmt(dev);
468 em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2); 701 em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
469 em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2); 702 em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
470 return em28xx_scaler_set(dev, dev->hscale, dev->vscale); 703 return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
@@ -519,12 +752,14 @@ int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio)
519 if (!gpio) 752 if (!gpio)
520 return rc; 753 return rc;
521 754
522 dev->em28xx_write_regs_req(dev, 0x00, 0x48, "\x00", 1); 755 if (dev->mode != EM28XX_SUSPEND) {
523 if (dev->mode == EM28XX_ANALOG_MODE) 756 em28xx_write_reg(dev, 0x48, 0x00);
524 dev->em28xx_write_regs_req(dev, 0x00, 0x12, "\x67", 1); 757 if (dev->mode == EM28XX_ANALOG_MODE)
525 else 758 em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x67);
526 dev->em28xx_write_regs_req(dev, 0x00, 0x12, "\x37", 1); 759 else
527 msleep(6); 760 em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x37);
761 msleep(6);
762 }
528 763
529 /* Send GPIO reset sequences specified at board entry */ 764 /* Send GPIO reset sequences specified at board entry */
530 while (gpio->sleep >= 0) { 765 while (gpio->sleep >= 0) {
@@ -549,17 +784,20 @@ int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode)
549 if (dev->mode == set_mode) 784 if (dev->mode == set_mode)
550 return 0; 785 return 0;
551 786
552 if (set_mode == EM28XX_MODE_UNDEFINED) { 787 if (set_mode == EM28XX_SUSPEND) {
553 dev->mode = set_mode; 788 dev->mode = set_mode;
554 return 0; 789
790 /* FIXME: add suspend support for ac97 */
791
792 return em28xx_gpio_set(dev, dev->board.suspend_gpio);
555 } 793 }
556 794
557 dev->mode = set_mode; 795 dev->mode = set_mode;
558 796
559 if (dev->mode == EM28XX_DIGITAL_MODE) 797 if (dev->mode == EM28XX_DIGITAL_MODE)
560 return em28xx_gpio_set(dev, dev->digital_gpio); 798 return em28xx_gpio_set(dev, dev->board.dvb_gpio);
561 else 799 else
562 return em28xx_gpio_set(dev, dev->analog_gpio); 800 return em28xx_gpio_set(dev, INPUT(dev->ctl_input)->gpio);
563} 801}
564EXPORT_SYMBOL_GPL(em28xx_set_mode); 802EXPORT_SYMBOL_GPL(em28xx_set_mode);
565 803
@@ -738,3 +976,145 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
738 return 0; 976 return 0;
739} 977}
740EXPORT_SYMBOL_GPL(em28xx_init_isoc); 978EXPORT_SYMBOL_GPL(em28xx_init_isoc);
979
980/*
981 * em28xx_wake_i2c()
982 * configure i2c attached devices
983 */
984void em28xx_wake_i2c(struct em28xx *dev)
985{
986 struct v4l2_routing route;
987 int zero = 0;
988
989 route.input = INPUT(dev->ctl_input)->vmux;
990 route.output = 0;
991 em28xx_i2c_call_clients(dev, VIDIOC_INT_RESET, &zero);
992 em28xx_i2c_call_clients(dev, VIDIOC_INT_S_VIDEO_ROUTING, &route);
993 em28xx_i2c_call_clients(dev, VIDIOC_STREAMON, NULL);
994}
995
996/*
997 * Device control list
998 */
999
1000static LIST_HEAD(em28xx_devlist);
1001static DEFINE_MUTEX(em28xx_devlist_mutex);
1002
1003struct em28xx *em28xx_get_device(struct inode *inode,
1004 enum v4l2_buf_type *fh_type,
1005 int *has_radio)
1006{
1007 struct em28xx *h, *dev = NULL;
1008 int minor = iminor(inode);
1009
1010 *fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1011 *has_radio = 0;
1012
1013 mutex_lock(&em28xx_devlist_mutex);
1014 list_for_each_entry(h, &em28xx_devlist, devlist) {
1015 if (h->vdev->minor == minor)
1016 dev = h;
1017 if (h->vbi_dev->minor == minor) {
1018 dev = h;
1019 *fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
1020 }
1021 if (h->radio_dev &&
1022 h->radio_dev->minor == minor) {
1023 dev = h;
1024 *has_radio = 1;
1025 }
1026 }
1027 mutex_unlock(&em28xx_devlist_mutex);
1028
1029 return dev;
1030}
1031
1032/*
 1033 * em28xx_release_resources()
 1034 * unregisters the v4l2, i2c and usb devices
 1035 * called when the device gets disconnected or at module unload
1036*/
1037void em28xx_remove_from_devlist(struct em28xx *dev)
1038{
1039 mutex_lock(&em28xx_devlist_mutex);
1040 list_del(&dev->devlist);
1041 mutex_unlock(&em28xx_devlist_mutex);
1042};
1043
1044void em28xx_add_into_devlist(struct em28xx *dev)
1045{
1046 mutex_lock(&em28xx_devlist_mutex);
1047 list_add_tail(&dev->devlist, &em28xx_devlist);
1048 mutex_unlock(&em28xx_devlist_mutex);
1049};
1050
1051/*
1052 * Extension interface
1053 */
1054
1055static LIST_HEAD(em28xx_extension_devlist);
1056static DEFINE_MUTEX(em28xx_extension_devlist_lock);
1057
1058int em28xx_register_extension(struct em28xx_ops *ops)
1059{
1060 struct em28xx *dev = NULL;
1061
1062 mutex_lock(&em28xx_devlist_mutex);
1063 mutex_lock(&em28xx_extension_devlist_lock);
1064 list_add_tail(&ops->next, &em28xx_extension_devlist);
1065 list_for_each_entry(dev, &em28xx_devlist, devlist) {
1066 if (dev)
1067 ops->init(dev);
1068 }
1069 printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
1070 mutex_unlock(&em28xx_extension_devlist_lock);
1071 mutex_unlock(&em28xx_devlist_mutex);
1072 return 0;
1073}
1074EXPORT_SYMBOL(em28xx_register_extension);
1075
1076void em28xx_unregister_extension(struct em28xx_ops *ops)
1077{
1078 struct em28xx *dev = NULL;
1079
1080 mutex_lock(&em28xx_devlist_mutex);
1081 list_for_each_entry(dev, &em28xx_devlist, devlist) {
1082 if (dev)
1083 ops->fini(dev);
1084 }
1085
1086 mutex_lock(&em28xx_extension_devlist_lock);
1087 printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
1088 list_del(&ops->next);
1089 mutex_unlock(&em28xx_extension_devlist_lock);
1090 mutex_unlock(&em28xx_devlist_mutex);
1091}
1092EXPORT_SYMBOL(em28xx_unregister_extension);
1093
1094void em28xx_init_extension(struct em28xx *dev)
1095{
1096 struct em28xx_ops *ops = NULL;
1097
1098 mutex_lock(&em28xx_extension_devlist_lock);
1099 if (!list_empty(&em28xx_extension_devlist)) {
1100 list_for_each_entry(ops, &em28xx_extension_devlist, next) {
1101 if (ops->init)
1102 ops->init(dev);
1103 }
1104 }
1105 mutex_unlock(&em28xx_extension_devlist_lock);
1106}
1107
1108void em28xx_close_extension(struct em28xx *dev)
1109{
1110 struct em28xx_ops *ops = NULL;
1111
1112 mutex_lock(&em28xx_extension_devlist_lock);
1113 if (!list_empty(&em28xx_extension_devlist)) {
1114 list_for_each_entry(ops, &em28xx_extension_devlist, next) {
1115 if (ops->fini)
1116 ops->fini(dev);
1117 }
1118 }
1119 mutex_unlock(&em28xx_extension_devlist_lock);
1120}
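
For reference, a minimal standalone sketch (not driver code) of the AC97 volume handling in em28xx_audio_analog_set() above. The helper name and the plain-C form are made up for illustration, but the encoding follows the writes in the hunk: the same 5-bit attenuation level (0x00 loudest, 0x1f quietest) is placed in both channel fields of the 16-bit word, and bit 15 mutes the output.

#include <stdint.h>

/* Build the 16-bit word written to an AC97 output-volume register. */
static uint16_t ac97_out_vol(unsigned int volume, int mute)
{
	uint16_t att = 0x1f - (volume & 0x1f);	/* driver level -> attenuation */
	uint16_t word = att | (att << 8);	/* same level for both channels */

	if (mute)
		word |= 0x8000;			/* bit 15 mutes this output */

	return word;
}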
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index c99e2383b7ec..d38cb21834d9 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -161,7 +161,7 @@ static int stop_streaming(struct em28xx_dvb *dvb)
161 161
162 em28xx_uninit_isoc(dev); 162 em28xx_uninit_isoc(dev);
163 163
164 em28xx_set_mode(dev, EM28XX_MODE_UNDEFINED); 164 em28xx_set_mode(dev, EM28XX_SUSPEND);
165 165
166 return 0; 166 return 0;
167} 167}
@@ -215,7 +215,7 @@ static int em28xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
215 if (acquire) 215 if (acquire)
216 return em28xx_set_mode(dev, EM28XX_DIGITAL_MODE); 216 return em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
217 else 217 else
218 return em28xx_set_mode(dev, EM28XX_MODE_UNDEFINED); 218 return em28xx_set_mode(dev, EM28XX_SUSPEND);
219} 219}
220 220
221/* ------------------------------------------------------------------ */ 221/* ------------------------------------------------------------------ */
@@ -393,7 +393,7 @@ static int dvb_init(struct em28xx *dev)
393 int result = 0; 393 int result = 0;
394 struct em28xx_dvb *dvb; 394 struct em28xx_dvb *dvb;
395 395
396 if (!dev->has_dvb) { 396 if (!dev->board.has_dvb) {
397 /* This device does not support the extension */ 397 /* This device does not support the extension */
398 return 0; 398 return 0;
399 } 399 }
@@ -409,8 +409,10 @@ static int dvb_init(struct em28xx *dev)
409 em28xx_set_mode(dev, EM28XX_DIGITAL_MODE); 409 em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
410 /* init frontend */ 410 /* init frontend */
411 switch (dev->model) { 411 switch (dev->model) {
412 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
412 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: 413 case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
413 case EM2880_BOARD_PINNACLE_PCTV_HD_PRO: 414 case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
415 case EM2883_BOARD_KWORLD_HYBRID_A316:
414 case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600: 416 case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
415 dvb->frontend = dvb_attach(lgdt330x_attach, 417 dvb->frontend = dvb_attach(lgdt330x_attach,
416 &em2880_lgdt3303_dev, 418 &em2880_lgdt3303_dev,
@@ -466,12 +468,12 @@ static int dvb_init(struct em28xx *dev)
466 if (result < 0) 468 if (result < 0)
467 goto out_free; 469 goto out_free;
468 470
469 em28xx_set_mode(dev, EM28XX_MODE_UNDEFINED); 471 em28xx_set_mode(dev, EM28XX_SUSPEND);
470 printk(KERN_INFO "Successfully loaded em28xx-dvb\n"); 472 printk(KERN_INFO "Successfully loaded em28xx-dvb\n");
471 return 0; 473 return 0;
472 474
473out_free: 475out_free:
474 em28xx_set_mode(dev, EM28XX_MODE_UNDEFINED); 476 em28xx_set_mode(dev, EM28XX_SUSPEND);
475 kfree(dvb); 477 kfree(dvb);
476 dev->dvb = NULL; 478 dev->dvb = NULL;
477 return result; 479 return result;
@@ -479,7 +481,7 @@ out_free:
479 481
480static int dvb_fini(struct em28xx *dev) 482static int dvb_fini(struct em28xx *dev)
481{ 483{
482 if (!dev->has_dvb) { 484 if (!dev->board.has_dvb) {
483 /* This device does not support the extension */ 485 /* This device does not support the extension */
484 return 0; 486 return 0;
485 } 487 }
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 2360c61ddca9..d69f0efcc9aa 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -250,7 +250,7 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
250 (msgs[i].flags & I2C_M_RD) ? "read" : "write", 250 (msgs[i].flags & I2C_M_RD) ? "read" : "write",
251 i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len); 251 i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len);
252 if (!msgs[i].len) { /* no len: check only for device presence */ 252 if (!msgs[i].len) { /* no len: check only for device presence */
253 if (dev->is_em2800) 253 if (dev->board.is_em2800)
254 rc = em2800_i2c_check_for_device(dev, addr); 254 rc = em2800_i2c_check_for_device(dev, addr);
255 else 255 else
256 rc = em28xx_i2c_check_for_device(dev, addr); 256 rc = em28xx_i2c_check_for_device(dev, addr);
@@ -261,7 +261,7 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
261 261
262 } else if (msgs[i].flags & I2C_M_RD) { 262 } else if (msgs[i].flags & I2C_M_RD) {
263 /* read bytes */ 263 /* read bytes */
264 if (dev->is_em2800) 264 if (dev->board.is_em2800)
265 rc = em2800_i2c_recv_bytes(dev, addr, 265 rc = em2800_i2c_recv_bytes(dev, addr,
266 msgs[i].buf, 266 msgs[i].buf,
267 msgs[i].len); 267 msgs[i].len);
@@ -279,7 +279,7 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
279 for (byte = 0; byte < msgs[i].len; byte++) 279 for (byte = 0; byte < msgs[i].len; byte++)
280 printk(" %02x", msgs[i].buf[byte]); 280 printk(" %02x", msgs[i].buf[byte]);
281 } 281 }
282 if (dev->is_em2800) 282 if (dev->board.is_em2800)
283 rc = em2800_i2c_send_bytes(dev, addr, 283 rc = em2800_i2c_send_bytes(dev, addr,
284 msgs[i].buf, 284 msgs[i].buf,
285 msgs[i].len); 285 msgs[i].len);
@@ -332,6 +332,17 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
332 struct em28xx_eeprom *em_eeprom = (void *)eedata; 332 struct em28xx_eeprom *em_eeprom = (void *)eedata;
333 int i, err, size = len, block; 333 int i, err, size = len, block;
334 334
335 if (dev->chip_id == CHIP_ID_EM2874) {
336 /* Empia switched to a 16-bit addressable eeprom in newer
337 devices. While we could certainly write a routine to read
338 the eeprom, there is nothing of use in there that cannot be
339 accessed through registers, and there is the risk that we
340 could corrupt the eeprom (since a 16-bit read call is
341 interpreted as a write call by 8-bit eeproms).
342 */
343 return 0;
344 }
345
335 dev->i2c_client.addr = 0xa0 >> 1; 346 dev->i2c_client.addr = 0xa0 >> 1;
336 347
337 /* Check if board has eeprom */ 348 /* Check if board has eeprom */
@@ -377,47 +388,49 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
377 if (em_eeprom->id == 0x9567eb1a) 388 if (em_eeprom->id == 0x9567eb1a)
378 dev->hash = em28xx_hash_mem(eedata, len, 32); 389 dev->hash = em28xx_hash_mem(eedata, len, 32);
379 390
380 printk(KERN_INFO "EEPROM ID= 0x%08x, hash = 0x%08lx\n", 391 printk(KERN_INFO "%s: EEPROM ID= 0x%08x, EEPROM hash = 0x%08lx\n",
381 em_eeprom->id, dev->hash); 392 dev->name, em_eeprom->id, dev->hash);
382 printk(KERN_INFO "Vendor/Product ID= %04x:%04x\n", em_eeprom->vendor_ID, 393
383 em_eeprom->product_ID); 394 printk(KERN_INFO "%s: EEPROM info:\n", dev->name);
384 395
385 switch (em_eeprom->chip_conf >> 4 & 0x3) { 396 switch (em_eeprom->chip_conf >> 4 & 0x3) {
386 case 0: 397 case 0:
387 printk(KERN_INFO "No audio on board.\n"); 398 printk(KERN_INFO "%s:\tNo audio on board.\n", dev->name);
388 break; 399 break;
389 case 1: 400 case 1:
390 printk(KERN_INFO "AC97 audio (5 sample rates)\n"); 401 printk(KERN_INFO "%s:\tAC97 audio (5 sample rates)\n",
402 dev->name);
391 break; 403 break;
392 case 2: 404 case 2:
393 printk(KERN_INFO "I2S audio, sample rate=32k\n"); 405 printk(KERN_INFO "%s:\tI2S audio, sample rate=32k\n", dev->name);
394 break; 406 break;
395 case 3: 407 case 3:
396 printk(KERN_INFO "I2S audio, 3 sample rates\n"); 408 printk(KERN_INFO "%s:\tI2S audio, 3 sample rates\n", dev->name);
397 break; 409 break;
398 } 410 }
399 411
400 if (em_eeprom->chip_conf & 1 << 3) 412 if (em_eeprom->chip_conf & 1 << 3)
401 printk(KERN_INFO "USB Remote wakeup capable\n"); 413 printk(KERN_INFO "%s:\tUSB Remote wakeup capable\n", dev->name);
402 414
403 if (em_eeprom->chip_conf & 1 << 2) 415 if (em_eeprom->chip_conf & 1 << 2)
404 printk(KERN_INFO "USB Self power capable\n"); 416 printk(KERN_INFO "%s:\tUSB Self power capable\n", dev->name);
405 417
406 switch (em_eeprom->chip_conf & 0x3) { 418 switch (em_eeprom->chip_conf & 0x3) {
407 case 0: 419 case 0:
408 printk(KERN_INFO "500mA max power\n"); 420 printk(KERN_INFO "%s:\t500mA max power\n", dev->name);
409 break; 421 break;
410 case 1: 422 case 1:
411 printk(KERN_INFO "400mA max power\n"); 423 printk(KERN_INFO "%s:\t400mA max power\n", dev->name);
412 break; 424 break;
413 case 2: 425 case 2:
414 printk(KERN_INFO "300mA max power\n"); 426 printk(KERN_INFO "%s:\t300mA max power\n", dev->name);
415 break; 427 break;
416 case 3: 428 case 3:
417 printk(KERN_INFO "200mA max power\n"); 429 printk(KERN_INFO "%s:\t200mA max power\n", dev->name);
418 break; 430 break;
419 } 431 }
420 printk(KERN_INFO "Table at 0x%02x, strings=0x%04x, 0x%04x, 0x%04x\n", 432 printk(KERN_INFO "%s:\tTable at 0x%02x, strings=0x%04x, 0x%04x, 0x%04x\n",
433 dev->name,
421 em_eeprom->string_idx_table, 434 em_eeprom->string_idx_table,
422 em_eeprom->string1, 435 em_eeprom->string1,
423 em_eeprom->string2, 436 em_eeprom->string2,
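
As a minimal standalone sketch (not driver code) of the chip_conf decoding done by em28xx_i2c_eeprom() above; the function name is invented, and the bit layout is taken from the switch statements in the hunk: bits [5:4] select the audio configuration, bit 3 marks remote-wakeup capability, bit 2 self-power capability, and bits [1:0] the maximum bus power.

#include <stdio.h>

/* Decode the eeprom chip_conf word into the same summary the driver prints. */
static void decode_chip_conf(unsigned int chip_conf)
{
	static const char * const audio[] = {
		"No audio on board",
		"AC97 audio (5 sample rates)",
		"I2S audio, sample rate=32k",
		"I2S audio, 3 sample rates",
	};
	static const int max_power[] = { 500, 400, 300, 200 };

	printf("%s\n", audio[(chip_conf >> 4) & 0x3]);

	if (chip_conf & (1 << 3))
		printf("USB Remote wakeup capable\n");
	if (chip_conf & (1 << 2))
		printf("USB Self power capable\n");

	printf("%dmA max power\n", max_power[chip_conf & 0x3]);
}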
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index eab3d9511af3..42bbaf64aceb 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -38,12 +38,48 @@ static unsigned int ir_debug;
38module_param(ir_debug, int, 0644); 38module_param(ir_debug, int, 0644);
39MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]"); 39MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
40 40
41#define dprintk(fmt, arg...) \ 41#define i2cdprintk(fmt, arg...) \
42 if (ir_debug) { \ 42 if (ir_debug) { \
43 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg); \ 43 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg); \
44 } 44 }
45 45
46/* ----------------------------------------------------------------------- */ 46#define dprintk(fmt, arg...) \
47 if (ir_debug) { \
48 printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
49 }
50
51/**********************************************************
 52  Polling structure used by em28xx IRs
53 **********************************************************/
54
55struct em28xx_ir_poll_result {
56 unsigned int toggle_bit:1;
57 unsigned int read_count:7;
58 u8 rc_address;
59 u8 rc_data[4]; /* 1 byte on em2860/2880, 4 on em2874 */
60};
61
62struct em28xx_IR {
63 struct em28xx *dev;
64 struct input_dev *input;
65 struct ir_input_state ir;
66 char name[32];
67 char phys[32];
68
69 /* poll external decoder */
70 int polling;
71 struct work_struct work;
72 struct timer_list timer;
73 unsigned int last_toggle:1;
74 unsigned int last_readcount;
75 unsigned int repeat_interval;
76
77 int (*get_key)(struct em28xx_IR *, struct em28xx_ir_poll_result *);
78};
79
80/**********************************************************
81 I2C IR based get keycodes - should be used with ir-kbd-i2c
82 **********************************************************/
47 83
48int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) 84int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
49{ 85{
@@ -51,7 +87,7 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
51 87
52 /* poll IR chip */ 88 /* poll IR chip */
53 if (1 != i2c_master_recv(&ir->c, &b, 1)) { 89 if (1 != i2c_master_recv(&ir->c, &b, 1)) {
54 dprintk("read error\n"); 90 i2cdprintk("read error\n");
55 return -EIO; 91 return -EIO;
56 } 92 }
57 93
@@ -59,7 +95,7 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
 59	   down, while 0xff indicates that no button is held	 95	   down, while 0xff indicates that no button is held
60 down. 0xfe sequences are sometimes interrupted by 0xFF */ 96 down. 0xfe sequences are sometimes interrupted by 0xFF */
61 97
62 dprintk("key %02x\n", b); 98 i2cdprintk("key %02x\n", b);
63 99
64 if (b == 0xff) 100 if (b == 0xff)
65 return 0; 101 return 0;
@@ -73,7 +109,6 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
73 return 1; 109 return 1;
74} 110}
75 111
76
77int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) 112int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
78{ 113{
79 unsigned char buf[2]; 114 unsigned char buf[2];
@@ -97,7 +132,7 @@ int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
97 ((buf[0]&0x10)>>3) | /* 0000 0010 */ 132 ((buf[0]&0x10)>>3) | /* 0000 0010 */
98 ((buf[0]&0x20)>>5); /* 0000 0001 */ 133 ((buf[0]&0x20)>>5); /* 0000 0001 */
99 134
100 dprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x)\n", 135 i2cdprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x)\n",
101 code, buf[0]); 136 code, buf[0]);
102 137
103 /* return key */ 138 /* return key */
@@ -114,11 +149,11 @@ int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
114 /* poll IR chip */ 149 /* poll IR chip */
115 150
116 if (3 != i2c_master_recv(&ir->c, buf, 3)) { 151 if (3 != i2c_master_recv(&ir->c, buf, 3)) {
117 dprintk("read error\n"); 152 i2cdprintk("read error\n");
118 return -EIO; 153 return -EIO;
119 } 154 }
120 155
121 dprintk("key %02x\n", buf[2]&0x3f); 156 i2cdprintk("key %02x\n", buf[2]&0x3f);
122 if (buf[0] != 0x00) 157 if (buf[0] != 0x00)
123 return 0; 158 return 0;
124 159
@@ -128,6 +163,260 @@ int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
128 return 1; 163 return 1;
129} 164}
130 165
166/**********************************************************
167 Poll based get keycode functions
168 **********************************************************/
169
170/* This is for the em2860/em2880 */
171static int default_polling_getkey(struct em28xx_IR *ir,
172 struct em28xx_ir_poll_result *poll_result)
173{
174 struct em28xx *dev = ir->dev;
175 int rc;
176 u8 msg[3] = { 0, 0, 0 };
177
178 /* Read key toggle, brand, and key code
179 on registers 0x45, 0x46 and 0x47
180 */
181 rc = dev->em28xx_read_reg_req_len(dev, 0, EM28XX_R45_IR,
182 msg, sizeof(msg));
183 if (rc < 0)
184 return rc;
185
186 /* Infrared toggle (Reg 0x45[7]) */
187 poll_result->toggle_bit = (msg[0] >> 7);
188
 189	/* Infrared read count (Reg 0x45[6:0]) */
190 poll_result->read_count = (msg[0] & 0x7f);
191
192 /* Remote Control Address (Reg 0x46) */
193 poll_result->rc_address = msg[1];
194
195 /* Remote Control Data (Reg 0x47) */
196 poll_result->rc_data[0] = msg[2];
197
198 return 0;
199}
200
201static int em2874_polling_getkey(struct em28xx_IR *ir,
202 struct em28xx_ir_poll_result *poll_result)
203{
204 struct em28xx *dev = ir->dev;
205 int rc;
206 u8 msg[5] = { 0, 0, 0, 0, 0 };
207
208 /* Read key toggle, brand, and key code
209 on registers 0x51-55
210 */
211 rc = dev->em28xx_read_reg_req_len(dev, 0, EM2874_R51_IR,
212 msg, sizeof(msg));
213 if (rc < 0)
214 return rc;
215
216 /* Infrared toggle (Reg 0x51[7]) */
217 poll_result->toggle_bit = (msg[0] >> 7);
218
 219	/* Infrared read count (Reg 0x51[6:0]) */
220 poll_result->read_count = (msg[0] & 0x7f);
221
222 /* Remote Control Address (Reg 0x52) */
223 poll_result->rc_address = msg[1];
224
225 /* Remote Control Data (Reg 0x53-55) */
226 poll_result->rc_data[0] = msg[2];
227 poll_result->rc_data[1] = msg[3];
228 poll_result->rc_data[2] = msg[4];
229
230 return 0;
231}
232
233/**********************************************************
234 Polling code for em28xx
235 **********************************************************/
236
237static void em28xx_ir_handle_key(struct em28xx_IR *ir)
238{
239 int result;
240 int do_sendkey = 0;
241 struct em28xx_ir_poll_result poll_result;
242
243 /* read the registers containing the IR status */
244 result = ir->get_key(ir, &poll_result);
245 if (result < 0) {
246 dprintk("ir->get_key() failed %d\n", result);
247 return;
248 }
249
250 dprintk("ir->get_key result tb=%02x rc=%02x lr=%02x data=%02x\n",
251 poll_result.toggle_bit, poll_result.read_count,
252 ir->last_readcount, poll_result.rc_data[0]);
253
254 if (ir->dev->chip_id == CHIP_ID_EM2874) {
255 /* The em2874 clears the readcount field every time the
256 register is read. The em2860/2880 datasheet says that it
257 is supposed to clear the readcount, but it doesn't. So with
258 the em2874, we are looking for a non-zero read count as
259 opposed to a readcount that is incrementing */
260 ir->last_readcount = 0;
261 }
262
263 if (poll_result.read_count == 0) {
264 /* The button has not been pressed since the last read */
265 } else if (ir->last_toggle != poll_result.toggle_bit) {
266 /* A button has been pressed */
267 dprintk("button has been pressed\n");
268 ir->last_toggle = poll_result.toggle_bit;
269 ir->repeat_interval = 0;
270 do_sendkey = 1;
271 } else if (poll_result.toggle_bit == ir->last_toggle &&
272 poll_result.read_count > 0 &&
273 poll_result.read_count != ir->last_readcount) {
274 /* The button is still being held down */
275 dprintk("button being held down\n");
276
277 /* Debouncer for first keypress */
278 if (ir->repeat_interval++ > 9) {
279 /* Start repeating after 1 second */
280 do_sendkey = 1;
281 }
282 }
283
284 if (do_sendkey) {
285 dprintk("sending keypress\n");
286 ir_input_keydown(ir->input, &ir->ir, poll_result.rc_data[0],
287 poll_result.rc_data[0]);
288 ir_input_nokey(ir->input, &ir->ir);
289 }
290
291 ir->last_readcount = poll_result.read_count;
292 return;
293}
294
295static void ir_timer(unsigned long data)
296{
297 struct em28xx_IR *ir = (struct em28xx_IR *)data;
298
299 schedule_work(&ir->work);
300}
301
302static void em28xx_ir_work(struct work_struct *work)
303{
304 struct em28xx_IR *ir = container_of(work, struct em28xx_IR, work);
305
306 em28xx_ir_handle_key(ir);
307 mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
308}
309
310void em28xx_ir_start(struct em28xx_IR *ir)
311{
312 setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
313 INIT_WORK(&ir->work, em28xx_ir_work);
314 schedule_work(&ir->work);
315}
316
317static void em28xx_ir_stop(struct em28xx_IR *ir)
318{
319 del_timer_sync(&ir->timer);
320 flush_scheduled_work();
321}
322
323int em28xx_ir_init(struct em28xx *dev)
324{
325 struct em28xx_IR *ir;
326 struct input_dev *input_dev;
327 u8 ir_config;
328 int err = -ENOMEM;
329
330 if (dev->board.ir_codes == NULL) {
331 /* No remote control support */
332 return 0;
333 }
334
335 ir = kzalloc(sizeof(*ir), GFP_KERNEL);
336 input_dev = input_allocate_device();
337 if (!ir || !input_dev)
338 goto err_out_free;
339
340 ir->input = input_dev;
341
 342	/* Set up the proper handler based on the chip */
343 switch (dev->chip_id) {
344 case CHIP_ID_EM2860:
345 case CHIP_ID_EM2883:
346 ir->get_key = default_polling_getkey;
347 break;
348 case CHIP_ID_EM2874:
349 ir->get_key = em2874_polling_getkey;
350 /* For now we only support RC5, so enable it */
351 ir_config = EM2874_IR_RC5;
352 em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
353 break;
354 default:
 355		printk(KERN_INFO "Unrecognized em28xx chip id: IR not supported\n");
356 goto err_out_free;
357 }
358
359 /* This is how often we ask the chip for IR information */
360 ir->polling = 100; /* ms */
361
362 /* init input device */
363 snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)",
364 dev->name);
365
366 usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
367 strlcat(ir->phys, "/input0", sizeof(ir->phys));
368
369 ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER, dev->board.ir_codes);
370 input_dev->name = ir->name;
371 input_dev->phys = ir->phys;
372 input_dev->id.bustype = BUS_USB;
373 input_dev->id.version = 1;
374 input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
375 input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
376
377 input_dev->dev.parent = &dev->udev->dev;
378 /* record handles to ourself */
379 ir->dev = dev;
380 dev->ir = ir;
381
382 em28xx_ir_start(ir);
383
384 /* all done */
385 err = input_register_device(ir->input);
386 if (err)
387 goto err_out_stop;
388
389 return 0;
390 err_out_stop:
391 em28xx_ir_stop(ir);
392 dev->ir = NULL;
393 err_out_free:
394 input_free_device(input_dev);
395 kfree(ir);
396 return err;
397}
398
399int em28xx_ir_fini(struct em28xx *dev)
400{
401 struct em28xx_IR *ir = dev->ir;
402
403 /* skip detach on non attached boards */
404 if (!ir)
405 return 0;
406
407 em28xx_ir_stop(ir);
408 input_unregister_device(ir->input);
409 kfree(ir);
410
411 /* done */
412 dev->ir = NULL;
413 return 0;
414}
415
416/**********************************************************
417 Handle Webcam snapshot button
418 **********************************************************/
419
131static void em28xx_query_sbutton(struct work_struct *work) 420static void em28xx_query_sbutton(struct work_struct *work)
132{ 421{
133 /* Poll the register and see if the button is depressed */ 422 /* Poll the register and see if the button is depressed */
@@ -210,9 +499,3 @@ void em28xx_deregister_snapshot_button(struct em28xx *dev)
210 } 499 }
211 return; 500 return;
212} 501}
213
214/* ----------------------------------------------------------------------
215 * Local variables:
216 * c-basic-offset: 8
217 * End:
218 */
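
The keypress handling in em28xx_ir_handle_key() above boils down to a small decision on the toggle bit and the read count. A minimal standalone sketch (not driver code) of that decision, with made-up names and the em2874 quirk (clearing last_readcount before the comparison) left out:

#include <stdbool.h>

struct ir_state {
	unsigned int last_toggle:1;
	unsigned int last_readcount;
	unsigned int repeat_interval;
};

/* Decide whether a key event should be sent for the latest poll result. */
static bool ir_should_send_key(struct ir_state *s,
			       unsigned int toggle, unsigned int read_count)
{
	bool send = false;

	if (read_count == 0) {
		/* nothing pressed since the last poll */
	} else if (toggle != s->last_toggle) {
		/* a new button press */
		s->last_toggle = toggle;
		s->repeat_interval = 0;
		send = true;
	} else if (read_count != s->last_readcount) {
		/* button still held down: repeat after ~10 polls (~1 s) */
		if (s->repeat_interval++ > 9)
			send = true;
	}

	s->last_readcount = read_count;
	return send;
}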
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index fac1ab23f621..65dcb91bdcc2 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -17,17 +17,58 @@
17 17
18/* em28xx registers */ 18/* em28xx registers */
19 19
20#define EM28XX_R00_CHIPCFG 0x00
21
22/* em28xx Chip Configuration 0x00 */
23#define EM28XX_CHIPCFG_VENDOR_AUDIO 0x80
24#define EM28XX_CHIPCFG_I2S_VOLUME_CAPABLE 0x40
25#define EM28XX_CHIPCFG_I2S_5_SAMPRATES 0x30
26#define EM28XX_CHIPCFG_I2S_3_SAMPRATES 0x20
27#define EM28XX_CHIPCFG_AC97 0x10
28#define EM28XX_CHIPCFG_AUDIOMASK 0x30
29
20 /* GPIO/GPO registers */ 30 /* GPIO/GPO registers */
21#define EM2880_R04_GPO 0x04 /* em2880-em2883 only */ 31#define EM2880_R04_GPO 0x04 /* em2880-em2883 only */
22#define EM28XX_R08_GPIO 0x08 /* em2820 or upper */ 32#define EM28XX_R08_GPIO 0x08 /* em2820 or upper */
23 33
24#define EM28XX_R06_I2C_CLK 0x06 34#define EM28XX_R06_I2C_CLK 0x06
35
36/* em28xx I2C Clock Register (0x06) */
37#define EM28XX_I2C_CLK_ACK_LAST_READ 0x80
38#define EM28XX_I2C_CLK_WAIT_ENABLE 0x40
39#define EM28XX_I2C_EEPROM_ON_BOARD 0x08
40#define EM28XX_I2C_EEPROM_KEY_VALID 0x04
41#define EM2874_I2C_SECONDARY_BUS_SELECT 0x04 /* em2874 has two i2c busses */
42#define EM28XX_I2C_FREQ_1_5_MHZ 0x03 /* bus frequency (bits [1-0]) */
43#define EM28XX_I2C_FREQ_25_KHZ 0x02
44#define EM28XX_I2C_FREQ_400_KHZ 0x01
45#define EM28XX_I2C_FREQ_100_KHZ 0x00
46
47
25#define EM28XX_R0A_CHIPID 0x0a 48#define EM28XX_R0A_CHIPID 0x0a
26#define EM28XX_R0C_USBSUSP 0x0c /* */ 49#define EM28XX_R0C_USBSUSP 0x0c /* */
27 50
28#define EM28XX_R0E_AUDIOSRC 0x0e 51#define EM28XX_R0E_AUDIOSRC 0x0e
29#define EM28XX_R0F_XCLK 0x0f 52#define EM28XX_R0F_XCLK 0x0f
30 53
54/* em28xx XCLK Register (0x0f) */
55#define EM28XX_XCLK_AUDIO_UNMUTE 0x80 /* otherwise audio muted */
56#define EM28XX_XCLK_I2S_MSB_TIMING 0x40 /* otherwise standard timing */
57#define EM28XX_XCLK_IR_RC5_MODE 0x20 /* otherwise NEC mode */
58#define EM28XX_XCLK_IR_NEC_CHK_PARITY 0x10
59#define EM28XX_XCLK_FREQUENCY_30MHZ 0x00 /* Freq. select (bits [3-0]) */
60#define EM28XX_XCLK_FREQUENCY_15MHZ 0x01
61#define EM28XX_XCLK_FREQUENCY_10MHZ 0x02
62#define EM28XX_XCLK_FREQUENCY_7_5MHZ 0x03
63#define EM28XX_XCLK_FREQUENCY_6MHZ 0x04
64#define EM28XX_XCLK_FREQUENCY_5MHZ 0x05
65#define EM28XX_XCLK_FREQUENCY_4_3MHZ 0x06
66#define EM28XX_XCLK_FREQUENCY_12MHZ 0x07
67#define EM28XX_XCLK_FREQUENCY_20MHZ 0x08
68#define EM28XX_XCLK_FREQUENCY_20MHZ_2 0x09
69#define EM28XX_XCLK_FREQUENCY_48MHZ 0x0a
70#define EM28XX_XCLK_FREQUENCY_24MHZ 0x0b
71
31#define EM28XX_R10_VINMODE 0x10 72#define EM28XX_R10_VINMODE 0x10
32#define EM28XX_R11_VINCTRL 0x11 73#define EM28XX_R11_VINCTRL 0x11
33#define EM28XX_R12_VINENABLE 0x12 /* */ 74#define EM28XX_R12_VINENABLE 0x12 /* */
@@ -56,6 +97,19 @@
56#define EM28XX_R26_COMPR 0x26 97#define EM28XX_R26_COMPR 0x26
57#define EM28XX_R27_OUTFMT 0x27 98#define EM28XX_R27_OUTFMT 0x27
58 99
100/* em28xx Output Format Register (0x27) */
101#define EM28XX_OUTFMT_RGB_8_RGRG 0x00
102#define EM28XX_OUTFMT_RGB_8_GRGR 0x01
103#define EM28XX_OUTFMT_RGB_8_GBGB 0x02
104#define EM28XX_OUTFMT_RGB_8_BGBG 0x03
105#define EM28XX_OUTFMT_RGB_16_656 0x04
106#define EM28XX_OUTFMT_RGB_8_BAYER 0x08 /* Pattern in Reg 0x10[1-0] */
107#define EM28XX_OUTFMT_YUV211 0x10
108#define EM28XX_OUTFMT_YUV422_Y0UY1V 0x14
109#define EM28XX_OUTFMT_YUV422_Y1UY0V 0x15
110#define EM28XX_OUTFMT_YUV411 0x18
111
112
59#define EM28XX_R28_XMIN 0x28 113#define EM28XX_R28_XMIN 0x28
60#define EM28XX_R29_XMAX 0x29 114#define EM28XX_R29_XMAX 0x29
61#define EM28XX_R2A_YMIN 0x2a 115#define EM28XX_R2A_YMIN 0x2a
@@ -71,10 +125,32 @@
71#define EM28XX_R42_AC97ADDR 0x42 125#define EM28XX_R42_AC97ADDR 0x42
72#define EM28XX_R43_AC97BUSY 0x43 126#define EM28XX_R43_AC97BUSY 0x43
73 127
74/* em202 registers */ 128#define EM28XX_R45_IR 0x45
75#define EM28XX_R02_MASTER_AC97 0x02 129 /* 0x45 bit 7 - parity bit
76#define EM28XX_R10_LINE_IN_AC97 0x10 130 bits 6-0 - count
77#define EM28XX_R14_VIDEO_AC97 0x14 131 0x46 IR brand
132 0x47 IR data
133 */
134
135/* em2874 registers */
136#define EM2874_R50_IR_CONFIG 0x50
137#define EM2874_R51_IR 0x51
138#define EM2874_R5F_TS_ENABLE 0x5f
139#define EM2874_R80_GPIO 0x80
140
141/* em2874 IR config register (0x50) */
142#define EM2874_IR_NEC 0x00
143#define EM2874_IR_RC5 0x04
144#define EM2874_IR_RC5_MODE_0 0x08
145#define EM2874_IR_RC5_MODE_6A 0x0b
146
147/* em2874 Transport Stream Enable Register (0x5f) */
148#define EM2874_TS1_CAPTURE_ENABLE (1 << 0)
149#define EM2874_TS1_FILTER_ENABLE (1 << 1)
150#define EM2874_TS1_NULL_DISCARD (1 << 2)
151#define EM2874_TS2_CAPTURE_ENABLE (1 << 4)
152#define EM2874_TS2_FILTER_ENABLE (1 << 5)
153#define EM2874_TS2_NULL_DISCARD (1 << 6)
78 154
79/* register settings */ 155/* register settings */
80#define EM2800_AUDIO_SRC_TUNER 0x0d 156#define EM2800_AUDIO_SRC_TUNER 0x0d
@@ -84,6 +160,75 @@
84 160
85/* FIXME: Need to be populated with the other chip ID's */ 161/* FIXME: Need to be populated with the other chip ID's */
86enum em28xx_chip_id { 162enum em28xx_chip_id {
163 CHIP_ID_EM2820 = 18,
164 CHIP_ID_EM2840 = 20,
165 CHIP_ID_EM2750 = 33,
87 CHIP_ID_EM2860 = 34, 166 CHIP_ID_EM2860 = 34,
167 CHIP_ID_EM2870 = 35,
88 CHIP_ID_EM2883 = 36, 168 CHIP_ID_EM2883 = 36,
169 CHIP_ID_EM2874 = 65,
89}; 170};
171
172/*
173 * Registers used by em202 and other AC97 chips
174 */
175
176/* Standard AC97 registers */
177#define AC97_RESET 0x00
178
179 /* Output volumes */
180#define AC97_MASTER_VOL 0x02
 181#define AC97_LINE_LEVEL_VOL 0x04 /* Some devices use it for headphones */
182#define AC97_MASTER_MONO_VOL 0x06
183
184 /* Input volumes */
185#define AC97_PC_BEEP_VOL 0x0a
186#define AC97_PHONE_VOL 0x0c
187#define AC97_MIC_VOL 0x0e
188#define AC97_LINEIN_VOL 0x10
189#define AC97_CD_VOL 0x12
190#define AC97_VIDEO_VOL 0x14
191#define AC97_AUX_VOL 0x16
192#define AC97_PCM_OUT_VOL 0x18
193
194 /* capture registers */
195#define AC97_RECORD_SELECT 0x1a
196#define AC97_RECORD_GAIN 0x1c
197
198 /* control registers */
199#define AC97_GENERAL_PURPOSE 0x20
200#define AC97_3D_CTRL 0x22
201#define AC97_AUD_INT_AND_PAG 0x24
202#define AC97_POWER_DOWN_CTRL 0x26
203#define AC97_EXT_AUD_ID 0x28
204#define AC97_EXT_AUD_CTRL 0x2a
205
 206/* Supported rates vary for each AC97 device;
 207 if an unsupported value is written, the device returns the closest one
208 */
209#define AC97_PCM_OUT_FRONT_SRATE 0x2c
210#define AC97_PCM_OUT_SURR_SRATE 0x2e
211#define AC97_PCM_OUT_LFE_SRATE 0x30
212#define AC97_PCM_IN_SRATE 0x32
213
214 /* For devices with more than 2 channels, extra output volumes */
215#define AC97_LFE_MASTER_VOL 0x36
216#define AC97_SURR_MASTER_VOL 0x38
217
218 /* Digital SPDIF output control */
219#define AC97_SPDIF_OUT_CTRL 0x3a
220
221 /* Vendor ID identifier */
222#define AC97_VENDOR_ID1 0x7c
223#define AC97_VENDOR_ID2 0x7e
224
225/* EMP202 vendor registers */
226#define EM202_EXT_MODEM_CTRL 0x3e
227#define EM202_GPIO_CONF 0x4c
228#define EM202_GPIO_POLARITY 0x4e
229#define EM202_GPIO_STICKY 0x50
230#define EM202_GPIO_MASK 0x52
231#define EM202_GPIO_STATUS 0x54
232#define EM202_SPDIF_OUT_SEL 0x6a
233#define EM202_ANTIPOP 0x72
234#define EM202_EAPD_GPIO_ACCESS 0x74
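
A small standalone sketch (not driver code) of how the XCLK bit definitions above combine into a register value: the low nibble is the frequency select and the upper bits are the flags used by em28xx-core.c (bit 7 unmutes audio, bit 5 selects RC5 IR decoding). The helper name is invented for illustration.

#include <stdint.h>

/* Compose a value for EM28XX_R0F_XCLK from a frequency select and flags. */
static uint8_t em28xx_xclk_value(uint8_t freq_sel, int audio_unmute, int ir_rc5)
{
	uint8_t xclk = freq_sel & 0x0f;		/* EM28XX_XCLK_FREQUENCY_* */

	if (audio_unmute)
		xclk |= 0x80;			/* EM28XX_XCLK_AUDIO_UNMUTE */
	if (ir_rc5)
		xclk |= 0x20;			/* EM28XX_XCLK_IR_RC5_MODE */

	return xclk;
}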
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 4ea1f1e04897..53527536481e 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -39,6 +39,7 @@
39#include "em28xx.h" 39#include "em28xx.h"
40#include <media/v4l2-common.h> 40#include <media/v4l2-common.h>
41#include <media/v4l2-ioctl.h> 41#include <media/v4l2-ioctl.h>
42#include <media/v4l2-chip-ident.h>
42#include <media/msp3400.h> 43#include <media/msp3400.h>
43#include <media/tuner.h> 44#include <media/tuner.h>
44 45
@@ -47,9 +48,8 @@
47 "Mauro Carvalho Chehab <mchehab@infradead.org>, " \ 48 "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
48 "Sascha Sommer <saschasommer@freenet.de>" 49 "Sascha Sommer <saschasommer@freenet.de>"
49 50
50#define DRIVER_NAME "em28xx"
51#define DRIVER_DESC "Empia em28xx based USB video device driver" 51#define DRIVER_DESC "Empia em28xx based USB video device driver"
52#define EM28XX_VERSION_CODE KERNEL_VERSION(0, 1, 0) 52#define EM28XX_VERSION_CODE KERNEL_VERSION(0, 1, 1)
53 53
54#define em28xx_videodbg(fmt, arg...) do {\ 54#define em28xx_videodbg(fmt, arg...) do {\
55 if (video_debug) \ 55 if (video_debug) \
@@ -72,19 +72,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
72MODULE_DESCRIPTION(DRIVER_DESC); 72MODULE_DESCRIPTION(DRIVER_DESC);
73MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
74 74
75static LIST_HEAD(em28xx_devlist);
76static DEFINE_MUTEX(em28xx_devlist_mutex);
77
78static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
79static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 75static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
80static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 76static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
81static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 77static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
82 78
83module_param_array(card, int, NULL, 0444);
84module_param_array(video_nr, int, NULL, 0444); 79module_param_array(video_nr, int, NULL, 0444);
85module_param_array(vbi_nr, int, NULL, 0444); 80module_param_array(vbi_nr, int, NULL, 0444);
86module_param_array(radio_nr, int, NULL, 0444); 81module_param_array(radio_nr, int, NULL, 0444);
87MODULE_PARM_DESC(card, "card type");
88MODULE_PARM_DESC(video_nr, "video device numbers"); 82MODULE_PARM_DESC(video_nr, "video device numbers");
89MODULE_PARM_DESC(vbi_nr, "vbi device numbers"); 83MODULE_PARM_DESC(vbi_nr, "vbi device numbers");
90MODULE_PARM_DESC(radio_nr, "radio device numbers"); 84MODULE_PARM_DESC(radio_nr, "radio device numbers");
@@ -93,8 +87,15 @@ static unsigned int video_debug;
93module_param(video_debug, int, 0644); 87module_param(video_debug, int, 0644);
94MODULE_PARM_DESC(video_debug, "enable debug messages [video]"); 88MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
95 89
96/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */ 90/* supported video formats */
97static unsigned long em28xx_devused; 91static struct em28xx_fmt format[] = {
92 {
93 .name = "16bpp YUY2, 4:2:2, packed",
94 .fourcc = V4L2_PIX_FMT_YUYV,
95 .depth = 16,
96 .reg = EM28XX_OUTFMT_YUV422_Y0UY1V,
97 },
98};
98 99
99/* supported controls */ 100/* supported controls */
100/* Common to all boards */ 101/* Common to all boards */
@@ -120,8 +121,6 @@ static struct v4l2_queryctrl em28xx_qctrl[] = {
120 } 121 }
121}; 122};
122 123
123static struct usb_driver em28xx_usb_driver;
124
125/* ------------------------------------------------------------------ 124/* ------------------------------------------------------------------
126 DMA and thread functions 125 DMA and thread functions
127 ------------------------------------------------------------------*/ 126 ------------------------------------------------------------------*/
@@ -386,16 +385,18 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
386 struct em28xx *dev = fh->dev; 385 struct em28xx *dev = fh->dev;
387 struct v4l2_frequency f; 386 struct v4l2_frequency f;
388 387
389 *size = 16 * fh->dev->width * fh->dev->height >> 3; 388 *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7) >> 3;
389
390 if (0 == *count) 390 if (0 == *count)
391 *count = EM28XX_DEF_BUF; 391 *count = EM28XX_DEF_BUF;
392 392
393 if (*count < EM28XX_MIN_BUF) 393 if (*count < EM28XX_MIN_BUF)
394 *count = EM28XX_MIN_BUF; 394 *count = EM28XX_MIN_BUF;
395 395
396 /* Ask tuner to go to analog mode */ 396 /* Ask tuner to go to analog or radio mode */
397 memset(&f, 0, sizeof(f)); 397 memset(&f, 0, sizeof(f));
398 f.frequency = dev->ctl_freq; 398 f.frequency = dev->ctl_freq;
399 f.type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
399 400
400 em28xx_i2c_call_clients(dev, VIDIOC_S_FREQUENCY, &f); 401 em28xx_i2c_call_clients(dev, VIDIOC_S_FREQUENCY, &f);
401 402
@@ -438,9 +439,7 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
438 struct em28xx *dev = fh->dev; 439 struct em28xx *dev = fh->dev;
439 int rc = 0, urb_init = 0; 440 int rc = 0, urb_init = 0;
440 441
441 /* FIXME: It assumes depth = 16 */ 442 buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth + 7) >> 3;
442 /* The only currently supported format is 16 bits/pixel */
443 buf->vb.size = 16 * dev->width * dev->height >> 3;
444 443
445 if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) 444 if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
446 return -EINVAL; 445 return -EINVAL;
@@ -508,56 +507,6 @@ static struct videobuf_queue_ops em28xx_video_qops = {
508 507
509/********************* v4l2 interface **************************************/ 508/********************* v4l2 interface **************************************/
510 509
511/*
512 * em28xx_config()
513 * inits registers with sane defaults
514 */
515static int em28xx_config(struct em28xx *dev)
516{
517 int retval;
518
519 /* Sets I2C speed to 100 KHz */
520 if (!dev->is_em2800) {
521 retval = em28xx_write_regs_req(dev, 0x00, 0x06, "\x40", 1);
522 if (retval < 0) {
523 em28xx_errdev("%s: em28xx_write_regs_req failed! retval [%d]\n",
524 __func__, retval);
525 return retval;
526 }
527 }
528
529 /* enable vbi capturing */
530
531/* em28xx_write_regs_req(dev, 0x00, 0x0e, "\xC0", 1); audio register */
532/* em28xx_write_regs_req(dev, 0x00, 0x0f, "\x80", 1); clk register */
533 em28xx_write_regs_req(dev, 0x00, 0x11, "\x51", 1);
534
535 dev->mute = 1; /* maybe not the right place... */
536 dev->volume = 0x1f;
537
538 em28xx_outfmt_set_yuv422(dev);
539 em28xx_colorlevels_set_default(dev);
540 em28xx_compression_disable(dev);
541
542 return 0;
543}
544
545/*
546 * em28xx_config_i2c()
547 * configure i2c attached devices
548 */
549static void em28xx_config_i2c(struct em28xx *dev)
550{
551 struct v4l2_routing route;
552 int zero = 0;
553
554 route.input = INPUT(dev->ctl_input)->vmux;
555 route.output = 0;
556 em28xx_i2c_call_clients(dev, VIDIOC_INT_RESET, &zero);
557 em28xx_i2c_call_clients(dev, VIDIOC_INT_S_VIDEO_ROUTING, &route);
558 em28xx_i2c_call_clients(dev, VIDIOC_STREAMON, NULL);
559}
560
561static void video_mux(struct em28xx *dev, int index) 510static void video_mux(struct em28xx *dev, int index)
562{ 511{
563 struct v4l2_routing route; 512 struct v4l2_routing route;
@@ -566,10 +515,14 @@ static void video_mux(struct em28xx *dev, int index)
566 route.output = 0; 515 route.output = 0;
567 dev->ctl_input = index; 516 dev->ctl_input = index;
568 dev->ctl_ainput = INPUT(index)->amux; 517 dev->ctl_ainput = INPUT(index)->amux;
518 dev->ctl_aoutput = INPUT(index)->aout;
519
520 if (!dev->ctl_aoutput)
521 dev->ctl_aoutput = EM28XX_AOUT_MASTER;
569 522
570 em28xx_i2c_call_clients(dev, VIDIOC_INT_S_VIDEO_ROUTING, &route); 523 em28xx_i2c_call_clients(dev, VIDIOC_INT_S_VIDEO_ROUTING, &route);
571 524
572 if (dev->has_msp34xx) { 525 if (dev->board.has_msp34xx) {
573 if (dev->i2s_speed) { 526 if (dev->i2s_speed) {
574 em28xx_i2c_call_clients(dev, VIDIOC_INT_I2S_CLOCK_FREQ, 527 em28xx_i2c_call_clients(dev, VIDIOC_INT_I2S_CLOCK_FREQ,
575 &dev->i2s_speed); 528 &dev->i2s_speed);
@@ -595,12 +548,10 @@ static int res_get(struct em28xx_fh *fh)
595 return rc; 548 return rc;
596 549
597 if (dev->stream_on) 550 if (dev->stream_on)
598 return -EINVAL; 551 return -EBUSY;
599 552
600 mutex_lock(&dev->lock);
601 dev->stream_on = 1; 553 dev->stream_on = 1;
602 fh->stream_on = 1; 554 fh->stream_on = 1;
603 mutex_unlock(&dev->lock);
604 return rc; 555 return rc;
605} 556}
606 557
@@ -613,10 +564,8 @@ static void res_free(struct em28xx_fh *fh)
613{ 564{
614 struct em28xx *dev = fh->dev; 565 struct em28xx *dev = fh->dev;
615 566
616 mutex_lock(&dev->lock);
617 fh->stream_on = 0; 567 fh->stream_on = 0;
618 dev->stream_on = 0; 568 dev->stream_on = 0;
619 mutex_unlock(&dev->lock);
620} 569}
621 570
622/* 571/*
@@ -703,8 +652,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
703 652
704 f->fmt.pix.width = dev->width; 653 f->fmt.pix.width = dev->width;
705 f->fmt.pix.height = dev->height; 654 f->fmt.pix.height = dev->height;
706 f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; 655 f->fmt.pix.pixelformat = dev->format->fourcc;
707 f->fmt.pix.bytesperline = dev->width * 2; 656 f->fmt.pix.bytesperline = (dev->width * dev->format->depth + 7) >> 3;
708 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * dev->height; 657 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * dev->height;
709 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 658 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
710 659
@@ -716,6 +665,17 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
716 return 0; 665 return 0;
717} 666}
718 667
668static struct em28xx_fmt *format_by_fourcc(unsigned int fourcc)
669{
670 unsigned int i;
671
672 for (i = 0; i < ARRAY_SIZE(format); i++)
673 if (format[i].fourcc == fourcc)
674 return &format[i];
675
676 return NULL;
677}
678
719static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, 679static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
720 struct v4l2_format *f) 680 struct v4l2_format *f)
721{ 681{
@@ -726,24 +686,30 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
726 unsigned int maxw = norm_maxw(dev); 686 unsigned int maxw = norm_maxw(dev);
727 unsigned int maxh = norm_maxh(dev); 687 unsigned int maxh = norm_maxh(dev);
728 unsigned int hscale, vscale; 688 unsigned int hscale, vscale;
689 struct em28xx_fmt *fmt;
690
691 fmt = format_by_fourcc(f->fmt.pix.pixelformat);
692 if (!fmt) {
693 em28xx_videodbg("Fourcc format (%08x) invalid.\n",
694 f->fmt.pix.pixelformat);
695 return -EINVAL;
696 }
729 697
 730	/* width must be even because of the YUYV format	 698	/* width must be even because of the YUYV format
731 height must be even because of interlacing */ 699 height must be even because of interlacing */
732 height &= 0xfffe; 700 height &= 0xfffe;
733 width &= 0xfffe; 701 width &= 0xfffe;
734 702
735 if (height < 32) 703 if (unlikely(height < 32))
736 height = 32; 704 height = 32;
737 if (height > maxh) 705 if (unlikely(height > maxh))
738 height = maxh; 706 height = maxh;
739 if (width < 48) 707 if (unlikely(width < 48))
740 width = 48; 708 width = 48;
741 if (width > maxw) 709 if (unlikely(width > maxw))
742 width = maxw; 710 width = maxw;
743 711
744 mutex_lock(&dev->lock); 712 if (dev->board.is_em2800) {
745
746 if (dev->is_em2800) {
747 /* the em2800 can only scale down to 50% */ 713 /* the em2800 can only scale down to 50% */
748 if (height % (maxh / 2)) 714 if (height % (maxh / 2))
749 height = maxh; 715 height = maxh;
@@ -766,13 +732,12 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
766 732
767 f->fmt.pix.width = width; 733 f->fmt.pix.width = width;
768 f->fmt.pix.height = height; 734 f->fmt.pix.height = height;
769 f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; 735 f->fmt.pix.pixelformat = fmt->fourcc;
 770	f->fmt.pix.bytesperline = width * 2;	 736	f->fmt.pix.bytesperline = (width * fmt->depth + 7) >> 3;
771 f->fmt.pix.sizeimage = width * 2 * height; 737 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height;
772 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 738 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
773 f->fmt.pix.field = V4L2_FIELD_INTERLACED; 739 f->fmt.pix.field = V4L2_FIELD_INTERLACED;
774 740
775 mutex_unlock(&dev->lock);
776 return 0; 741 return 0;
777} 742}
778 743
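
The format arithmetic used throughout the hunks above (buffer_setup, buffer_prepare, vidioc_g_fmt_vid_cap and vidioc_try_fmt_vid_cap) reduces to two expressions. A minimal standalone sketch, with made-up helper names:

/* Bytes per line: width * bits-per-pixel, rounded up to whole bytes. */
static unsigned int em28xx_bytesperline(unsigned int width, unsigned int depth)
{
	return (width * depth + 7) >> 3;
}

/* Image size in bytes: one line times the number of lines. */
static unsigned int em28xx_sizeimage(unsigned int width, unsigned int height,
				     unsigned int depth)
{
	return em28xx_bytesperline(width, depth) * height;
}

For the only entry currently in the format table (16 bpp YUYV) this gives the familiar width * 2 and width * 2 * height.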
@@ -782,14 +747,21 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
782 struct em28xx_fh *fh = priv; 747 struct em28xx_fh *fh = priv;
783 struct em28xx *dev = fh->dev; 748 struct em28xx *dev = fh->dev;
784 int rc; 749 int rc;
750 struct em28xx_fmt *fmt;
785 751
786 rc = check_dev(dev); 752 rc = check_dev(dev);
787 if (rc < 0) 753 if (rc < 0)
788 return rc; 754 return rc;
789 755
756 mutex_lock(&dev->lock);
757
790 vidioc_try_fmt_vid_cap(file, priv, f); 758 vidioc_try_fmt_vid_cap(file, priv, f);
791 759
792 mutex_lock(&dev->lock); 760 fmt = format_by_fourcc(f->fmt.pix.pixelformat);
761 if (!fmt) {
762 rc = -EINVAL;
763 goto out;
764 }
793 765
794 if (videobuf_queue_is_busy(&fh->vb_vidq)) { 766 if (videobuf_queue_is_busy(&fh->vb_vidq)) {
795 em28xx_errdev("%s queue busy\n", __func__); 767 em28xx_errdev("%s queue busy\n", __func__);
@@ -806,6 +778,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
806 /* set new image size */ 778 /* set new image size */
807 dev->width = f->fmt.pix.width; 779 dev->width = f->fmt.pix.width;
808 dev->height = f->fmt.pix.height; 780 dev->height = f->fmt.pix.height;
781 dev->format = fmt;
809 get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale); 782 get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
810 783
811 em28xx_set_alternate(dev); 784 em28xx_set_alternate(dev);
@@ -831,15 +804,12 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id * norm)
831 804
832 mutex_lock(&dev->lock); 805 mutex_lock(&dev->lock);
833 dev->norm = *norm; 806 dev->norm = *norm;
834 mutex_unlock(&dev->lock);
835 807
836 /* Adjusts width/height, if needed */ 808 /* Adjusts width/height, if needed */
837 f.fmt.pix.width = dev->width; 809 f.fmt.pix.width = dev->width;
838 f.fmt.pix.height = dev->height; 810 f.fmt.pix.height = dev->height;
839 vidioc_try_fmt_vid_cap(file, priv, &f); 811 vidioc_try_fmt_vid_cap(file, priv, &f);
840 812
841 mutex_lock(&dev->lock);
842
843 /* set new image size */ 813 /* set new image size */
844 dev->width = f.fmt.pix.width; 814 dev->width = f.fmt.pix.width;
845 dev->height = f.fmt.pix.height; 815 dev->height = f.fmt.pix.height;
@@ -928,20 +898,38 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
928{ 898{
929 struct em28xx_fh *fh = priv; 899 struct em28xx_fh *fh = priv;
930 struct em28xx *dev = fh->dev; 900 struct em28xx *dev = fh->dev;
931 unsigned int index = a->index;
932
933 if (a->index > 1)
934 return -EINVAL;
935
936 index = dev->ctl_ainput;
937 901
938 if (index == 0) 902 switch (a->index) {
903 case EM28XX_AMUX_VIDEO:
939 strcpy(a->name, "Television"); 904 strcpy(a->name, "Television");
940 else 905 break;
906 case EM28XX_AMUX_LINE_IN:
941 strcpy(a->name, "Line In"); 907 strcpy(a->name, "Line In");
908 break;
909 case EM28XX_AMUX_VIDEO2:
910 strcpy(a->name, "Television alt");
911 break;
912 case EM28XX_AMUX_PHONE:
913 strcpy(a->name, "Phone");
914 break;
915 case EM28XX_AMUX_MIC:
916 strcpy(a->name, "Mic");
917 break;
918 case EM28XX_AMUX_CD:
919 strcpy(a->name, "CD");
920 break;
921 case EM28XX_AMUX_AUX:
922 strcpy(a->name, "Aux");
923 break;
924 case EM28XX_AMUX_PCM_OUT:
925 strcpy(a->name, "PCM");
926 break;
927 default:
928 return -EINVAL;
929 }
942 930
931 a->index = dev->ctl_ainput;
943 a->capability = V4L2_AUDCAP_STEREO; 932 a->capability = V4L2_AUDCAP_STEREO;
944 a->index = index;
945 933
946 return 0; 934 return 0;
947} 935}
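
From userspace, the reworked vidioc_g_audio() now reports the index of the currently selected audio input (dev->ctl_ainput) together with one of the names from the switch above. A hedged sketch of querying it with the standard V4L2 ioctl; /dev/video0 is only an example node.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_audio a;
        int fd = open("/dev/video0", O_RDWR);   /* example device node */

        if (fd < 0)
                return 1;
        memset(&a, 0, sizeof(a));
        if (ioctl(fd, VIDIOC_G_AUDIO, &a) == 0)
                printf("audio input %u: %s (capability 0x%x)\n",
                       a.index, (char *)a.name, a.capability);
        close(fd);
        return 0;
}
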
@@ -951,9 +939,15 @@ static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a)
951 struct em28xx_fh *fh = priv; 939 struct em28xx_fh *fh = priv;
952 struct em28xx *dev = fh->dev; 940 struct em28xx *dev = fh->dev;
953 941
954 if (a->index != dev->ctl_ainput) 942 mutex_lock(&dev->lock);
955 return -EINVAL; 943
944 dev->ctl_ainput = INPUT(a->index)->amux;
945 dev->ctl_aoutput = INPUT(a->index)->aout;
956 946
947 if (!dev->ctl_aoutput)
948 dev->ctl_aoutput = EM28XX_AOUT_MASTER;
949
950 mutex_unlock(&dev->lock);
957 return 0; 951 return 0;
958} 952}
959 953
@@ -974,7 +968,7 @@ static int vidioc_queryctrl(struct file *file, void *priv,
974 968
975 qc->id = id; 969 qc->id = id;
976 970
977 if (!dev->has_msp34xx) { 971 if (!dev->board.has_msp34xx) {
978 for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) { 972 for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) {
979 if (qc->id && qc->id == em28xx_qctrl[i].id) { 973 if (qc->id && qc->id == em28xx_qctrl[i].id) {
980 memcpy(qc, &(em28xx_qctrl[i]), sizeof(*qc)); 974 memcpy(qc, &(em28xx_qctrl[i]), sizeof(*qc));
@@ -1002,17 +996,14 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
1002 rc = check_dev(dev); 996 rc = check_dev(dev);
1003 if (rc < 0) 997 if (rc < 0)
1004 return rc; 998 return rc;
1005 mutex_lock(&dev->lock); 999 rc = 0;
1006 1000
1007 if (!dev->has_msp34xx) 1001 mutex_lock(&dev->lock);
1008 rc = em28xx_get_ctrl(dev, ctrl);
1009 else
1010 rc = -EINVAL;
1011 1002
1012 if (rc == -EINVAL) { 1003 if (dev->board.has_msp34xx)
1013 em28xx_i2c_call_clients(dev, VIDIOC_G_CTRL, ctrl); 1004 em28xx_i2c_call_clients(dev, VIDIOC_G_CTRL, ctrl);
1014 rc = 0; 1005 else
1015 } 1006 rc = em28xx_get_ctrl(dev, ctrl);
1016 1007
1017 mutex_unlock(&dev->lock); 1008 mutex_unlock(&dev->lock);
1018 return rc; 1009 return rc;
@@ -1032,7 +1023,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
1032 1023
1033 mutex_lock(&dev->lock); 1024 mutex_lock(&dev->lock);
1034 1025
1035 if (dev->has_msp34xx) 1026 if (dev->board.has_msp34xx)
1036 em28xx_i2c_call_clients(dev, VIDIOC_S_CTRL, ctrl); 1027 em28xx_i2c_call_clients(dev, VIDIOC_S_CTRL, ctrl);
1037 else { 1028 else {
1038 rc = 1; 1029 rc = 1;
@@ -1112,8 +1103,10 @@ static int vidioc_g_frequency(struct file *file, void *priv,
1112 struct em28xx_fh *fh = priv; 1103 struct em28xx_fh *fh = priv;
1113 struct em28xx *dev = fh->dev; 1104 struct em28xx *dev = fh->dev;
1114 1105
1106 mutex_lock(&dev->lock);
1115 f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; 1107 f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1116 f->frequency = dev->ctl_freq; 1108 f->frequency = dev->ctl_freq;
1109 mutex_unlock(&dev->lock);
1117 1110
1118 return 0; 1111 return 0;
1119} 1112}
@@ -1143,6 +1136,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1143 em28xx_i2c_call_clients(dev, VIDIOC_S_FREQUENCY, f); 1136 em28xx_i2c_call_clients(dev, VIDIOC_S_FREQUENCY, f);
1144 1137
1145 mutex_unlock(&dev->lock); 1138 mutex_unlock(&dev->lock);
1139
1146 return 0; 1140 return 0;
1147} 1141}
1148 1142
@@ -1159,6 +1153,21 @@ static int em28xx_reg_len(int reg)
1159 } 1153 }
1160} 1154}
1161 1155
1156static int vidioc_g_chip_ident(struct file *file, void *priv,
1157 struct v4l2_chip_ident *chip)
1158{
1159 struct em28xx_fh *fh = priv;
1160 struct em28xx *dev = fh->dev;
1161
1162 chip->ident = V4L2_IDENT_NONE;
1163 chip->revision = 0;
1164
1165 em28xx_i2c_call_clients(dev, VIDIOC_G_CHIP_IDENT, chip);
1166
1167 return 0;
1168}
1169
1170
1162static int vidioc_g_register(struct file *file, void *priv, 1171static int vidioc_g_register(struct file *file, void *priv,
1163 struct v4l2_register *reg) 1172 struct v4l2_register *reg)
1164{ 1173{
@@ -1166,19 +1175,43 @@ static int vidioc_g_register(struct file *file, void *priv,
1166 struct em28xx *dev = fh->dev; 1175 struct em28xx *dev = fh->dev;
1167 int ret; 1176 int ret;
1168 1177
1169 if (!v4l2_chip_match_host(reg->match_type, reg->match_chip)) 1178 switch (reg->match_type) {
1179 case V4L2_CHIP_MATCH_AC97:
1180 mutex_lock(&dev->lock);
1181 ret = em28xx_read_ac97(dev, reg->reg);
1182 mutex_unlock(&dev->lock);
1183 if (ret < 0)
1184 return ret;
1185
1186 reg->val = ret;
1187 return 0;
1188 case V4L2_CHIP_MATCH_I2C_DRIVER:
1189 em28xx_i2c_call_clients(dev, VIDIOC_DBG_G_REGISTER, reg);
1190 return 0;
1191 case V4L2_CHIP_MATCH_I2C_ADDR:
1192 /* Not supported yet */
1170 return -EINVAL; 1193 return -EINVAL;
1194 default:
1195 if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
1196 return -EINVAL;
1197 }
1171 1198
1199 /* Match host */
1172 if (em28xx_reg_len(reg->reg) == 1) { 1200 if (em28xx_reg_len(reg->reg) == 1) {
1201 mutex_lock(&dev->lock);
1173 ret = em28xx_read_reg(dev, reg->reg); 1202 ret = em28xx_read_reg(dev, reg->reg);
1203 mutex_unlock(&dev->lock);
1204
1174 if (ret < 0) 1205 if (ret < 0)
1175 return ret; 1206 return ret;
1176 1207
1177 reg->val = ret; 1208 reg->val = ret;
1178 } else { 1209 } else {
1179 __le64 val = 0; 1210 __le64 val = 0;
1211 mutex_lock(&dev->lock);
1180 ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS, 1212 ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
1181 reg->reg, (char *)&val, 2); 1213 reg->reg, (char *)&val, 2);
1214 mutex_unlock(&dev->lock);
1182 if (ret < 0) 1215 if (ret < 0)
1183 return ret; 1216 return ret;
1184 1217
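
With the new match_type dispatch, the CONFIG_VIDEO_ADV_DEBUG register interface can read the AC97 codec and I2C client registers as well as the em28xx bridge itself. A hedged userspace sketch of an AC97 read follows; it assumes the struct v4l2_register and V4L2_CHIP_MATCH_AC97 definitions from the kernel headers this patch targets, and register 0x02 (AC97 master volume) is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_register reg;
        int fd = open("/dev/video0", O_RDWR);   /* example device node */

        if (fd < 0)
                return 1;
        memset(&reg, 0, sizeof(reg));
        reg.match_type = V4L2_CHIP_MATCH_AC97;  /* routed to em28xx_read_ac97() */
        reg.reg = 0x02;                         /* AC97 master volume (example) */
        if (ioctl(fd, VIDIOC_DBG_G_REGISTER, &reg) == 0)
                printf("AC97 reg 0x%02llx = 0x%04llx\n",
                       (unsigned long long)reg.reg,
                       (unsigned long long)reg.val);
        close(fd);
        return 0;
}
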
@@ -1194,11 +1227,35 @@ static int vidioc_s_register(struct file *file, void *priv,
1194 struct em28xx_fh *fh = priv; 1227 struct em28xx_fh *fh = priv;
1195 struct em28xx *dev = fh->dev; 1228 struct em28xx *dev = fh->dev;
1196 __le64 buf; 1229 __le64 buf;
1230 int rc;
1197 1231
1232 switch (reg->match_type) {
1233 case V4L2_CHIP_MATCH_AC97:
1234 mutex_lock(&dev->lock);
1235 rc = em28xx_write_ac97(dev, reg->reg, reg->val);
1236 mutex_unlock(&dev->lock);
1237
1238 return rc;
1239 case V4L2_CHIP_MATCH_I2C_DRIVER:
1240 em28xx_i2c_call_clients(dev, VIDIOC_DBG_S_REGISTER, reg);
1241 return 0;
1242 case V4L2_CHIP_MATCH_I2C_ADDR:
1243 /* Not supported yet */
1244 return -EINVAL;
1245 default:
1246 if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
1247 return -EINVAL;
1248 }
1249
1250 /* Match host */
1198 buf = cpu_to_le64(reg->val); 1251 buf = cpu_to_le64(reg->val);
1199 1252
1200 return em28xx_write_regs(dev, reg->reg, (char *)&buf, 1253 mutex_lock(&dev->lock);
1201 em28xx_reg_len(reg->reg)); 1254 rc = em28xx_write_regs(dev, reg->reg, (char *)&buf,
1255 em28xx_reg_len(reg->reg));
1256 mutex_unlock(&dev->lock);
1257
1258 return rc;
1202} 1259}
1203#endif 1260#endif
1204 1261
@@ -1235,10 +1292,15 @@ static int vidioc_streamon(struct file *file, void *priv,
1235 return rc; 1292 return rc;
1236 1293
1237 1294
1238 if (unlikely(res_get(fh) < 0)) 1295 mutex_lock(&dev->lock);
1239 return -EBUSY; 1296 rc = res_get(fh);
1297
1298 if (likely(rc >= 0))
1299 rc = videobuf_streamon(&fh->vb_vidq);
1240 1300
1241 return (videobuf_streamon(&fh->vb_vidq)); 1301 mutex_unlock(&dev->lock);
1302
1303 return rc;
1242} 1304}
1243 1305
1244static int vidioc_streamoff(struct file *file, void *priv, 1306static int vidioc_streamoff(struct file *file, void *priv,
@@ -1257,9 +1319,13 @@ static int vidioc_streamoff(struct file *file, void *priv,
1257 if (type != fh->type) 1319 if (type != fh->type)
1258 return -EINVAL; 1320 return -EINVAL;
1259 1321
1322 mutex_lock(&dev->lock);
1323
1260 videobuf_streamoff(&fh->vb_vidq); 1324 videobuf_streamoff(&fh->vb_vidq);
1261 res_free(fh); 1325 res_free(fh);
1262 1326
1327 mutex_unlock(&dev->lock);
1328
1263 return 0; 1329 return 0;
1264} 1330}
1265 1331
@@ -1271,7 +1337,7 @@ static int vidioc_querycap(struct file *file, void *priv,
1271 1337
1272 strlcpy(cap->driver, "em28xx", sizeof(cap->driver)); 1338 strlcpy(cap->driver, "em28xx", sizeof(cap->driver));
1273 strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card)); 1339 strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
1274 strlcpy(cap->bus_info, dev->udev->dev.bus_id, sizeof(cap->bus_info)); 1340 strlcpy(cap->bus_info, dev_name(&dev->udev->dev), sizeof(cap->bus_info));
1275 1341
1276 cap->version = EM28XX_VERSION_CODE; 1342 cap->version = EM28XX_VERSION_CODE;
1277 1343
@@ -1288,15 +1354,13 @@ static int vidioc_querycap(struct file *file, void *priv,
1288} 1354}
1289 1355
1290static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, 1356static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
1291 struct v4l2_fmtdesc *fmtd) 1357 struct v4l2_fmtdesc *f)
1292{ 1358{
1293 if (fmtd->index != 0) 1359 if (unlikely(f->index >= ARRAY_SIZE(format)))
1294 return -EINVAL; 1360 return -EINVAL;
1295 1361
1296 fmtd->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1362 strlcpy(f->description, format[f->index].name, sizeof(f->description));
1297 strcpy(fmtd->description, "Packed YUY2"); 1363 f->pixelformat = format[f->index].fourcc;
1298 fmtd->pixelformat = V4L2_PIX_FMT_YUYV;
1299 memset(fmtd->reserved, 0, sizeof(fmtd->reserved));
1300 1364
1301 return 0; 1365 return 0;
1302} 1366}
@@ -1424,7 +1488,7 @@ static int radio_querycap(struct file *file, void *priv,
1424 1488
1425 strlcpy(cap->driver, "em28xx", sizeof(cap->driver)); 1489 strlcpy(cap->driver, "em28xx", sizeof(cap->driver));
1426 strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card)); 1490 strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
1427 strlcpy(cap->bus_info, dev->udev->dev.bus_id, sizeof(cap->bus_info)); 1491 strlcpy(cap->bus_info, dev_name(&dev->udev->dev), sizeof(cap->bus_info));
1428 1492
1429 cap->version = EM28XX_VERSION_CODE; 1493 cap->version = EM28XX_VERSION_CODE;
1430 cap->capabilities = V4L2_CAP_TUNER; 1494 cap->capabilities = V4L2_CAP_TUNER;
@@ -1442,7 +1506,10 @@ static int radio_g_tuner(struct file *file, void *priv,
1442 strcpy(t->name, "Radio"); 1506 strcpy(t->name, "Radio");
1443 t->type = V4L2_TUNER_RADIO; 1507 t->type = V4L2_TUNER_RADIO;
1444 1508
1509 mutex_lock(&dev->lock);
1445 em28xx_i2c_call_clients(dev, VIDIOC_G_TUNER, t); 1510 em28xx_i2c_call_clients(dev, VIDIOC_G_TUNER, t);
1511 mutex_unlock(&dev->lock);
1512
1446 return 0; 1513 return 0;
1447} 1514}
1448 1515
@@ -1474,7 +1541,9 @@ static int radio_s_tuner(struct file *file, void *priv,
1474 if (0 != t->index) 1541 if (0 != t->index)
1475 return -EINVAL; 1542 return -EINVAL;
1476 1543
1544 mutex_lock(&dev->lock);
1477 em28xx_i2c_call_clients(dev, VIDIOC_S_TUNER, t); 1545 em28xx_i2c_call_clients(dev, VIDIOC_S_TUNER, t);
1546 mutex_unlock(&dev->lock);
1478 1547
1479 return 0; 1548 return 0;
1480} 1549}
@@ -1516,28 +1585,13 @@ static int radio_queryctrl(struct file *file, void *priv,
1516static int em28xx_v4l2_open(struct inode *inode, struct file *filp) 1585static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1517{ 1586{
1518 int minor = iminor(inode); 1587 int minor = iminor(inode);
1519 int errCode = 0, radio = 0; 1588 int errCode = 0, radio;
1520 struct em28xx *h, *dev = NULL; 1589 struct em28xx *dev;
1590 enum v4l2_buf_type fh_type;
1521 struct em28xx_fh *fh; 1591 struct em28xx_fh *fh;
1522 enum v4l2_buf_type fh_type = 0;
1523 1592
1524 mutex_lock(&em28xx_devlist_mutex); 1593 dev = em28xx_get_device(inode, &fh_type, &radio);
1525 list_for_each_entry(h, &em28xx_devlist, devlist) { 1594
1526 if (h->vdev->minor == minor) {
1527 dev = h;
1528 fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1529 }
1530 if (h->vbi_dev->minor == minor) {
1531 dev = h;
1532 fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
1533 }
1534 if (h->radio_dev &&
1535 h->radio_dev->minor == minor) {
1536 radio = 1;
1537 dev = h;
1538 }
1539 }
1540 mutex_unlock(&em28xx_devlist_mutex);
1541 if (NULL == dev) 1595 if (NULL == dev)
1542 return -ENODEV; 1596 return -ENODEV;
1543 1597
@@ -1571,7 +1625,7 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1571 /* Needed, since GPIO might have disabled power to 1625 /* Needed, since GPIO might have disabled power to
1572 some i2c device 1626 some i2c device
1573 */ 1627 */
1574 em28xx_config_i2c(dev); 1628 em28xx_wake_i2c(dev);
1575 1629
1576 } 1630 }
1577 if (fh->radio) { 1631 if (fh->radio) {
@@ -1595,16 +1649,11 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1595 * unregisters the v4l2, i2c and usb devices 1649 * unregisters the v4l2, i2c and usb devices
1596 * called when the device gets disconnected or at module unload 1650 * called when the device gets disconnected or at module unload
1597*/ 1651*/
1598static void em28xx_release_resources(struct em28xx *dev) 1652void em28xx_release_analog_resources(struct em28xx *dev)
1599{ 1653{
1600 1654
1601 /*FIXME: I2C IR should be disconnected */ 1655 /*FIXME: I2C IR should be disconnected */
1602 1656
1603 em28xx_info("V4L2 devices /dev/video%d and /dev/vbi%d deregistered\n",
1604 dev->vdev->num, dev->vbi_dev->num);
1605 list_del(&dev->devlist);
1606 if (dev->sbutton_input_dev)
1607 em28xx_deregister_snapshot_button(dev);
1608 if (dev->radio_dev) { 1657 if (dev->radio_dev) {
1609 if (-1 != dev->radio_dev->minor) 1658 if (-1 != dev->radio_dev->minor)
1610 video_unregister_device(dev->radio_dev); 1659 video_unregister_device(dev->radio_dev);
@@ -1613,6 +1662,8 @@ static void em28xx_release_resources(struct em28xx *dev)
1613 dev->radio_dev = NULL; 1662 dev->radio_dev = NULL;
1614 } 1663 }
1615 if (dev->vbi_dev) { 1664 if (dev->vbi_dev) {
1665 em28xx_info("V4L2 device /dev/vbi%d deregistered\n",
1666 dev->vbi_dev->num);
1616 if (-1 != dev->vbi_dev->minor) 1667 if (-1 != dev->vbi_dev->minor)
1617 video_unregister_device(dev->vbi_dev); 1668 video_unregister_device(dev->vbi_dev);
1618 else 1669 else
@@ -1620,17 +1671,14 @@ static void em28xx_release_resources(struct em28xx *dev)
1620 dev->vbi_dev = NULL; 1671 dev->vbi_dev = NULL;
1621 } 1672 }
1622 if (dev->vdev) { 1673 if (dev->vdev) {
1674 em28xx_info("V4L2 device /dev/video%d deregistered\n",
1675 dev->vdev->num);
1623 if (-1 != dev->vdev->minor) 1676 if (-1 != dev->vdev->minor)
1624 video_unregister_device(dev->vdev); 1677 video_unregister_device(dev->vdev);
1625 else 1678 else
1626 video_device_release(dev->vdev); 1679 video_device_release(dev->vdev);
1627 dev->vdev = NULL; 1680 dev->vdev = NULL;
1628 } 1681 }
1629 em28xx_i2c_unregister(dev);
1630 usb_put_dev(dev->udev);
1631
1632 /* Mark device as unused */
1633 em28xx_devused &= ~(1<<dev->devno);
1634} 1682}
1635 1683
1636/* 1684/*
@@ -1647,11 +1695,10 @@ static int em28xx_v4l2_close(struct inode *inode, struct file *filp)
1647 em28xx_videodbg("users=%d\n", dev->users); 1695 em28xx_videodbg("users=%d\n", dev->users);
1648 1696
1649 1697
1698 mutex_lock(&dev->lock);
1650 if (res_check(fh)) 1699 if (res_check(fh))
1651 res_free(fh); 1700 res_free(fh);
1652 1701
1653 mutex_lock(&dev->lock);
1654
1655 if (dev->users == 1) { 1702 if (dev->users == 1) {
1656 videobuf_stop(&fh->vb_vidq); 1703 videobuf_stop(&fh->vb_vidq);
1657 videobuf_mmap_free(&fh->vb_vidq); 1704 videobuf_mmap_free(&fh->vb_vidq);
@@ -1665,9 +1712,12 @@ static int em28xx_v4l2_close(struct inode *inode, struct file *filp)
1665 return 0; 1712 return 0;
1666 } 1713 }
1667 1714
1715 /* Save some power by putting tuner to sleep */
1716 em28xx_i2c_call_clients(dev, TUNER_SET_STANDBY, NULL);
1717
1668 /* do this before setting alternate! */ 1718 /* do this before setting alternate! */
1669 em28xx_uninit_isoc(dev); 1719 em28xx_uninit_isoc(dev);
1670 em28xx_set_mode(dev, EM28XX_MODE_UNDEFINED); 1720 em28xx_set_mode(dev, EM28XX_SUSPEND);
1671 1721
1672 /* set alternate 0 */ 1722 /* set alternate 0 */
1673 dev->alt = 0; 1723 dev->alt = 0;
@@ -1706,8 +1756,12 @@ em28xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
1706 */ 1756 */
1707 1757
1708 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { 1758 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
1709 if (unlikely(res_get(fh))) 1759 mutex_lock(&dev->lock);
1710 return -EBUSY; 1760 rc = res_get(fh);
1761 mutex_unlock(&dev->lock);
1762
1763 if (unlikely(rc < 0))
1764 return rc;
1711 1765
1712 return videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0, 1766 return videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
1713 filp->f_flags & O_NONBLOCK); 1767 filp->f_flags & O_NONBLOCK);
@@ -1729,7 +1783,11 @@ static unsigned int em28xx_v4l2_poll(struct file *filp, poll_table * wait)
1729 if (rc < 0) 1783 if (rc < 0)
1730 return rc; 1784 return rc;
1731 1785
1732 if (unlikely(res_get(fh) < 0)) 1786 mutex_lock(&dev->lock);
1787 rc = res_get(fh);
1788 mutex_unlock(&dev->lock);
1789
1790 if (unlikely(rc < 0))
1733 return POLLERR; 1791 return POLLERR;
1734 1792
1735 if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type) 1793 if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
@@ -1747,13 +1805,17 @@ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
1747 struct em28xx *dev = fh->dev; 1805 struct em28xx *dev = fh->dev;
1748 int rc; 1806 int rc;
1749 1807
1750 if (unlikely(res_get(fh) < 0))
1751 return -EBUSY;
1752
1753 rc = check_dev(dev); 1808 rc = check_dev(dev);
1754 if (rc < 0) 1809 if (rc < 0)
1755 return rc; 1810 return rc;
1756 1811
1812 mutex_lock(&dev->lock);
1813 rc = res_get(fh);
1814 mutex_unlock(&dev->lock);
1815
1816 if (unlikely(rc < 0))
1817 return rc;
1818
1757 rc = videobuf_mmap_mapper(&fh->vb_vidq, vma); 1819 rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
1758 1820
1759 em28xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n", 1821 em28xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n",
@@ -1810,6 +1872,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
1810#ifdef CONFIG_VIDEO_ADV_DEBUG 1872#ifdef CONFIG_VIDEO_ADV_DEBUG
1811 .vidioc_g_register = vidioc_g_register, 1873 .vidioc_g_register = vidioc_g_register,
1812 .vidioc_s_register = vidioc_s_register, 1874 .vidioc_s_register = vidioc_s_register,
1875 .vidioc_g_chip_ident = vidioc_g_chip_ident,
1813#endif 1876#endif
1814#ifdef CONFIG_VIDEO_V4L1_COMPAT 1877#ifdef CONFIG_VIDEO_V4L1_COMPAT
1815 .vidiocgmbuf = vidiocgmbuf, 1878 .vidiocgmbuf = vidiocgmbuf,
@@ -1865,44 +1928,6 @@ static struct video_device em28xx_radio_template = {
1865/******************************** usb interface ******************************/ 1928/******************************** usb interface ******************************/
1866 1929
1867 1930
1868static LIST_HEAD(em28xx_extension_devlist);
1869static DEFINE_MUTEX(em28xx_extension_devlist_lock);
1870
1871int em28xx_register_extension(struct em28xx_ops *ops)
1872{
1873 struct em28xx *dev = NULL;
1874
1875 mutex_lock(&em28xx_devlist_mutex);
1876 mutex_lock(&em28xx_extension_devlist_lock);
1877 list_add_tail(&ops->next, &em28xx_extension_devlist);
1878 list_for_each_entry(dev, &em28xx_devlist, devlist) {
1879 if (dev)
1880 ops->init(dev);
1881 }
1882 printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
1883 mutex_unlock(&em28xx_extension_devlist_lock);
1884 mutex_unlock(&em28xx_devlist_mutex);
1885 return 0;
1886}
1887EXPORT_SYMBOL(em28xx_register_extension);
1888
1889void em28xx_unregister_extension(struct em28xx_ops *ops)
1890{
1891 struct em28xx *dev = NULL;
1892
1893 mutex_lock(&em28xx_devlist_mutex);
1894 list_for_each_entry(dev, &em28xx_devlist, devlist) {
1895 if (dev)
1896 ops->fini(dev);
1897 }
1898
1899 mutex_lock(&em28xx_extension_devlist_lock);
1900 printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
1901 list_del(&ops->next);
1902 mutex_unlock(&em28xx_extension_devlist_lock);
1903 mutex_unlock(&em28xx_devlist_mutex);
1904}
1905EXPORT_SYMBOL(em28xx_unregister_extension);
1906 1931
1907static struct video_device *em28xx_vdev_init(struct em28xx *dev, 1932static struct video_device *em28xx_vdev_init(struct em28xx *dev,
1908 const struct video_device *template, 1933 const struct video_device *template,
@@ -1925,11 +1950,43 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
1925 return vfd; 1950 return vfd;
1926} 1951}
1927 1952
1928 1953int em28xx_register_analog_devices(struct em28xx *dev)
1929static int register_analog_devices(struct em28xx *dev)
1930{ 1954{
1931 int ret; 1955 int ret;
1932 1956
1957 printk(KERN_INFO "%s: v4l2 driver version %d.%d.%d\n",
1958 dev->name,
1959 (EM28XX_VERSION_CODE >> 16) & 0xff,
1960 (EM28XX_VERSION_CODE >> 8) & 0xff, EM28XX_VERSION_CODE & 0xff);
1961
1962 /* Analog specific initialization */
1963 dev->format = &format[0];
1964 video_mux(dev, 0);
1965
1966 /* enable vbi capturing */
1967
1968/* em28xx_write_reg(dev, EM28XX_R0E_AUDIOSRC, 0xc0); audio register */
1969/* em28xx_write_reg(dev, EM28XX_R0F_XCLK, 0x80); clk register */
1970 em28xx_write_reg(dev, EM28XX_R11_VINCTRL, 0x51);
1971
1972 dev->mute = 1; /* maybe not the right place... */
1973 dev->volume = 0x1f;
1974
1975 em28xx_set_outfmt(dev);
1976 em28xx_colorlevels_set_default(dev);
1977 em28xx_compression_disable(dev);
1978
1979 /* set default norm */
1980 dev->norm = em28xx_video_template.current_norm;
1981 dev->width = norm_maxw(dev);
1982 dev->height = norm_maxh(dev);
1983 dev->interlaced = EM28XX_INTERLACED_DEFAULT;
1984 dev->hscale = 0;
1985 dev->vscale = 0;
1986
1987 /* FIXME: This is a very bad hack! Not all devices have TV on input 2 */
1988 dev->ctl_input = 2;
1989
1933 /* allocate and fill video video_device struct */ 1990 /* allocate and fill video video_device struct */
1934 dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video"); 1991 dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video");
1935 if (!dev->vdev) { 1992 if (!dev->vdev) {
@@ -1978,383 +2035,3 @@ static int register_analog_devices(struct em28xx *dev)
1978 2035
1979 return 0; 2036 return 0;
1980} 2037}
1981
1982
1983/*
1984 * em28xx_init_dev()
1985 * allocates and inits the device structs, registers i2c bus and v4l device
1986 */
1987static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
1988 int minor)
1989{
1990 struct em28xx_ops *ops = NULL;
1991 struct em28xx *dev = *devhandle;
1992 int retval = -ENOMEM;
1993 int errCode;
1994 unsigned int maxh, maxw;
1995
1996 dev->udev = udev;
1997 mutex_init(&dev->lock);
1998 mutex_init(&dev->ctrl_urb_lock);
1999 spin_lock_init(&dev->slock);
2000 init_waitqueue_head(&dev->open);
2001 init_waitqueue_head(&dev->wait_frame);
2002 init_waitqueue_head(&dev->wait_stream);
2003
2004 dev->em28xx_write_regs = em28xx_write_regs;
2005 dev->em28xx_read_reg = em28xx_read_reg;
2006 dev->em28xx_read_reg_req_len = em28xx_read_reg_req_len;
2007 dev->em28xx_write_regs_req = em28xx_write_regs_req;
2008 dev->em28xx_read_reg_req = em28xx_read_reg_req;
2009 dev->is_em2800 = em28xx_boards[dev->model].is_em2800;
2010
2011 em28xx_pre_card_setup(dev);
2012
2013 errCode = em28xx_config(dev);
2014 if (errCode) {
2015 em28xx_errdev("error configuring device\n");
2016 return -ENOMEM;
2017 }
2018
2019 /* register i2c bus */
2020 errCode = em28xx_i2c_register(dev);
2021 if (errCode < 0) {
2022 em28xx_errdev("%s: em28xx_i2c_register - errCode [%d]!\n",
2023 __func__, errCode);
2024 return errCode;
2025 }
2026
2027 /* Do board specific init and eeprom reading */
2028 em28xx_card_setup(dev);
2029
2030 /* Configure audio */
2031 errCode = em28xx_audio_analog_set(dev);
2032 if (errCode < 0) {
2033 em28xx_errdev("%s: em28xx_audio_analog_set - errCode [%d]!\n",
2034 __func__, errCode);
2035 return errCode;
2036 }
2037
2038 /* configure the device */
2039 em28xx_config_i2c(dev);
2040
2041 /* set default norm */
2042 dev->norm = em28xx_video_template.current_norm;
2043
2044 maxw = norm_maxw(dev);
2045 maxh = norm_maxh(dev);
2046
2047 /* set default image size */
2048 dev->width = maxw;
2049 dev->height = maxh;
2050 dev->interlaced = EM28XX_INTERLACED_DEFAULT;
2051 dev->hscale = 0;
2052 dev->vscale = 0;
2053 dev->ctl_input = 2;
2054
2055 errCode = em28xx_config(dev);
2056 if (errCode < 0) {
2057 em28xx_errdev("%s: em28xx_config - errCode [%d]!\n",
2058 __func__, errCode);
2059 return errCode;
2060 }
2061
2062 /* init video dma queues */
2063 INIT_LIST_HEAD(&dev->vidq.active);
2064 INIT_LIST_HEAD(&dev->vidq.queued);
2065
2066
2067 if (dev->has_msp34xx) {
2068 /* Send a reset to other chips via gpio */
2069 errCode = em28xx_write_regs_req(dev, 0x00, 0x08, "\xf7", 1);
2070 if (errCode < 0) {
2071 em28xx_errdev("%s: em28xx_write_regs_req - msp34xx(1) failed! errCode [%d]\n",
2072 __func__, errCode);
2073 return errCode;
2074 }
2075 msleep(3);
2076
2077 errCode = em28xx_write_regs_req(dev, 0x00, 0x08, "\xff", 1);
2078 if (errCode < 0) {
2079 em28xx_errdev("%s: em28xx_write_regs_req - msp34xx(2) failed! errCode [%d]\n",
2080 __func__, errCode);
2081 return errCode;
2082 }
2083 msleep(3);
2084 }
2085
2086 video_mux(dev, 0);
2087
2088 mutex_lock(&em28xx_devlist_mutex);
2089 list_add_tail(&dev->devlist, &em28xx_devlist);
2090 retval = register_analog_devices(dev);
2091 if (retval < 0) {
2092 em28xx_release_resources(dev);
2093 mutex_unlock(&em28xx_devlist_mutex);
2094 goto fail_reg_devices;
2095 }
2096
2097 mutex_lock(&em28xx_extension_devlist_lock);
2098 if (!list_empty(&em28xx_extension_devlist)) {
2099 list_for_each_entry(ops, &em28xx_extension_devlist, next) {
2100 if (ops->id)
2101 ops->init(dev);
2102 }
2103 }
2104 mutex_unlock(&em28xx_extension_devlist_lock);
2105 mutex_unlock(&em28xx_devlist_mutex);
2106
2107 return 0;
2108
2109fail_reg_devices:
2110 mutex_unlock(&dev->lock);
2111 return retval;
2112}
2113
2114#if defined(CONFIG_MODULES) && defined(MODULE)
2115static void request_module_async(struct work_struct *work)
2116{
2117 struct em28xx *dev = container_of(work,
2118 struct em28xx, request_module_wk);
2119
2120 if (dev->has_audio_class)
2121 request_module("snd-usb-audio");
2122 else
2123 request_module("em28xx-alsa");
2124
2125 if (dev->has_dvb)
2126 request_module("em28xx-dvb");
2127}
2128
2129static void request_modules(struct em28xx *dev)
2130{
2131 INIT_WORK(&dev->request_module_wk, request_module_async);
2132 schedule_work(&dev->request_module_wk);
2133}
2134#else
2135#define request_modules(dev)
2136#endif /* CONFIG_MODULES */
2137
2138/*
2139 * em28xx_usb_probe()
2140 * checks for supported devices
2141 */
2142static int em28xx_usb_probe(struct usb_interface *interface,
2143 const struct usb_device_id *id)
2144{
2145 const struct usb_endpoint_descriptor *endpoint;
2146 struct usb_device *udev;
2147 struct usb_interface *uif;
2148 struct em28xx *dev = NULL;
2149 int retval = -ENODEV;
2150 int i, nr, ifnum;
2151
2152 udev = usb_get_dev(interface_to_usbdev(interface));
2153 ifnum = interface->altsetting[0].desc.bInterfaceNumber;
2154
2155 /* Check to see next free device and mark as used */
2156 nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
2157 em28xx_devused |= 1<<nr;
2158
2159 /* Don't register audio interfaces */
2160 if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
2161 em28xx_err(DRIVER_NAME " audio device (%04x:%04x): interface %i, class %i\n",
2162 udev->descriptor.idVendor,
2163 udev->descriptor.idProduct,
2164 ifnum,
2165 interface->altsetting[0].desc.bInterfaceClass);
2166
2167 em28xx_devused &= ~(1<<nr);
2168 return -ENODEV;
2169 }
2170
2171 em28xx_err(DRIVER_NAME " new video device (%04x:%04x): interface %i, class %i\n",
2172 udev->descriptor.idVendor,
2173 udev->descriptor.idProduct,
2174 ifnum,
2175 interface->altsetting[0].desc.bInterfaceClass);
2176
2177 endpoint = &interface->cur_altsetting->endpoint[1].desc;
2178
2179 /* check if the device has the iso in endpoint at the correct place */
2180 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
2181 USB_ENDPOINT_XFER_ISOC) {
2182 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n");
2183 em28xx_devused &= ~(1<<nr);
2184 return -ENODEV;
2185 }
2186 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) {
2187 em28xx_err(DRIVER_NAME " probing error: endpoint is ISO OUT endpoint!\n");
2188 em28xx_devused &= ~(1<<nr);
2189 return -ENODEV;
2190 }
2191
2192 if (nr >= EM28XX_MAXBOARDS) {
2193 printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
2194 EM28XX_MAXBOARDS);
2195 em28xx_devused &= ~(1<<nr);
2196 return -ENOMEM;
2197 }
2198
2199 /* allocate memory for our device state and initialize it */
2200 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2201 if (dev == NULL) {
2202 em28xx_err(DRIVER_NAME ": out of memory!\n");
2203 em28xx_devused &= ~(1<<nr);
2204 return -ENOMEM;
2205 }
2206
2207 snprintf(dev->name, 29, "em28xx #%d", nr);
2208 dev->devno = nr;
2209 dev->model = id->driver_info;
2210 dev->alt = -1;
2211
2212 /* Checks if audio is provided by some interface */
2213 for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
2214 uif = udev->config->interface[i];
2215 if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
2216 dev->has_audio_class = 1;
2217 break;
2218 }
2219 }
2220
2221 printk(KERN_INFO DRIVER_NAME " %s usb audio class\n",
2222 dev->has_audio_class ? "Has" : "Doesn't have");
2223
2224 /* compute alternate max packet sizes */
2225 uif = udev->actconfig->interface[0];
2226
2227 dev->num_alt = uif->num_altsetting;
2228 em28xx_info("Alternate settings: %i\n", dev->num_alt);
2229/* dev->alt_max_pkt_size = kmalloc(sizeof(*dev->alt_max_pkt_size)* */
2230 dev->alt_max_pkt_size = kmalloc(32 * dev->num_alt, GFP_KERNEL);
2231
2232 if (dev->alt_max_pkt_size == NULL) {
2233 em28xx_errdev("out of memory!\n");
2234 em28xx_devused &= ~(1<<nr);
2235 kfree(dev);
2236 return -ENOMEM;
2237 }
2238
2239 for (i = 0; i < dev->num_alt ; i++) {
2240 u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
2241 wMaxPacketSize);
2242 dev->alt_max_pkt_size[i] =
2243 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
2244 em28xx_info("Alternate setting %i, max size= %i\n", i,
2245 dev->alt_max_pkt_size[i]);
2246 }
2247
2248 if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
2249 dev->model = card[nr];
2250
2251 /* allocate device struct */
2252 retval = em28xx_init_dev(&dev, udev, nr);
2253 if (retval) {
2254 em28xx_devused &= ~(1<<dev->devno);
2255 kfree(dev);
2256
2257 return retval;
2258 }
2259
2260 em28xx_info("Found %s\n", em28xx_boards[dev->model].name);
2261
2262 /* save our data pointer in this interface device */
2263 usb_set_intfdata(interface, dev);
2264
2265 request_modules(dev);
2266
2267 return 0;
2268}
2269
2270/*
2271 * em28xx_usb_disconnect()
2272 * called when the device gets disconnected
2273 * video device will be unregistered on v4l2_close in case it is still open
2274 */
2275static void em28xx_usb_disconnect(struct usb_interface *interface)
2276{
2277 struct em28xx *dev;
2278 struct em28xx_ops *ops = NULL;
2279
2280 dev = usb_get_intfdata(interface);
2281 usb_set_intfdata(interface, NULL);
2282
2283 if (!dev)
2284 return;
2285
2286 em28xx_info("disconnecting %s\n", dev->vdev->name);
2287
2288 /* wait until all current v4l2 io is finished then deallocate
2289 resources */
2290 mutex_lock(&dev->lock);
2291
2292 wake_up_interruptible_all(&dev->open);
2293
2294 if (dev->users) {
2295 em28xx_warn
2296 ("device /dev/video%d is open! Deregistration and memory "
2297 "deallocation are deferred on close.\n",
2298 dev->vdev->num);
2299
2300 dev->state |= DEV_MISCONFIGURED;
2301 em28xx_uninit_isoc(dev);
2302 dev->state |= DEV_DISCONNECTED;
2303 wake_up_interruptible(&dev->wait_frame);
2304 wake_up_interruptible(&dev->wait_stream);
2305 } else {
2306 dev->state |= DEV_DISCONNECTED;
2307 em28xx_release_resources(dev);
2308 }
2309 mutex_unlock(&dev->lock);
2310
2311 mutex_lock(&em28xx_extension_devlist_lock);
2312 if (!list_empty(&em28xx_extension_devlist)) {
2313 list_for_each_entry(ops, &em28xx_extension_devlist, next) {
2314 ops->fini(dev);
2315 }
2316 }
2317 mutex_unlock(&em28xx_extension_devlist_lock);
2318
2319 if (!dev->users) {
2320 kfree(dev->alt_max_pkt_size);
2321 kfree(dev);
2322 }
2323}
2324
2325static struct usb_driver em28xx_usb_driver = {
2326 .name = "em28xx",
2327 .probe = em28xx_usb_probe,
2328 .disconnect = em28xx_usb_disconnect,
2329 .id_table = em28xx_id_table,
2330};
2331
2332static int __init em28xx_module_init(void)
2333{
2334 int result;
2335
2336 printk(KERN_INFO DRIVER_NAME " v4l2 driver version %d.%d.%d loaded\n",
2337 (EM28XX_VERSION_CODE >> 16) & 0xff,
2338 (EM28XX_VERSION_CODE >> 8) & 0xff, EM28XX_VERSION_CODE & 0xff);
2339#ifdef SNAPSHOT
2340 printk(KERN_INFO DRIVER_NAME " snapshot date %04d-%02d-%02d\n",
2341 SNAPSHOT / 10000, (SNAPSHOT / 100) % 100, SNAPSHOT % 100);
2342#endif
2343
2344 /* register this driver with the USB subsystem */
2345 result = usb_register(&em28xx_usb_driver);
2346 if (result)
2347 em28xx_err(DRIVER_NAME
2348 " usb_register failed. Error number %d.\n", result);
2349
2350 return result;
2351}
2352
2353static void __exit em28xx_module_exit(void)
2354{
2355 /* deregister this driver with the USB subsystem */
2356 usb_deregister(&em28xx_usb_driver);
2357}
2358
2359module_init(em28xx_module_init);
2360module_exit(em28xx_module_exit);
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 5956e9b3062f..b5eddc26388e 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -67,7 +67,6 @@
67#define EM2820_BOARD_HERCULES_SMART_TV_USB2 26 67#define EM2820_BOARD_HERCULES_SMART_TV_USB2 26
68#define EM2820_BOARD_PINNACLE_USB_2_FM1216ME 27 68#define EM2820_BOARD_PINNACLE_USB_2_FM1216ME 27
69#define EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE 28 69#define EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE 28
70#define EM2820_BOARD_PINNACLE_DVC_100 29
71#define EM2820_BOARD_VIDEOLOGY_20K14XUSB 30 70#define EM2820_BOARD_VIDEOLOGY_20K14XUSB 30
72#define EM2821_BOARD_USBGEAR_VD204 31 71#define EM2821_BOARD_USBGEAR_VD204 31
73#define EM2821_BOARD_SUPERCOMP_USB_2 32 72#define EM2821_BOARD_SUPERCOMP_USB_2 32
@@ -97,6 +96,8 @@
97#define EM2882_BOARD_PINNACLE_HYBRID_PRO 56 96#define EM2882_BOARD_PINNACLE_HYBRID_PRO 56
98#define EM2883_BOARD_KWORLD_HYBRID_A316 57 97#define EM2883_BOARD_KWORLD_HYBRID_A316 57
99#define EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU 58 98#define EM2820_BOARD_COMPRO_VIDEOMATE_FORYOU 58
99#define EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 60
100#define EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2 61
100 101
101/* Limits minimum and default number of buffers */ 102/* Limits minimum and default number of buffers */
102#define EM28XX_MIN_BUF 4 103#define EM28XX_MIN_BUF 4
@@ -159,7 +160,7 @@
159#define EM2800_I2C_WRITE_TIMEOUT 20 160#define EM2800_I2C_WRITE_TIMEOUT 20
160 161
161enum em28xx_mode { 162enum em28xx_mode {
162 EM28XX_MODE_UNDEFINED, 163 EM28XX_SUSPEND,
163 EM28XX_ANALOG_MODE, 164 EM28XX_ANALOG_MODE,
164 EM28XX_DIGITAL_MODE, 165 EM28XX_DIGITAL_MODE,
165}; 166};
@@ -207,9 +208,12 @@ struct em28xx_usb_isoc_ctl {
207 208
208}; 209};
209 210
211/* Struct to enumerate video formats */
210struct em28xx_fmt { 212struct em28xx_fmt {
211 char *name; 213 char *name;
212 u32 fourcc; /* v4l2 format id */ 214 u32 fourcc; /* v4l2 format id */
215 int depth;
216 int reg;
213}; 217};
214 218
215/* buffer for one video frame */ 219/* buffer for one video frame */
@@ -255,54 +259,105 @@ enum enum28xx_itype {
255 EM28XX_RADIO, 259 EM28XX_RADIO,
256}; 260};
257 261
262enum em28xx_ac97_mode {
263 EM28XX_NO_AC97 = 0,
264 EM28XX_AC97_EM202,
265 EM28XX_AC97_SIGMATEL,
266 EM28XX_AC97_OTHER,
267};
268
269struct em28xx_audio_mode {
270 enum em28xx_ac97_mode ac97;
271
272 u16 ac97_feat;
273 u32 ac97_vendor_id;
274
275 unsigned int has_audio:1;
276
277 unsigned int i2s_3rates:1;
278 unsigned int i2s_5rates:1;
279};
280
281/* em28xx has two audio inputs: tuner and line in.
282 However, on most devices, an auxiliary AC97 codec device is used.
283 The AC97 device may have several different inputs and outputs,
284 depending on the model. So, it is possible to use the AC97 mixer to
285 address more than two different entries.
286 */
258enum em28xx_amux { 287enum em28xx_amux {
259 EM28XX_AMUX_VIDEO, 288 /* This is the only entry for em28xx tuner input */
260 EM28XX_AMUX_LINE_IN, 289 EM28XX_AMUX_VIDEO, /* em28xx tuner, AC97 mixer Video */
261 EM28XX_AMUX_AC97_VIDEO, 290
262 EM28XX_AMUX_AC97_LINE_IN, 291 EM28XX_AMUX_LINE_IN, /* AC97 mixer Line In */
292
293 /* Some less-common mixer setups */
294 EM28XX_AMUX_VIDEO2, /* em28xx Line in, AC97 mixer Video */
295 EM28XX_AMUX_PHONE,
296 EM28XX_AMUX_MIC,
297 EM28XX_AMUX_CD,
298 EM28XX_AMUX_AUX,
299 EM28XX_AMUX_PCM_OUT,
300};
301
302enum em28xx_aout {
303 EM28XX_AOUT_MASTER = 1 << 0,
304 EM28XX_AOUT_LINE = 1 << 1,
305 EM28XX_AOUT_MONO = 1 << 2,
306 EM28XX_AOUT_LFE = 1 << 3,
307 EM28XX_AOUT_SURR = 1 << 4,
308};
309
310struct em28xx_reg_seq {
311 int reg;
312 unsigned char val, mask;
313 int sleep;
263}; 314};
264 315
265struct em28xx_input { 316struct em28xx_input {
266 enum enum28xx_itype type; 317 enum enum28xx_itype type;
267 unsigned int vmux; 318 unsigned int vmux;
268 enum em28xx_amux amux; 319 enum em28xx_amux amux;
320 enum em28xx_aout aout;
321 struct em28xx_reg_seq *gpio;
269}; 322};
270 323
271#define INPUT(nr) (&em28xx_boards[dev->model].input[nr]) 324#define INPUT(nr) (&em28xx_boards[dev->model].input[nr])
272 325
273enum em28xx_decoder { 326enum em28xx_decoder {
327 EM28XX_NODECODER,
274 EM28XX_TVP5150, 328 EM28XX_TVP5150,
275 EM28XX_SAA7113, 329 EM28XX_SAA711X,
276 EM28XX_SAA7114
277};
278
279struct em28xx_reg_seq {
280 int reg;
281 unsigned char val, mask;
282 int sleep;
283}; 330};
284 331
285struct em28xx_board { 332struct em28xx_board {
286 char *name; 333 char *name;
287 int vchannels; 334 int vchannels;
288 int tuner_type; 335 int tuner_type;
336 int tuner_addr;
289 337
290 /* i2c flags */ 338 /* i2c flags */
291 unsigned int tda9887_conf; 339 unsigned int tda9887_conf;
292 340
341 /* GPIO sequences */
342 struct em28xx_reg_seq *dvb_gpio;
343 struct em28xx_reg_seq *suspend_gpio;
344 struct em28xx_reg_seq *tuner_gpio;
345
293 unsigned int is_em2800:1; 346 unsigned int is_em2800:1;
294 unsigned int has_msp34xx:1; 347 unsigned int has_msp34xx:1;
295 unsigned int mts_firmware:1; 348 unsigned int mts_firmware:1;
296 unsigned int has_12mhz_i2s:1;
297 unsigned int max_range_640_480:1; 349 unsigned int max_range_640_480:1;
298 unsigned int has_dvb:1; 350 unsigned int has_dvb:1;
299 unsigned int has_snapshot_button:1; 351 unsigned int has_snapshot_button:1;
300 unsigned int valid:1; 352 unsigned int valid:1;
301 353
354 unsigned char xclk, i2c_speed;
355
302 enum em28xx_decoder decoder; 356 enum em28xx_decoder decoder;
303 357
304 struct em28xx_input input[MAX_EM28XX_INPUT]; 358 struct em28xx_input input[MAX_EM28XX_INPUT];
305 struct em28xx_input radio; 359 struct em28xx_input radio;
360 IR_KEYTAB_TYPE *ir_codes;
306}; 361};
307 362
308struct em28xx_eeprom { 363struct em28xx_eeprom {
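
The per-input fields added to struct em28xx_input above (amux, aout, gpio) let a board entry describe its complete audio/video routing in data rather than code. Below is a purely hypothetical board entry showing how the new fields combine, written as it would appear inside em28xx-cards.c (which already pulls in the needed headers); the register values, GPIO sequence and names are invented for illustration and do not correspond to any real em28xx board.

/* hypothetical example only */
static struct em28xx_reg_seq example_gpio[] = {
        { 0x08, 0x6d, 0xff, 10 },       /* reg, val, mask, sleep (ms) */
        { -1,   -1,   -1,   -1 },       /* end of sequence */
};

static struct em28xx_board example_board = {
        .name       = "Hypothetical analog board",
        .tuner_type = TUNER_XC2028,
        .decoder    = EM28XX_TVP5150,
        .input      = { {
                .type = EM28XX_VMUX_TELEVISION,
                .vmux = TVP5150_COMPOSITE0,
                .amux = EM28XX_AMUX_VIDEO,      /* tuner audio, AC97 "Video" */
                .gpio = example_gpio,
        }, {
                .type = EM28XX_VMUX_COMPOSITE1,
                .vmux = TVP5150_COMPOSITE1,
                .amux = EM28XX_AMUX_LINE_IN,    /* AC97 "Line In" */
                .aout = EM28XX_AOUT_MONO | EM28XX_AOUT_LINE,
                .gpio = example_gpio,
        } },
};
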
@@ -369,32 +424,26 @@ struct em28xx {
369 char name[30]; /* name (including minor) of the device */ 424 char name[30]; /* name (including minor) of the device */
370 int model; /* index in the device_data struct */ 425 int model; /* index in the device_data struct */
371 int devno; /* marks the number of this device */ 426 int devno; /* marks the number of this device */
372 unsigned int is_em2800:1; 427 enum em28xx_chip_id chip_id;
373 unsigned int has_msp34xx:1; 428
374 unsigned int has_tda9887:1; 429 struct em28xx_board board;
430
375 unsigned int stream_on:1; /* Locks streams */ 431 unsigned int stream_on:1; /* Locks streams */
376 unsigned int has_audio_class:1; 432 unsigned int has_audio_class:1;
377 unsigned int has_12mhz_i2s:1; 433 unsigned int has_alsa_audio:1;
378 unsigned int max_range_640_480:1;
379 unsigned int has_dvb:1;
380 unsigned int has_snapshot_button:1;
381 unsigned int valid:1; /* report for validated boards */
382 434
383 /* Some older em28xx chips need a waiting time after writing */ 435 struct em28xx_fmt *format;
384 unsigned int wait_after_write;
385 436
386 /* GPIO sequences for analog and digital mode */ 437 struct em28xx_IR *ir;
387 struct em28xx_reg_seq *analog_gpio, *digital_gpio;
388 438
389 /* GPIO sequences for tuner callbacks */ 439 /* Some older em28xx chips need a waiting time after writing */
390 struct em28xx_reg_seq *tun_analog_gpio, *tun_digital_gpio; 440 unsigned int wait_after_write;
391 441
392 int video_inputs; /* number of video inputs */
393 struct list_head devlist; 442 struct list_head devlist;
394 443
395 u32 i2s_speed; /* I2S speed for audio digital stream */ 444 u32 i2s_speed; /* I2S speed for audio digital stream */
396 445
397 enum em28xx_decoder decoder; 446 struct em28xx_audio_mode audio_mode;
398 447
399 int tuner_type; /* type of the tuner */ 448 int tuner_type; /* type of the tuner */
400 int tuner_addr; /* tuner address */ 449 int tuner_addr; /* tuner address */
@@ -409,6 +458,7 @@ struct em28xx {
409 int ctl_freq; /* selected frequency */ 458 int ctl_freq; /* selected frequency */
410 unsigned int ctl_input; /* selected input */ 459 unsigned int ctl_input; /* selected input */
411 unsigned int ctl_ainput;/* selected audio input */ 460 unsigned int ctl_ainput;/* selected audio input */
461 unsigned int ctl_aoutput;/* selected audio output */
412 int mute; 462 int mute;
413 int volume; 463 int volume;
414 /* frame properties */ 464 /* frame properties */
@@ -469,6 +519,9 @@ struct em28xx {
469 519
470 enum em28xx_mode mode; 520 enum em28xx_mode mode;
471 521
522 /* register numbers for GPO/GPIO registers */
523 u16 reg_gpo_num, reg_gpio_num;
524
472 /* Caches GPO and GPIO registers */ 525 /* Caches GPO and GPIO registers */
473 unsigned char reg_gpo, reg_gpio; 526 unsigned char reg_gpo, reg_gpio;
474 527
@@ -508,11 +561,17 @@ int em28xx_read_reg(struct em28xx *dev, u16 reg);
508int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf, 561int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
509 int len); 562 int len);
510int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len); 563int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len);
564int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val);
565
566int em28xx_read_ac97(struct em28xx *dev, u8 reg);
567int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val);
568
511int em28xx_audio_analog_set(struct em28xx *dev); 569int em28xx_audio_analog_set(struct em28xx *dev);
570int em28xx_audio_setup(struct em28xx *dev);
512 571
513int em28xx_colorlevels_set_default(struct em28xx *dev); 572int em28xx_colorlevels_set_default(struct em28xx *dev);
514int em28xx_capture_start(struct em28xx *dev, int start); 573int em28xx_capture_start(struct em28xx *dev, int start);
515int em28xx_outfmt_set_yuv422(struct em28xx *dev); 574int em28xx_set_outfmt(struct em28xx *dev);
516int em28xx_resolution_set(struct em28xx *dev); 575int em28xx_resolution_set(struct em28xx *dev);
517int em28xx_set_alternate(struct em28xx *dev); 576int em28xx_set_alternate(struct em28xx *dev);
518int em28xx_init_isoc(struct em28xx *dev, int max_packets, 577int em28xx_init_isoc(struct em28xx *dev, int max_packets,
@@ -521,10 +580,20 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
521void em28xx_uninit_isoc(struct em28xx *dev); 580void em28xx_uninit_isoc(struct em28xx *dev);
522int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode); 581int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
523int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio); 582int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
524 583void em28xx_wake_i2c(struct em28xx *dev);
525/* Provided by em28xx-video.c */ 584void em28xx_remove_from_devlist(struct em28xx *dev);
585void em28xx_add_into_devlist(struct em28xx *dev);
586struct em28xx *em28xx_get_device(struct inode *inode,
587 enum v4l2_buf_type *fh_type,
588 int *has_radio);
526int em28xx_register_extension(struct em28xx_ops *dev); 589int em28xx_register_extension(struct em28xx_ops *dev);
527void em28xx_unregister_extension(struct em28xx_ops *dev); 590void em28xx_unregister_extension(struct em28xx_ops *dev);
591void em28xx_init_extension(struct em28xx *dev);
592void em28xx_close_extension(struct em28xx *dev);
593
594/* Provided by em28xx-video.c */
595int em28xx_register_analog_devices(struct em28xx *dev);
596void em28xx_release_analog_resources(struct em28xx *dev);
528 597
529/* Provided by em28xx-cards.c */ 598/* Provided by em28xx-cards.c */
530extern int em2800_variant_detect(struct usb_device *udev, int model); 599extern int em2800_variant_detect(struct usb_device *udev, int model);
@@ -535,9 +604,9 @@ extern struct usb_device_id em28xx_id_table[];
535extern const unsigned int em28xx_bcount; 604extern const unsigned int em28xx_bcount;
536void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir); 605void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir);
537int em28xx_tuner_callback(void *ptr, int component, int command, int arg); 606int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
607void em28xx_release_resources(struct em28xx *dev);
538 608
539/* Provided by em28xx-input.c */ 609/* Provided by em28xx-input.c */
540/* TODO: Check if the standard get_key handlers on ir-common can be used */
541int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw); 610int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
542int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw); 611int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
543int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key, 612int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
@@ -545,6 +614,9 @@ int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
545void em28xx_register_snapshot_button(struct em28xx *dev); 614void em28xx_register_snapshot_button(struct em28xx *dev);
546void em28xx_deregister_snapshot_button(struct em28xx *dev); 615void em28xx_deregister_snapshot_button(struct em28xx *dev);
547 616
617int em28xx_ir_init(struct em28xx *dev);
618int em28xx_ir_fini(struct em28xx *dev);
619
548/* printk macros */ 620/* printk macros */
549 621
550#define em28xx_err(fmt, arg...) do {\ 622#define em28xx_err(fmt, arg...) do {\
@@ -564,7 +636,7 @@ void em28xx_deregister_snapshot_button(struct em28xx *dev);
564static inline int em28xx_compression_disable(struct em28xx *dev) 636static inline int em28xx_compression_disable(struct em28xx *dev)
565{ 637{
566 /* side effect of disabling scaler and mixer */ 638 /* side effect of disabling scaler and mixer */
567 return em28xx_write_regs(dev, EM28XX_R26_COMPR, "\x00", 1); 639 return em28xx_write_reg(dev, EM28XX_R26_COMPR, 0x00);
568} 640}
569 641
570static inline int em28xx_contrast_get(struct em28xx *dev) 642static inline int em28xx_contrast_get(struct em28xx *dev)
@@ -636,7 +708,7 @@ static inline int em28xx_gamma_set(struct em28xx *dev, s32 val)
636/*FIXME: maxw should be dependent on alt mode */ 708/*FIXME: maxw should be dependent on alt mode */
637static inline unsigned int norm_maxw(struct em28xx *dev) 709static inline unsigned int norm_maxw(struct em28xx *dev)
638{ 710{
639 if (dev->max_range_640_480) 711 if (dev->board.max_range_640_480)
640 return 640; 712 return 640;
641 else 713 else
642 return 720; 714 return 720;
@@ -644,7 +716,7 @@ static inline unsigned int norm_maxw(struct em28xx *dev)
644 716
645static inline unsigned int norm_maxh(struct em28xx *dev) 717static inline unsigned int norm_maxh(struct em28xx *dev)
646{ 718{
647 if (dev->max_range_640_480) 719 if (dev->board.max_range_640_480)
648 return 480; 720 return 480;
649 else 721 else
650 return (dev->norm & V4L2_STD_625_50) ? 576 : 480; 722 return (dev->norm & V4L2_STD_625_50) ? 576 : 480;
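
em28xx_compression_disable() (and several other helpers in this header) now go through the new single-byte em28xx_write_reg() instead of handing string literals to em28xx_write_regs(). The helper itself lives in em28xx-core.c and is not part of this hunk; it presumably reduces to a thin wrapper along these lines (a sketch, not the actual definition):

/* sketch of the probable wrapper behind the new prototype */
int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
{
        return em28xx_write_regs(dev, reg, (char *)&val, 1);
}
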
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 9d0ef96c23ff..83c07112c59d 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -1581,7 +1581,7 @@ et61x251_vidioc_querycap(struct et61x251_device* cam, void __user * arg)
1581 1581
1582 strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card)); 1582 strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card));
1583 if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0) 1583 if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0)
1584 strlcpy(cap.bus_info, cam->usbdev->dev.bus_id, 1584 strlcpy(cap.bus_info, dev_name(&cam->usbdev->dev),
1585 sizeof(cap.bus_info)); 1585 sizeof(cap.bus_info));
1586 1586
1587 if (copy_to_user(arg, &cap, sizeof(cap))) 1587 if (copy_to_user(arg, &cap, sizeof(cap)))
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 6b557c057fac..ee6a691dff22 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -12,12 +12,13 @@ menuconfig USB_GSPCA
12 "Video For Linux" to use this driver. 12 "Video For Linux" to use this driver.
13 13
14 To compile this driver as modules, choose M here: the 14 To compile this driver as modules, choose M here: the
15 modules will be called gspca_main. 15 module will be called gspca_main.
16 16
17 17
18if USB_GSPCA && VIDEO_V4L2 18if USB_GSPCA && VIDEO_V4L2
19 19
20source "drivers/media/video/gspca/m5602/Kconfig" 20source "drivers/media/video/gspca/m5602/Kconfig"
21source "drivers/media/video/gspca/stv06xx/Kconfig"
21 22
22config USB_GSPCA_CONEX 23config USB_GSPCA_CONEX
23 tristate "Conexant Camera Driver" 24 tristate "Conexant Camera Driver"
@@ -64,6 +65,16 @@ config USB_GSPCA_OV519
64 To compile this driver as a module, choose M here: the 65 To compile this driver as a module, choose M here: the
65 module will be called gspca_ov519. 66 module will be called gspca_ov519.
66 67
68config USB_GSPCA_OV534
69 tristate "OV534 USB Camera Driver"
70 depends on VIDEO_V4L2 && USB_GSPCA
71 help
72 Say Y here if you want support for cameras based on the OV534 chip.
73 (e.g. Sony Playstation EYE)
74
75 To compile this driver as a module, choose M here: the
76 module will be called gspca_ov534.
77
67config USB_GSPCA_PAC207 78config USB_GSPCA_PAC207
68 tristate "Pixart PAC207 USB Camera Driver" 79 tristate "Pixart PAC207 USB Camera Driver"
69 depends on VIDEO_V4L2 && USB_GSPCA 80 depends on VIDEO_V4L2 && USB_GSPCA
@@ -83,10 +94,11 @@ config USB_GSPCA_PAC7311
83 module will be called gspca_pac7311. 94 module will be called gspca_pac7311.
84 95
85config USB_GSPCA_SONIXB 96config USB_GSPCA_SONIXB
86 tristate "SN9C102 USB Camera Driver" 97 tristate "SONIX Bayer USB Camera Driver"
87 depends on VIDEO_V4L2 && USB_GSPCA 98 depends on VIDEO_V4L2 && USB_GSPCA
88 help 99 help
89 Say Y here if you want support for cameras based on the SONIXB chip. 100 Say Y here if you want support for cameras based on the Sonix
101 chips with Bayer format (SN9C101, SN9C102 and SN9C103).
90 102
91 To compile this driver as a module, choose M here: the 103 To compile this driver as a module, choose M here: the
92 module will be called gspca_sonixb. 104 module will be called gspca_sonixb.
@@ -95,7 +107,8 @@ config USB_GSPCA_SONIXJ
95 tristate "SONIX JPEG USB Camera Driver" 107 tristate "SONIX JPEG USB Camera Driver"
96 depends on VIDEO_V4L2 && USB_GSPCA 108 depends on VIDEO_V4L2 && USB_GSPCA
97 help 109 help
98 Say Y here if you want support for cameras based on the SONIXJ chip. 110 Say Y here if you want support for cameras based on the Sonix
111 chips with JPEG format (SN9C102P, SN9C105 and >= SN9C110).
99 112
100 To compile this driver as a module, choose M here: the 113 To compile this driver as a module, choose M here: the
101 module will be called gspca_sonixj 114 module will be called gspca_sonixj
@@ -171,7 +184,7 @@ config USB_GSPCA_SUNPLUS
171 SPCA504(abc) SPCA533 SPCA536 chips. 184 SPCA504(abc) SPCA533 SPCA536 chips.
172 185
173 To compile this driver as a module, choose M here: the 186 To compile this driver as a module, choose M here: the
174 module will be called gspca_spca5xx. 187 module will be called gspca_sunplus.
175 188
176config USB_GSPCA_T613 189config USB_GSPCA_T613
177 tristate "T613 (JPEG Compliance) USB Camera Driver" 190 tristate "T613 (JPEG Compliance) USB Camera Driver"
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index 22734f5a6c32..bd8d9ee40504 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_USB_GSPCA_ETOMS) += gspca_etoms.o
4obj-$(CONFIG_USB_GSPCA_FINEPIX) += gspca_finepix.o 4obj-$(CONFIG_USB_GSPCA_FINEPIX) += gspca_finepix.o
5obj-$(CONFIG_USB_GSPCA_MARS) += gspca_mars.o 5obj-$(CONFIG_USB_GSPCA_MARS) += gspca_mars.o
6obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o 6obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o
7obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o
7obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o 8obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o
8obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o 9obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o
9obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o 10obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o
@@ -27,6 +28,7 @@ gspca_etoms-objs := etoms.o
27gspca_finepix-objs := finepix.o 28gspca_finepix-objs := finepix.o
28gspca_mars-objs := mars.o 29gspca_mars-objs := mars.o
29gspca_ov519-objs := ov519.o 30gspca_ov519-objs := ov519.o
31gspca_ov534-objs := ov534.o
30gspca_pac207-objs := pac207.o 32gspca_pac207-objs := pac207.o
31gspca_pac7311-objs := pac7311.o 33gspca_pac7311-objs := pac7311.o
32gspca_sonixb-objs := sonixb.o 34gspca_sonixb-objs := sonixb.o
@@ -45,4 +47,4 @@ gspca_vc032x-objs := vc032x.o
45gspca_zc3xx-objs := zc3xx.o 47gspca_zc3xx-objs := zc3xx.o
46 48
47obj-$(CONFIG_USB_M5602) += m5602/ 49obj-$(CONFIG_USB_M5602) += m5602/
48 50obj-$(CONFIG_USB_STV06XX) += stv06xx/
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index de28354ea5ba..1753f5bb3544 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -93,7 +93,7 @@ static struct ctrl sd_ctrls[] = {
93 }, 93 },
94}; 94};
95 95
96static struct v4l2_pix_format vga_mode[] = { 96static const struct v4l2_pix_format vga_mode[] = {
97 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 97 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
98 .bytesperline = 176, 98 .bytesperline = 176,
99 .sizeimage = 176 * 144 * 3 / 8 + 590, 99 .sizeimage = 176 * 144 * 3 / 8 + 590,
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 3be30b420a26..f3cd8ff5cc92 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -112,7 +112,7 @@ static struct ctrl sd_ctrls[] = {
112 }, 112 },
113}; 113};
114 114
115static struct v4l2_pix_format vga_mode[] = { 115static const struct v4l2_pix_format vga_mode[] = {
116 {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, 116 {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
117 .bytesperline = 320, 117 .bytesperline = 320,
118 .sizeimage = 320 * 240, 118 .sizeimage = 320 * 240,
@@ -125,7 +125,7 @@ static struct v4l2_pix_format vga_mode[] = {
125 .priv = 0}, */ 125 .priv = 0}, */
126}; 126};
127 127
128static struct v4l2_pix_format sif_mode[] = { 128static const struct v4l2_pix_format sif_mode[] = {
129 {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, 129 {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
130 .bytesperline = 176, 130 .bytesperline = 176,
131 .sizeimage = 176 * 144, 131 .sizeimage = 176 * 144,
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 607942fd7970..afc8b2dd307b 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -72,7 +72,7 @@ struct usb_fpix {
72} 72}
73 73
74/* These cameras only support 320x200. */ 74/* These cameras only support 320x200. */
75static struct v4l2_pix_format fpix_mode[1] = { 75static const struct v4l2_pix_format fpix_mode[1] = {
76 { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 76 { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
77 .bytesperline = 320, 77 .bytesperline = 320,
78 .sizeimage = 320 * 240 * 3 / 8 + 590, 78 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -314,9 +314,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
314 int ret; 314 int ret;
315 int size_ret; 315 int size_ret;
316 316
317 /* Reset bulk in endpoint */
318 usb_clear_halt(gspca_dev->dev, gspca_dev->cam.epaddr);
319
320 /* Init the device */ 317 /* Init the device */
321 memset(gspca_dev->usb_buf, 0, 12); 318 memset(gspca_dev->usb_buf, 0, 12);
322 gspca_dev->usb_buf[0] = 0xc6; 319 gspca_dev->usb_buf[0] = 0xc6;
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 02a6e9ef0337..8b9f3bde5740 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -30,7 +30,6 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/pagemap.h> 31#include <linux/pagemap.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/kref.h>
34#include <asm/page.h> 33#include <asm/page.h>
35#include <linux/uaccess.h> 34#include <linux/uaccess.h>
36#include <linux/jiffies.h> 35#include <linux/jiffies.h>
@@ -45,7 +44,7 @@ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
45MODULE_DESCRIPTION("GSPCA USB Camera Driver"); 44MODULE_DESCRIPTION("GSPCA USB Camera Driver");
46MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
47 46
48#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 3, 0) 47#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 4, 0)
49 48
50static int video_nr = -1; 49static int video_nr = -1;
51 50
@@ -150,8 +149,11 @@ static void fill_frame(struct gspca_dev *gspca_dev,
150 149
151 /* check the packet status and length */ 150 /* check the packet status and length */
152 len = urb->iso_frame_desc[i].actual_length; 151 len = urb->iso_frame_desc[i].actual_length;
153 if (len == 0) 152 if (len == 0) {
153 if (gspca_dev->empty_packet == 0)
154 gspca_dev->empty_packet = 1;
154 continue; 155 continue;
156 }
155 st = urb->iso_frame_desc[i].status; 157 st = urb->iso_frame_desc[i].status;
156 if (st) { 158 if (st) {
157 PDEBUG(D_ERR, 159 PDEBUG(D_ERR,
@@ -170,7 +172,6 @@ static void fill_frame(struct gspca_dev *gspca_dev,
170 } 172 }
171 173
172 /* resubmit the URB */ 174 /* resubmit the URB */
173 urb->status = 0;
174 st = usb_submit_urb(urb, GFP_ATOMIC); 175 st = usb_submit_urb(urb, GFP_ATOMIC);
175 if (st < 0) 176 if (st < 0)
176 PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st); 177 PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st);
@@ -200,11 +201,18 @@ static void bulk_irq(struct urb *urb
200{ 201{
201 struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; 202 struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
202 struct gspca_frame *frame; 203 struct gspca_frame *frame;
204 int st;
203 205
204 PDEBUG(D_PACK, "bulk irq"); 206 PDEBUG(D_PACK, "bulk irq");
205 if (!gspca_dev->streaming) 207 if (!gspca_dev->streaming)
206 return; 208 return;
207 if (urb->status != 0 && urb->status != -ECONNRESET) { 209 switch (urb->status) {
210 case 0:
211 break;
212 case -ECONNRESET:
213 urb->status = 0;
214 break;
215 default:
208#ifdef CONFIG_PM 216#ifdef CONFIG_PM
209 if (!gspca_dev->frozen) 217 if (!gspca_dev->frozen)
210#endif 218#endif
@@ -223,6 +231,13 @@ static void bulk_irq(struct urb *urb
223 urb->transfer_buffer, 231 urb->transfer_buffer,
224 urb->actual_length); 232 urb->actual_length);
225 } 233 }
234
235 /* resubmit the URB */
236 if (gspca_dev->cam.bulk_nurbs != 0) {
237 st = usb_submit_urb(urb, GFP_ATOMIC);
238 if (st < 0)
239 PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st);
240 }
226} 241}
227 242
228/* 243/*
@@ -285,7 +300,6 @@ struct gspca_frame *gspca_frame_add(struct gspca_dev *gspca_dev,
285 frame->v4l2_buf.bytesused = frame->data_end - frame->data; 300 frame->v4l2_buf.bytesused = frame->data_end - frame->data;
286 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_QUEUED; 301 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_QUEUED;
287 frame->v4l2_buf.flags |= V4L2_BUF_FLAG_DONE; 302 frame->v4l2_buf.flags |= V4L2_BUF_FLAG_DONE;
288 atomic_inc(&gspca_dev->nevent);
289 wake_up_interruptible(&gspca_dev->wq); /* event = new frame */ 303 wake_up_interruptible(&gspca_dev->wq); /* event = new frame */
290 i = (gspca_dev->fr_i + 1) % gspca_dev->nframes; 304 i = (gspca_dev->fr_i + 1) % gspca_dev->nframes;
291 gspca_dev->fr_i = i; 305 gspca_dev->fr_i = i;
@@ -379,7 +393,6 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
379 gspca_dev->fr_i = gspca_dev->fr_o = gspca_dev->fr_q = 0; 393 gspca_dev->fr_i = gspca_dev->fr_o = gspca_dev->fr_q = 0;
380 gspca_dev->last_packet_type = DISCARD_PACKET; 394 gspca_dev->last_packet_type = DISCARD_PACKET;
381 gspca_dev->sequence = 0; 395 gspca_dev->sequence = 0;
382 atomic_set(&gspca_dev->nevent, 0);
383 return 0; 396 return 0;
384} 397}
385 398
@@ -520,11 +533,14 @@ static int create_urbs(struct gspca_dev *gspca_dev,
520 nurbs = DEF_NURBS; 533 nurbs = DEF_NURBS;
521 } else { /* bulk */ 534 } else { /* bulk */
522 npkt = 0; 535 npkt = 0;
523 bsize = gspca_dev->cam. bulk_size; 536 bsize = gspca_dev->cam.bulk_size;
524 if (bsize == 0) 537 if (bsize == 0)
525 bsize = psize; 538 bsize = psize;
526 PDEBUG(D_STREAM, "bulk bsize:%d", bsize); 539 PDEBUG(D_STREAM, "bulk bsize:%d", bsize);
527 nurbs = 1; 540 if (gspca_dev->cam.bulk_nurbs != 0)
541 nurbs = gspca_dev->cam.bulk_nurbs;
542 else
543 nurbs = 1;
528 } 544 }
529 545
530 gspca_dev->nurbs = nurbs; 546 gspca_dev->nurbs = nurbs;
@@ -597,6 +613,12 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
597 if (ret < 0) 613 if (ret < 0)
598 goto out; 614 goto out;
599 615
616 /* clear the bulk endpoint */
617 if (gspca_dev->alt == 0) /* if bulk transfer */
618 usb_clear_halt(gspca_dev->dev,
619 usb_rcvintpipe(gspca_dev->dev,
620 gspca_dev->cam.epaddr));
621
600 /* start the cam */ 622 /* start the cam */
601 ret = gspca_dev->sd_desc->start(gspca_dev); 623 ret = gspca_dev->sd_desc->start(gspca_dev);
602 if (ret < 0) { 624 if (ret < 0) {
@@ -604,10 +626,9 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
604 goto out; 626 goto out;
605 } 627 }
606 gspca_dev->streaming = 1; 628 gspca_dev->streaming = 1;
607 atomic_set(&gspca_dev->nevent, 0);
608 629
609 /* bulk transfers are started by the subdriver */ 630 /* some bulk transfers are started by the subdriver */
610 if (gspca_dev->alt == 0) 631 if (gspca_dev->alt == 0 && gspca_dev->cam.bulk_nurbs == 0)
611 break; 632 break;
612 633
613 /* submit the URBs */ 634 /* submit the URBs */
@@ -618,8 +639,11 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
618 "usb_submit_urb [%d] err %d", n, ret); 639 "usb_submit_urb [%d] err %d", n, ret);
619 gspca_dev->streaming = 0; 640 gspca_dev->streaming = 0;
620 destroy_urbs(gspca_dev); 641 destroy_urbs(gspca_dev);
621 if (ret == -ENOSPC) 642 if (ret == -ENOSPC) {
643 msleep(20); /* wait for kill
644 * complete */
622 break; /* try the previous alt */ 645 break; /* try the previous alt */
646 }
623 goto out; 647 goto out;
624 } 648 }
625 } 649 }
@@ -637,7 +661,7 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
637 661
638 ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0); 662 ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
639 if (ret < 0) 663 if (ret < 0)
640 PDEBUG(D_ERR|D_STREAM, "set interface 0 err %d", ret); 664 PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
641 return ret; 665 return ret;
642} 666}
643 667
@@ -645,7 +669,6 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
645static void gspca_stream_off(struct gspca_dev *gspca_dev) 669static void gspca_stream_off(struct gspca_dev *gspca_dev)
646{ 670{
647 gspca_dev->streaming = 0; 671 gspca_dev->streaming = 0;
648 atomic_set(&gspca_dev->nevent, 0);
649 if (gspca_dev->present 672 if (gspca_dev->present
650 && gspca_dev->sd_desc->stopN) 673 && gspca_dev->sd_desc->stopN)
651 gspca_dev->sd_desc->stopN(gspca_dev); 674 gspca_dev->sd_desc->stopN(gspca_dev);
@@ -727,7 +750,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
727 if (fmtdesc->index == index) 750 if (fmtdesc->index == index)
728 break; /* new format */ 751 break; /* new format */
729 index++; 752 index++;
730 if (index >= sizeof fmt_tb / sizeof fmt_tb[0]) 753 if (index >= ARRAY_SIZE(fmt_tb))
731 return -EINVAL; 754 return -EINVAL;
732 } 755 }
733 } 756 }
@@ -752,8 +775,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
752 struct gspca_dev *gspca_dev = priv; 775 struct gspca_dev *gspca_dev = priv;
753 int mode; 776 int mode;
754 777
755 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
756 return -EINVAL;
757 mode = gspca_dev->curr_mode; 778 mode = gspca_dev->curr_mode;
758 memcpy(&fmt->fmt.pix, &gspca_dev->cam.cam_mode[mode], 779 memcpy(&fmt->fmt.pix, &gspca_dev->cam.cam_mode[mode],
759 sizeof fmt->fmt.pix); 780 sizeof fmt->fmt.pix);
@@ -765,8 +786,6 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
765{ 786{
766 int w, h, mode, mode2; 787 int w, h, mode, mode2;
767 788
768 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
769 return -EINVAL;
770 w = fmt->fmt.pix.width; 789 w = fmt->fmt.pix.width;
771 h = fmt->fmt.pix.height; 790 h = fmt->fmt.pix.height;
772 791
@@ -846,11 +865,11 @@ out:
846 return ret; 865 return ret;
847} 866}
848 867
849static void gspca_delete(struct kref *kref) 868static void gspca_release(struct video_device *vfd)
850{ 869{
851 struct gspca_dev *gspca_dev = container_of(kref, struct gspca_dev, kref); 870 struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev);
852 871
853 PDEBUG(D_STREAM, "device deleted"); 872 PDEBUG(D_STREAM, "device released");
854 873
855 kfree(gspca_dev->usb_buf); 874 kfree(gspca_dev->usb_buf);
856 kfree(gspca_dev); 875 kfree(gspca_dev);
@@ -862,7 +881,7 @@ static int dev_open(struct inode *inode, struct file *file)
862 int ret; 881 int ret;
863 882
864 PDEBUG(D_STREAM, "%s open", current->comm); 883 PDEBUG(D_STREAM, "%s open", current->comm);
865 gspca_dev = video_drvdata(file); 884 gspca_dev = (struct gspca_dev *) video_devdata(file);
866 if (mutex_lock_interruptible(&gspca_dev->queue_lock)) 885 if (mutex_lock_interruptible(&gspca_dev->queue_lock))
867 return -ERESTARTSYS; 886 return -ERESTARTSYS;
868 if (!gspca_dev->present) { 887 if (!gspca_dev->present) {
@@ -883,17 +902,14 @@ static int dev_open(struct inode *inode, struct file *file)
883 902
884 gspca_dev->users++; 903 gspca_dev->users++;
885 904
886 /* one more user */
887 kref_get(&gspca_dev->kref);
888
889 file->private_data = gspca_dev; 905 file->private_data = gspca_dev;
890#ifdef GSPCA_DEBUG 906#ifdef GSPCA_DEBUG
891 /* activate the v4l2 debug */ 907 /* activate the v4l2 debug */
892 if (gspca_debug & D_V4L2) 908 if (gspca_debug & D_V4L2)
893 gspca_dev->vdev->debug |= V4L2_DEBUG_IOCTL 909 gspca_dev->vdev.debug |= V4L2_DEBUG_IOCTL
894 | V4L2_DEBUG_IOCTL_ARG; 910 | V4L2_DEBUG_IOCTL_ARG;
895 else 911 else
896 gspca_dev->vdev->debug &= ~(V4L2_DEBUG_IOCTL 912 gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL
897 | V4L2_DEBUG_IOCTL_ARG); 913 | V4L2_DEBUG_IOCTL_ARG);
898#endif 914#endif
899 ret = 0; 915 ret = 0;
@@ -932,8 +948,6 @@ static int dev_close(struct inode *inode, struct file *file)
932 948
933 PDEBUG(D_STREAM, "close done"); 949 PDEBUG(D_STREAM, "close done");
934 950
935 kref_put(&gspca_dev->kref, gspca_delete);
936
937 return 0; 951 return 0;
938} 952}
939 953
@@ -1053,6 +1067,35 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
1053 return -EINVAL; 1067 return -EINVAL;
1054} 1068}
1055 1069
1070/*fixme: have an audio flag in gspca_dev?*/
1071static int vidioc_s_audio(struct file *file, void *priv,
1072 struct v4l2_audio *audio)
1073{
1074 if (audio->index != 0)
1075 return -EINVAL;
1076 return 0;
1077}
1078
1079static int vidioc_g_audio(struct file *file, void *priv,
1080 struct v4l2_audio *audio)
1081{
1082 memset(audio, 0, sizeof *audio);
1083 strcpy(audio->name, "Microphone");
1084 return 0;
1085}
1086
1087static int vidioc_enumaudio(struct file *file, void *priv,
1088 struct v4l2_audio *audio)
1089{
1090 if (audio->index != 0)
1091 return -EINVAL;
1092
1093 strcpy(audio->name, "Microphone");
1094 audio->capability = 0;
1095 audio->mode = 0;
1096 return 0;
1097}
1098
1056static int vidioc_querymenu(struct file *file, void *priv, 1099static int vidioc_querymenu(struct file *file, void *priv,
1057 struct v4l2_querymenu *qmenu) 1100 struct v4l2_querymenu *qmenu)
1058{ 1101{
@@ -1096,8 +1139,6 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1096 struct gspca_dev *gspca_dev = priv; 1139 struct gspca_dev *gspca_dev = priv;
1097 int i, ret = 0; 1140 int i, ret = 0;
1098 1141
1099 if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1100 return -EINVAL;
1101 switch (rb->memory) { 1142 switch (rb->memory) {
1102 case GSPCA_MEMORY_READ: /* (internal call) */ 1143 case GSPCA_MEMORY_READ: /* (internal call) */
1103 case V4L2_MEMORY_MMAP: 1144 case V4L2_MEMORY_MMAP:
@@ -1162,8 +1203,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
1162 struct gspca_dev *gspca_dev = priv; 1203 struct gspca_dev *gspca_dev = priv;
1163 struct gspca_frame *frame; 1204 struct gspca_frame *frame;
1164 1205
1165 if (v4l2_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE 1206 if (v4l2_buf->index < 0
1166 || v4l2_buf->index < 0
1167 || v4l2_buf->index >= gspca_dev->nframes) 1207 || v4l2_buf->index >= gspca_dev->nframes)
1168 return -EINVAL; 1208 return -EINVAL;
1169 1209
@@ -1186,7 +1226,8 @@ static int vidioc_streamon(struct file *file, void *priv,
1186 ret = -ENODEV; 1226 ret = -ENODEV;
1187 goto out; 1227 goto out;
1188 } 1228 }
1189 if (gspca_dev->nframes == 0) { 1229 if (gspca_dev->nframes == 0
1230 || !(gspca_dev->frame[0].v4l2_buf.flags & V4L2_BUF_FLAG_QUEUED)) {
1190 ret = -EINVAL; 1231 ret = -EINVAL;
1191 goto out; 1232 goto out;
1192 } 1233 }
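
The extra condition above means VIDIOC_STREAMON now fails with EINVAL unless the first buffer has already been queued. A minimal userspace sketch of the resulting call order (plain V4L2, nothing gspca-specific; names and error handling are illustrative only, not part of this patch):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int start_streaming(int fd, unsigned int nbufs)
{
	struct v4l2_requestbuffers rb = {
		.count = nbufs,
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	unsigned int i;

	if (ioctl(fd, VIDIOC_REQBUFS, &rb) < 0)
		return -1;

	/* queue every buffer first: the check above rejects STREAMON
	 * while frame[0] is not queued */
	for (i = 0; i < rb.count; i++) {
		struct v4l2_buffer buf = {
			.index = i,
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
		};
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
			return -1;
	}
	return ioctl(fd, VIDIOC_STREAMON, &type);	/* now succeeds */
}
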
@@ -1236,7 +1277,6 @@ static int vidioc_streamoff(struct file *file, void *priv,
1236 gspca_dev->fr_i = gspca_dev->fr_o = gspca_dev->fr_q = 0; 1277 gspca_dev->fr_i = gspca_dev->fr_o = gspca_dev->fr_q = 0;
1237 gspca_dev->last_packet_type = DISCARD_PACKET; 1278 gspca_dev->last_packet_type = DISCARD_PACKET;
1238 gspca_dev->sequence = 0; 1279 gspca_dev->sequence = 0;
1239 atomic_set(&gspca_dev->nevent, 0);
1240 ret = 0; 1280 ret = 0;
1241out: 1281out:
1242 mutex_unlock(&gspca_dev->queue_lock); 1282 mutex_unlock(&gspca_dev->queue_lock);
@@ -1281,6 +1321,17 @@ static int vidioc_g_parm(struct file *filp, void *priv,
1281 memset(parm, 0, sizeof *parm); 1321 memset(parm, 0, sizeof *parm);
1282 parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1322 parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1283 parm->parm.capture.readbuffers = gspca_dev->nbufread; 1323 parm->parm.capture.readbuffers = gspca_dev->nbufread;
1324
1325 if (gspca_dev->sd_desc->get_streamparm) {
1326 int ret;
1327
1328 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1329 return -ERESTARTSYS;
1330 ret = gspca_dev->sd_desc->get_streamparm(gspca_dev, parm);
1331 mutex_unlock(&gspca_dev->usb_lock);
1332 return ret;
1333 }
1334
1284 return 0; 1335 return 0;
1285} 1336}
1286 1337
@@ -1295,6 +1346,17 @@ static int vidioc_s_parm(struct file *filp, void *priv,
1295 parm->parm.capture.readbuffers = gspca_dev->nbufread; 1346 parm->parm.capture.readbuffers = gspca_dev->nbufread;
1296 else 1347 else
1297 gspca_dev->nbufread = n; 1348 gspca_dev->nbufread = n;
1349
1350 if (gspca_dev->sd_desc->set_streamparm) {
1351 int ret;
1352
1353 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1354 return -ERESTARTSYS;
1355 ret = gspca_dev->sd_desc->set_streamparm(gspca_dev, parm);
1356 mutex_unlock(&gspca_dev->usb_lock);
1357 return ret;
1358 }
1359
1298 return 0; 1360 return 0;
1299} 1361}
1300 1362
@@ -1440,33 +1502,22 @@ static int frame_wait(struct gspca_dev *gspca_dev,
1440 i = gspca_dev->fr_o; 1502 i = gspca_dev->fr_o;
1441 j = gspca_dev->fr_queue[i]; 1503 j = gspca_dev->fr_queue[i];
1442 frame = &gspca_dev->frame[j]; 1504 frame = &gspca_dev->frame[j];
1443 if (frame->v4l2_buf.flags & V4L2_BUF_FLAG_DONE) {
1444 atomic_dec(&gspca_dev->nevent);
1445 goto ok;
1446 }
1447 if (nonblock_ing) /* no frame yet */
1448 return -EAGAIN;
1449 1505
1450 /* wait till a frame is ready */ 1506 if (!(frame->v4l2_buf.flags & V4L2_BUF_FLAG_DONE)) {
1451 for (;;) { 1507 if (nonblock_ing)
1508 return -EAGAIN;
1509
1510 /* wait till a frame is ready */
1452 ret = wait_event_interruptible_timeout(gspca_dev->wq, 1511 ret = wait_event_interruptible_timeout(gspca_dev->wq,
1453 atomic_read(&gspca_dev->nevent) > 0, 1512 (frame->v4l2_buf.flags & V4L2_BUF_FLAG_DONE) ||
1454 msecs_to_jiffies(3000)); 1513 !gspca_dev->streaming || !gspca_dev->present,
1455 if (ret <= 0) { 1514 msecs_to_jiffies(3000));
1456 if (ret < 0) 1515 if (ret < 0)
1457 return ret; /* interrupt */ 1516 return ret;
1458 return -EIO; /* timeout */ 1517 if (ret == 0 || !gspca_dev->streaming || !gspca_dev->present)
1459 }
1460 atomic_dec(&gspca_dev->nevent);
1461 if (!gspca_dev->streaming || !gspca_dev->present)
1462 return -EIO; 1518 return -EIO;
1463 i = gspca_dev->fr_o;
1464 j = gspca_dev->fr_queue[i];
1465 frame = &gspca_dev->frame[j];
1466 if (frame->v4l2_buf.flags & V4L2_BUF_FLAG_DONE)
1467 break;
1468 } 1519 }
1469ok: 1520
1470 gspca_dev->fr_o = (i + 1) % gspca_dev->nframes; 1521 gspca_dev->fr_o = (i + 1) % gspca_dev->nframes;
1471 PDEBUG(D_FRAM, "frame wait q:%d i:%d o:%d", 1522 PDEBUG(D_FRAM, "frame wait q:%d i:%d o:%d",
1472 gspca_dev->fr_q, 1523 gspca_dev->fr_q,
@@ -1494,8 +1545,6 @@ static int vidioc_dqbuf(struct file *file, void *priv,
1494 int i, ret; 1545 int i, ret;
1495 1546
1496 PDEBUG(D_FRAM, "dqbuf"); 1547 PDEBUG(D_FRAM, "dqbuf");
1497 if (v4l2_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1498 return -EINVAL;
1499 if (v4l2_buf->memory != gspca_dev->memory) 1548 if (v4l2_buf->memory != gspca_dev->memory)
1500 return -EINVAL; 1549 return -EINVAL;
1501 1550
@@ -1550,8 +1599,6 @@ static int vidioc_qbuf(struct file *file, void *priv,
1550 int i, index, ret; 1599 int i, index, ret;
1551 1600
1552 PDEBUG(D_FRAM, "qbuf %d", v4l2_buf->index); 1601 PDEBUG(D_FRAM, "qbuf %d", v4l2_buf->index);
1553 if (v4l2_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1554 return -EINVAL;
1555 1602
1556 if (mutex_lock_interruptible(&gspca_dev->queue_lock)) 1603 if (mutex_lock_interruptible(&gspca_dev->queue_lock))
1557 return -ERESTARTSYS; 1604 return -ERESTARTSYS;
@@ -1761,7 +1808,7 @@ static struct file_operations dev_fops = {
1761 .release = dev_close, 1808 .release = dev_close,
1762 .read = dev_read, 1809 .read = dev_read,
1763 .mmap = dev_mmap, 1810 .mmap = dev_mmap,
1764 .ioctl = video_ioctl2, 1811 .unlocked_ioctl = __video_ioctl2,
1765#ifdef CONFIG_COMPAT 1812#ifdef CONFIG_COMPAT
1766 .compat_ioctl = v4l_compat_ioctl32, 1813 .compat_ioctl = v4l_compat_ioctl32,
1767#endif 1814#endif
@@ -1781,6 +1828,9 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = {
1781 .vidioc_queryctrl = vidioc_queryctrl, 1828 .vidioc_queryctrl = vidioc_queryctrl,
1782 .vidioc_g_ctrl = vidioc_g_ctrl, 1829 .vidioc_g_ctrl = vidioc_g_ctrl,
1783 .vidioc_s_ctrl = vidioc_s_ctrl, 1830 .vidioc_s_ctrl = vidioc_s_ctrl,
1831 .vidioc_g_audio = vidioc_g_audio,
1832 .vidioc_s_audio = vidioc_s_audio,
1833 .vidioc_enumaudio = vidioc_enumaudio,
1784 .vidioc_querymenu = vidioc_querymenu, 1834 .vidioc_querymenu = vidioc_querymenu,
1785 .vidioc_enum_input = vidioc_enum_input, 1835 .vidioc_enum_input = vidioc_enum_input,
1786 .vidioc_g_input = vidioc_g_input, 1836 .vidioc_g_input = vidioc_g_input,
@@ -1802,7 +1852,7 @@ static struct video_device gspca_template = {
1802 .name = "gspca main driver", 1852 .name = "gspca main driver",
1803 .fops = &dev_fops, 1853 .fops = &dev_fops,
1804 .ioctl_ops = &dev_ioctl_ops, 1854 .ioctl_ops = &dev_ioctl_ops,
1805 .release = video_device_release, 1855 .release = gspca_release,
1806 .minor = -1, 1856 .minor = -1,
1807}; 1857};
1808 1858
@@ -1840,7 +1890,6 @@ int gspca_dev_probe(struct usb_interface *intf,
1840 err("couldn't kzalloc gspca struct"); 1890 err("couldn't kzalloc gspca struct");
1841 return -ENOMEM; 1891 return -ENOMEM;
1842 } 1892 }
1843 kref_init(&gspca_dev->kref);
1844 gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL); 1893 gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL);
1845 if (!gspca_dev->usb_buf) { 1894 if (!gspca_dev->usb_buf) {
1846 err("out of memory"); 1895 err("out of memory");
@@ -1852,12 +1901,13 @@ int gspca_dev_probe(struct usb_interface *intf,
1852 gspca_dev->nbalt = intf->num_altsetting; 1901 gspca_dev->nbalt = intf->num_altsetting;
1853 gspca_dev->sd_desc = sd_desc; 1902 gspca_dev->sd_desc = sd_desc;
1854 gspca_dev->nbufread = 2; 1903 gspca_dev->nbufread = 2;
1904 gspca_dev->empty_packet = -1; /* don't check the empty packets */
1855 1905
1856 /* configure the subdriver and initialize the USB device */ 1906 /* configure the subdriver and initialize the USB device */
1857 ret = gspca_dev->sd_desc->config(gspca_dev, id); 1907 ret = sd_desc->config(gspca_dev, id);
1858 if (ret < 0) 1908 if (ret < 0)
1859 goto out; 1909 goto out;
1860 ret = gspca_dev->sd_desc->init(gspca_dev); 1910 ret = sd_desc->init(gspca_dev);
1861 if (ret < 0) 1911 if (ret < 0)
1862 goto out; 1912 goto out;
1863 ret = gspca_set_alt0(gspca_dev); 1913 ret = gspca_set_alt0(gspca_dev);
@@ -1871,18 +1921,15 @@ int gspca_dev_probe(struct usb_interface *intf,
1871 init_waitqueue_head(&gspca_dev->wq); 1921 init_waitqueue_head(&gspca_dev->wq);
1872 1922
1873 /* init video stuff */ 1923 /* init video stuff */
1874 gspca_dev->vdev = video_device_alloc(); 1924 memcpy(&gspca_dev->vdev, &gspca_template, sizeof gspca_template);
1875 memcpy(gspca_dev->vdev, &gspca_template, sizeof gspca_template); 1925 gspca_dev->vdev.parent = &dev->dev;
1876 gspca_dev->vdev->parent = &dev->dev;
1877 gspca_dev->module = module; 1926 gspca_dev->module = module;
1878 gspca_dev->present = 1; 1927 gspca_dev->present = 1;
1879 video_set_drvdata(gspca_dev->vdev, gspca_dev); 1928 ret = video_register_device(&gspca_dev->vdev,
1880 ret = video_register_device(gspca_dev->vdev,
1881 VFL_TYPE_GRABBER, 1929 VFL_TYPE_GRABBER,
1882 video_nr); 1930 video_nr);
1883 if (ret < 0) { 1931 if (ret < 0) {
1884 err("video_register_device err %d", ret); 1932 err("video_register_device err %d", ret);
1885 video_device_release(gspca_dev->vdev);
1886 goto out; 1933 goto out;
1887 } 1934 }
1888 1935
@@ -1906,15 +1953,14 @@ void gspca_disconnect(struct usb_interface *intf)
1906{ 1953{
1907 struct gspca_dev *gspca_dev = usb_get_intfdata(intf); 1954 struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
1908 1955
1909 usb_set_intfdata(intf, NULL);
1910
1911/* We don't want people trying to open up the device */
1912 video_unregister_device(gspca_dev->vdev);
1913
1914 gspca_dev->present = 0; 1956 gspca_dev->present = 0;
1915 gspca_dev->streaming = 0; 1957 gspca_dev->streaming = 0;
1916 1958
1917 kref_put(&gspca_dev->kref, gspca_delete); 1959 usb_set_intfdata(intf, NULL);
1960
1961 /* release the device */
 1962 /* (this will call gspca_release() immediately or on last close) */
1963 video_unregister_device(&gspca_dev->vdev);
1918 1964
1919 PDEBUG(D_PROBE, "disconnect complete"); 1965 PDEBUG(D_PROBE, "disconnect complete");
1920} 1966}
@@ -1992,7 +2038,7 @@ int gspca_auto_gain_n_exposure(struct gspca_dev *gspca_dev, int avg_lum,
 1992 desired luminance fast (with the risk of a slight overshoot) */ 2038 desired luminance fast (with the risk of a slight overshoot) */
1993 steps = abs(desired_avg_lum - avg_lum) / deadzone; 2039 steps = abs(desired_avg_lum - avg_lum) / deadzone;
1994 2040
1995 PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d\n", 2041 PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
1996 avg_lum, desired_avg_lum, steps); 2042 avg_lum, desired_avg_lum, steps);
1997 2043
1998 for (i = 0; i < steps; i++) { 2044 for (i = 0; i < steps; i++) {
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index d25e8d69373b..c90af9cb1e07 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -56,8 +56,12 @@ extern int gspca_debug;
56/* device information - set at probe time */ 56/* device information - set at probe time */
57struct cam { 57struct cam {
58 int bulk_size; /* buffer size when image transfer by bulk */ 58 int bulk_size; /* buffer size when image transfer by bulk */
59 struct v4l2_pix_format *cam_mode; /* size nmodes */ 59 const struct v4l2_pix_format *cam_mode; /* size nmodes */
60 char nmodes; 60 char nmodes;
61 __u8 bulk_nurbs; /* number of URBs in bulk mode
62 * - cannot be > MAX_NURBS
63 * - when 0 and bulk_size != 0 means
64 * 1 URB and submit done by subdriver */
61 __u8 epaddr; 65 __u8 epaddr;
62}; 66};
63 67
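
The bulk_nurbs comment above implies a division of labour between the gspca core and the subdriver. A minimal sketch of how a bulk subdriver's config callback might fill these fields follows; it would live in a subdriver file next to its existing includes, and the mode table, endpoint address and sizes are invented for illustration, not taken from this patch.

static const struct v4l2_pix_format bulk_mode[] = {
	{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG},
};

static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	struct cam *cam = &gspca_dev->cam;

	cam->cam_mode = bulk_mode;
	cam->nmodes = ARRAY_SIZE(bulk_mode);
	cam->epaddr = 0x81;		/* example bulk-in endpoint */
	cam->bulk_size = 16 * 1024;	/* per-URB buffer size */
	cam->bulk_nurbs = 2;		/* core submits/resubmits the URBs;
					 * 0 would leave submission to the
					 * subdriver, as documented above */
	return 0;
}
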
@@ -70,6 +74,8 @@ typedef void (*cam_v_op) (struct gspca_dev *);
70typedef int (*cam_cf_op) (struct gspca_dev *, const struct usb_device_id *); 74typedef int (*cam_cf_op) (struct gspca_dev *, const struct usb_device_id *);
71typedef int (*cam_jpg_op) (struct gspca_dev *, 75typedef int (*cam_jpg_op) (struct gspca_dev *,
72 struct v4l2_jpegcompression *); 76 struct v4l2_jpegcompression *);
77typedef int (*cam_streamparm_op) (struct gspca_dev *,
78 struct v4l2_streamparm *);
73typedef int (*cam_qmnu_op) (struct gspca_dev *, 79typedef int (*cam_qmnu_op) (struct gspca_dev *,
74 struct v4l2_querymenu *); 80 struct v4l2_querymenu *);
75typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev, 81typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev,
@@ -102,6 +108,8 @@ struct sd_desc {
102 cam_jpg_op get_jcomp; 108 cam_jpg_op get_jcomp;
103 cam_jpg_op set_jcomp; 109 cam_jpg_op set_jcomp;
104 cam_qmnu_op querymenu; 110 cam_qmnu_op querymenu;
111 cam_streamparm_op get_streamparm;
112 cam_streamparm_op set_streamparm;
105}; 113};
106 114
107/* packet types when moving from iso buf to frame buf */ 115/* packet types when moving from iso buf to frame buf */
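
For illustration, here is one way a subdriver might implement the two new stream-parameter hooks to expose its frame rate. This sketch is not taken from the patch, and the frame_rate field of struct sd is hypothetical; the core calls both hooks with usb_lock held (see the vidioc_g_parm/vidioc_s_parm hunks above), so the callbacks may talk to the device directly.

static int sd_get_streamparm(struct gspca_dev *gspca_dev,
			     struct v4l2_streamparm *parm)
{
	struct v4l2_captureparm *cp = &parm->parm.capture;
	struct sd *sd = (struct sd *) gspca_dev;

	cp->capability |= V4L2_CAP_TIMEPERFRAME;
	cp->timeperframe.numerator = 1;
	cp->timeperframe.denominator = sd->frame_rate;
	return 0;
}

static int sd_set_streamparm(struct gspca_dev *gspca_dev,
			     struct v4l2_streamparm *parm)
{
	struct v4l2_captureparm *cp = &parm->parm.capture;
	struct sd *sd = (struct sd *) gspca_dev;

	if (cp->timeperframe.numerator != 0)
		sd->frame_rate = cp->timeperframe.denominator
				 / cp->timeperframe.numerator;
	/* a real driver would now program the sensor accordingly */
	return 0;
}

/* wired up in the subdriver's sd_desc: */
static const struct sd_desc sd_desc = {
	/* ...existing callbacks... */
	.get_streamparm = sd_get_streamparm,
	.set_streamparm = sd_set_streamparm,
};
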
@@ -120,10 +128,9 @@ struct gspca_frame {
120}; 128};
121 129
122struct gspca_dev { 130struct gspca_dev {
123 struct video_device *vdev; 131 struct video_device vdev; /* !! must be the first item */
124 struct module *module; /* subdriver handling the device */ 132 struct module *module; /* subdriver handling the device */
125 struct usb_device *dev; 133 struct usb_device *dev;
126 struct kref kref;
127 struct file *capt_file; /* file doing video capture */ 134 struct file *capt_file; /* file doing video capture */
128 135
129 struct cam cam; /* device information */ 136 struct cam cam; /* device information */
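
The "must be the first item" note exists because dev_open() (earlier in this patch) casts the result of video_devdata() directly to a struct gspca_dev pointer; that cast is only valid while vdev sits at offset zero. A small standalone sketch of the two ways to get back to the enclosing structure, using invented names:

#include <linux/kernel.h>
#include <media/v4l2-dev.h>

struct example_dev {
	struct video_device vdev;	/* at offset 0: direct cast is legal */
	int private_state;
};

/* works only because vdev is the first member */
static struct example_dev *by_cast(struct video_device *vdev)
{
	return (struct example_dev *) vdev;
}

/* works for any member position, as gspca_release() does above */
static struct example_dev *by_container_of(struct video_device *vdev)
{
	return container_of(vdev, struct example_dev, vdev);
}
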
@@ -142,22 +149,20 @@ struct gspca_dev {
142 char fr_q; /* next frame to queue */ 149 char fr_q; /* next frame to queue */
143 char fr_o; /* next frame to dequeue */ 150 char fr_o; /* next frame to dequeue */
144 signed char fr_queue[GSPCA_MAX_FRAMES]; /* frame queue */ 151 signed char fr_queue[GSPCA_MAX_FRAMES]; /* frame queue */
145 char last_packet_type; 152 __u8 last_packet_type;
153 __s8 empty_packet; /* if (-1) don't check empty packets */
154 __u8 streaming;
146 155
147 __u8 iface; /* USB interface number */
148 __u8 alt; /* USB alternate setting */
149 __u8 curr_mode; /* current camera mode */ 156 __u8 curr_mode; /* current camera mode */
150 __u32 pixfmt; /* current mode parameters */ 157 __u32 pixfmt; /* current mode parameters */
151 __u16 width; 158 __u16 width;
152 __u16 height; 159 __u16 height;
160 __u32 sequence; /* frame sequence number */
153 161
154 atomic_t nevent; /* number of frames done */
155 wait_queue_head_t wq; /* wait queue */ 162 wait_queue_head_t wq; /* wait queue */
156 struct mutex usb_lock; /* usb exchange protection */ 163 struct mutex usb_lock; /* usb exchange protection */
157 struct mutex read_lock; /* read protection */ 164 struct mutex read_lock; /* read protection */
158 struct mutex queue_lock; /* ISOC queue protection */ 165 struct mutex queue_lock; /* ISOC queue protection */
159 __u32 sequence; /* frame sequence number */
160 char streaming;
161#ifdef CONFIG_PM 166#ifdef CONFIG_PM
162 char frozen; /* suspend - resume */ 167 char frozen; /* suspend - resume */
163#endif 168#endif
@@ -166,6 +171,8 @@ struct gspca_dev {
166 char nbufread; /* number of buffers for read() */ 171 char nbufread; /* number of buffers for read() */
167 char nurbs; /* number of allocated URBs */ 172 char nurbs; /* number of allocated URBs */
168 char memory; /* memory type (V4L2_MEMORY_xxx) */ 173 char memory; /* memory type (V4L2_MEMORY_xxx) */
174 __u8 iface; /* USB interface number */
175 __u8 alt; /* USB alternate setting */
169 __u8 nbalt; /* number of USB alternate settings */ 176 __u8 nbalt; /* number of USB alternate settings */
170}; 177};
171 178
diff --git a/drivers/media/video/gspca/m5602/m5602_bridge.h b/drivers/media/video/gspca/m5602/m5602_bridge.h
index 1a37ae4bc82d..a3f3b7a0c7e7 100644
--- a/drivers/media/video/gspca/m5602/m5602_bridge.h
+++ b/drivers/media/video/gspca/m5602/m5602_bridge.h
@@ -25,59 +25,59 @@
25 25
26/*****************************************************************************/ 26/*****************************************************************************/
27 27
28#define M5602_XB_SENSOR_TYPE 0x00 28#define M5602_XB_SENSOR_TYPE 0x00
29#define M5602_XB_SENSOR_CTRL 0x01 29#define M5602_XB_SENSOR_CTRL 0x01
30#define M5602_XB_LINE_OF_FRAME_H 0x02 30#define M5602_XB_LINE_OF_FRAME_H 0x02
31#define M5602_XB_LINE_OF_FRAME_L 0x03 31#define M5602_XB_LINE_OF_FRAME_L 0x03
32#define M5602_XB_PIX_OF_LINE_H 0x04 32#define M5602_XB_PIX_OF_LINE_H 0x04
33#define M5602_XB_PIX_OF_LINE_L 0x05 33#define M5602_XB_PIX_OF_LINE_L 0x05
34#define M5602_XB_VSYNC_PARA 0x06 34#define M5602_XB_VSYNC_PARA 0x06
35#define M5602_XB_HSYNC_PARA 0x07 35#define M5602_XB_HSYNC_PARA 0x07
36#define M5602_XB_TEST_MODE_1 0x08 36#define M5602_XB_TEST_MODE_1 0x08
37#define M5602_XB_TEST_MODE_2 0x09 37#define M5602_XB_TEST_MODE_2 0x09
38#define M5602_XB_SIG_INI 0x0a 38#define M5602_XB_SIG_INI 0x0a
39#define M5602_XB_DS_PARA 0x0e 39#define M5602_XB_DS_PARA 0x0e
40#define M5602_XB_TRIG_PARA 0x0f 40#define M5602_XB_TRIG_PARA 0x0f
41#define M5602_XB_CLK_PD 0x10 41#define M5602_XB_CLK_PD 0x10
42#define M5602_XB_MCU_CLK_CTRL 0x12 42#define M5602_XB_MCU_CLK_CTRL 0x12
43#define M5602_XB_MCU_CLK_DIV 0x13 43#define M5602_XB_MCU_CLK_DIV 0x13
44#define M5602_XB_SEN_CLK_CTRL 0x14 44#define M5602_XB_SEN_CLK_CTRL 0x14
45#define M5602_XB_SEN_CLK_DIV 0x15 45#define M5602_XB_SEN_CLK_DIV 0x15
46#define M5602_XB_AUD_CLK_CTRL 0x16 46#define M5602_XB_AUD_CLK_CTRL 0x16
47#define M5602_XB_AUD_CLK_DIV 0x17 47#define M5602_XB_AUD_CLK_DIV 0x17
48#define M5602_XB_DEVCTR1 0x41 48#define M5602_XB_DEVCTR1 0x41
49#define M5602_XB_EPSETR0 0x42 49#define M5602_XB_EPSETR0 0x42
50#define M5602_XB_EPAFCTR 0x47 50#define M5602_XB_EPAFCTR 0x47
51#define M5602_XB_EPBFCTR 0x49 51#define M5602_XB_EPBFCTR 0x49
52#define M5602_XB_EPEFCTR 0x4f 52#define M5602_XB_EPEFCTR 0x4f
53#define M5602_XB_TEST_REG 0x53 53#define M5602_XB_TEST_REG 0x53
54#define M5602_XB_ALT2SIZE 0x54 54#define M5602_XB_ALT2SIZE 0x54
55#define M5602_XB_ALT3SIZE 0x55 55#define M5602_XB_ALT3SIZE 0x55
56#define M5602_XB_OBSFRAME 0x56 56#define M5602_XB_OBSFRAME 0x56
57#define M5602_XB_PWR_CTL 0x59 57#define M5602_XB_PWR_CTL 0x59
58#define M5602_XB_ADC_CTRL 0x60 58#define M5602_XB_ADC_CTRL 0x60
59#define M5602_XB_ADC_DATA 0x61 59#define M5602_XB_ADC_DATA 0x61
60#define M5602_XB_MISC_CTRL 0x62 60#define M5602_XB_MISC_CTRL 0x62
61#define M5602_XB_SNAPSHOT 0x63 61#define M5602_XB_SNAPSHOT 0x63
62#define M5602_XB_SCRATCH_1 0x64 62#define M5602_XB_SCRATCH_1 0x64
63#define M5602_XB_SCRATCH_2 0x65 63#define M5602_XB_SCRATCH_2 0x65
64#define M5602_XB_SCRATCH_3 0x66 64#define M5602_XB_SCRATCH_3 0x66
65#define M5602_XB_SCRATCH_4 0x67 65#define M5602_XB_SCRATCH_4 0x67
66#define M5602_XB_I2C_CTRL 0x68 66#define M5602_XB_I2C_CTRL 0x68
67#define M5602_XB_I2C_CLK_DIV 0x69 67#define M5602_XB_I2C_CLK_DIV 0x69
68#define M5602_XB_I2C_DEV_ADDR 0x6a 68#define M5602_XB_I2C_DEV_ADDR 0x6a
69#define M5602_XB_I2C_REG_ADDR 0x6b 69#define M5602_XB_I2C_REG_ADDR 0x6b
70#define M5602_XB_I2C_DATA 0x6c 70#define M5602_XB_I2C_DATA 0x6c
71#define M5602_XB_I2C_STATUS 0x6d 71#define M5602_XB_I2C_STATUS 0x6d
72#define M5602_XB_GPIO_DAT_H 0x70 72#define M5602_XB_GPIO_DAT_H 0x70
73#define M5602_XB_GPIO_DAT_L 0x71 73#define M5602_XB_GPIO_DAT_L 0x71
74#define M5602_XB_GPIO_DIR_H 0x72 74#define M5602_XB_GPIO_DIR_H 0x72
75#define M5602_XB_GPIO_DIR_L 0x73 75#define M5602_XB_GPIO_DIR_L 0x73
76#define M5602_XB_GPIO_EN_H 0x74 76#define M5602_XB_GPIO_EN_H 0x74
77#define M5602_XB_GPIO_EN_L 0x75 77#define M5602_XB_GPIO_EN_L 0x75
78#define M5602_XB_GPIO_DAT 0x76 78#define M5602_XB_GPIO_DAT 0x76
79#define M5602_XB_GPIO_DIR 0x77 79#define M5602_XB_GPIO_DIR 0x77
80#define M5602_XB_MISC_CTL 0x70 80#define M5602_XB_MISC_CTL 0x70
81 81
82#define I2C_BUSY 0x80 82#define I2C_BUSY 0x80
83 83
@@ -90,13 +90,7 @@
90#define M5602_ISOC_ENDPOINT_ADDR 0x81 90#define M5602_ISOC_ENDPOINT_ADDR 0x81
91#define M5602_INTR_ENDPOINT_ADDR 0x82 91#define M5602_INTR_ENDPOINT_ADDR 0x82
92 92
93#define M5602_MAX_FRAMES 32
94#define M5602_URBS 2
95#define M5602_ISOC_PACKETS 14
96
97#define M5602_URB_TIMEOUT msecs_to_jiffies(2 * M5602_ISOC_PACKETS)
98#define M5602_URB_MSG_TIMEOUT 5000 93#define M5602_URB_MSG_TIMEOUT 5000
99#define M5602_FRAME_TIMEOUT 2
100 94
101/*****************************************************************************/ 95/*****************************************************************************/
102 96
@@ -115,7 +109,6 @@ static const unsigned char sensor_urb_skeleton[] = {
115 0x13, M5602_XB_I2C_CTRL, 0x81, 0x11 109 0x13, M5602_XB_I2C_CTRL, 0x81, 0x11
116}; 110};
117 111
118/* m5602 device descriptor, currently it just wraps the m5602_camera struct */
119struct sd { 112struct sd {
120 struct gspca_dev gspca_dev; 113 struct gspca_dev gspca_dev;
121 114
@@ -140,4 +133,10 @@ int m5602_read_bridge(
140int m5602_write_bridge( 133int m5602_write_bridge(
141 struct sd *sd, u8 address, u8 i2c_data); 134 struct sd *sd, u8 address, u8 i2c_data);
142 135
136int m5602_write_sensor(struct sd *sd, const u8 address,
137 u8 *i2c_data, const u8 len);
138
139int m5602_read_sensor(struct sd *sd, const u8 address,
140 u8 *i2c_data, const u8 len);
141
143#endif 142#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index fd6ce384b487..ed906fe31287 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -24,7 +24,7 @@
24 24
25/* Kernel module parameters */ 25/* Kernel module parameters */
26int force_sensor; 26int force_sensor;
27int dump_bridge; 27static int dump_bridge;
28int dump_sensor; 28int dump_sensor;
29 29
30static const __devinitdata struct usb_device_id m5602_table[] = { 30static const __devinitdata struct usb_device_id m5602_table[] = {
@@ -80,6 +80,97 @@ int m5602_write_bridge(struct sd *sd, u8 address, u8 i2c_data)
80 return (err < 0) ? err : 0; 80 return (err < 0) ? err : 0;
81} 81}
82 82
83int m5602_read_sensor(struct sd *sd, const u8 address,
84 u8 *i2c_data, const u8 len)
85{
86 int err, i;
87
88 if (!len || len > sd->sensor->i2c_regW)
89 return -EINVAL;
90
91 do {
92 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
93 } while ((*i2c_data & I2C_BUSY) && !err);
94 if (err < 0)
95 goto out;
96
97 err = m5602_write_bridge(sd, M5602_XB_I2C_DEV_ADDR,
98 sd->sensor->i2c_slave_id);
99 if (err < 0)
100 goto out;
101
102 err = m5602_write_bridge(sd, M5602_XB_I2C_REG_ADDR, address);
103 if (err < 0)
104 goto out;
105
106 if (sd->sensor->i2c_regW == 1) {
107 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, len);
108 if (err < 0)
109 goto out;
110
111 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x08);
112 if (err < 0)
113 goto out;
114 } else {
115 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x18 + len);
116 if (err < 0)
117 goto out;
118 }
119
120 for (i = 0; (i < len) && !err; i++) {
121 err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
122
123 PDEBUG(D_CONF, "Reading sensor register "
124 "0x%x containing 0x%x ", address, *i2c_data);
125 }
126out:
127 return err;
128}
129
130int m5602_write_sensor(struct sd *sd, const u8 address,
131 u8 *i2c_data, const u8 len)
132{
133 int err, i;
134 u8 *p;
135 struct usb_device *udev = sd->gspca_dev.dev;
136 __u8 *buf = sd->gspca_dev.usb_buf;
137
138 /* No sensor with a data width larger than 16 bits has yet been seen */
139 if (len > sd->sensor->i2c_regW || !len)
140 return -EINVAL;
141
142 memcpy(buf, sensor_urb_skeleton,
143 sizeof(sensor_urb_skeleton));
144
145 buf[11] = sd->sensor->i2c_slave_id;
146 buf[15] = address;
147
148 /* Special case larger sensor writes */
149 p = buf + 16;
150
151 /* Copy a four byte write sequence for each byte to be written to */
152 for (i = 0; i < len; i++) {
153 memcpy(p, sensor_urb_skeleton + 16, 4);
154 p[3] = i2c_data[i];
155 p += 4;
156 PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
157 address, i2c_data[i]);
158 }
159
160 /* Copy the tailer */
161 memcpy(p, sensor_urb_skeleton + 20, 4);
162
163 /* Set the total length */
164 p[3] = 0x10 + len;
165
166 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
167 0x04, 0x40, 0x19,
168 0x0000, buf,
169 20 + len * 4, M5602_URB_MSG_TIMEOUT);
170
171 return (err < 0) ? err : 0;
172}
173
83/* Dump all the registers of the m5602 bridge, 174/* Dump all the registers of the m5602 bridge,
84 unfortunately this breaks the camera until it's power cycled */ 175 unfortunately this breaks the camera until it's power cycled */
85static void m5602_dump_bridge(struct sd *sd) 176static void m5602_dump_bridge(struct sd *sd)
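
The consolidated m5602_read_sensor()/m5602_write_sensor() helpers replace the per-sensor copies removed later in this patch (mt9m111, ov9650). As a usage sketch mirroring the converted mt9m111 code below, a sensor driver with i2c_regW = 2 could read-modify-write a 16-bit register like this; the register and bit names come from m5602_mt9m111.h, while the function itself is invented for illustration.

static int example_toggle_vflip(struct sd *sd)
{
	u8 data[2];
	int err;

	err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
	if (err < 0)
		return err;

	data[0] ^= MT9M111_RMB_MIRROR_ROWS;	/* flip the mirror-rows bit */
	return m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
}
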
@@ -150,11 +241,15 @@ static int m5602_start_transfer(struct gspca_dev *gspca_dev)
150 241
151 /* Send start command to the camera */ 242 /* Send start command to the camera */
152 const u8 buffer[4] = {0x13, 0xf9, 0x0f, 0x01}; 243 const u8 buffer[4] = {0x13, 0xf9, 0x0f, 0x01};
244
245 if (sd->sensor->start)
246 sd->sensor->start(sd);
247
153 memcpy(buf, buffer, sizeof(buffer)); 248 memcpy(buf, buffer, sizeof(buffer));
154 err = usb_control_msg(gspca_dev->dev, 249 err = usb_control_msg(gspca_dev->dev,
155 usb_sndctrlpipe(gspca_dev->dev, 0), 250 usb_sndctrlpipe(gspca_dev->dev, 0),
156 0x04, 0x40, 0x19, 0x0000, buf, 251 0x04, 0x40, 0x19, 0x0000, buf,
157 4, M5602_URB_MSG_TIMEOUT); 252 sizeof(buffer), M5602_URB_MSG_TIMEOUT);
158 253
159 PDEBUG(D_STREAM, "Transfer started"); 254 PDEBUG(D_STREAM, "Transfer started");
160 return (err < 0) ? err : 0; 255 return (err < 0) ? err : 0;
@@ -284,6 +379,7 @@ static int __init mod_m5602_init(void)
284 PDEBUG(D_PROBE, "registered"); 379 PDEBUG(D_PROBE, "registered");
285 return 0; 380 return 0;
286} 381}
382
287static void __exit mod_m5602_exit(void) 383static void __exit mod_m5602_exit(void)
288{ 384{
289 usb_deregister(&sd_driver); 385 usb_deregister(&sd_driver);
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.c b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
index fb700c2d055a..c0e71c331454 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
@@ -18,6 +18,8 @@
18 18
19#include "m5602_mt9m111.h" 19#include "m5602_mt9m111.h"
20 20
21static void mt9m111_dump_registers(struct sd *sd);
22
21int mt9m111_probe(struct sd *sd) 23int mt9m111_probe(struct sd *sd)
22{ 24{
23 u8 data[2] = {0x00, 0x00}; 25 u8 data[2] = {0x00, 0x00};
@@ -44,12 +46,12 @@ int mt9m111_probe(struct sd *sd)
44 } else { 46 } else {
45 data[0] = preinit_mt9m111[i][2]; 47 data[0] = preinit_mt9m111[i][2];
46 data[1] = preinit_mt9m111[i][3]; 48 data[1] = preinit_mt9m111[i][3];
47 mt9m111_write_sensor(sd, 49 m5602_write_sensor(sd,
48 preinit_mt9m111[i][1], data, 2); 50 preinit_mt9m111[i][1], data, 2);
49 } 51 }
50 } 52 }
51 53
52 if (mt9m111_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2)) 54 if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
53 return -ENODEV; 55 return -ENODEV;
54 56
55 if ((data[0] == 0x14) && (data[1] == 0x3a)) { 57 if ((data[0] == 0x14) && (data[1] == 0x3a)) {
@@ -72,7 +74,7 @@ int mt9m111_init(struct sd *sd)
72 int i, err = 0; 74 int i, err = 0;
73 75
74 /* Init the sensor */ 76 /* Init the sensor */
75 for (i = 0; i < ARRAY_SIZE(init_mt9m111); i++) { 77 for (i = 0; i < ARRAY_SIZE(init_mt9m111) && !err; i++) {
76 u8 data[2]; 78 u8 data[2];
77 79
78 if (init_mt9m111[i][0] == BRIDGE) { 80 if (init_mt9m111[i][0] == BRIDGE) {
@@ -82,7 +84,7 @@ int mt9m111_init(struct sd *sd)
82 } else { 84 } else {
83 data[0] = init_mt9m111[i][2]; 85 data[0] = init_mt9m111[i][2];
84 data[1] = init_mt9m111[i][3]; 86 data[1] = init_mt9m111[i][3];
85 err = mt9m111_write_sensor(sd, 87 err = m5602_write_sensor(sd,
86 init_mt9m111[i][1], data, 2); 88 init_mt9m111[i][1], data, 2);
87 } 89 }
88 } 90 }
@@ -104,12 +106,12 @@ int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
104 u8 data[2] = {0x00, 0x00}; 106 u8 data[2] = {0x00, 0x00};
105 struct sd *sd = (struct sd *) gspca_dev; 107 struct sd *sd = (struct sd *) gspca_dev;
106 108
107 err = mt9m111_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, 109 err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
108 data, 2); 110 data, 2);
109 *val = data[0] & MT9M111_RMB_MIRROR_ROWS; 111 *val = data[0] & MT9M111_RMB_MIRROR_ROWS;
110 PDEBUG(D_V4L2, "Read vertical flip %d", *val); 112 PDEBUG(D_V4L2, "Read vertical flip %d", *val);
111 113
112 return (err < 0) ? err : 0; 114 return err;
113} 115}
114 116
115int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 117int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -121,19 +123,19 @@ int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
121 PDEBUG(D_V4L2, "Set vertical flip to %d", val); 123 PDEBUG(D_V4L2, "Set vertical flip to %d", val);
122 124
123 /* Set the correct page map */ 125 /* Set the correct page map */
124 err = mt9m111_write_sensor(sd, MT9M111_PAGE_MAP, data, 2); 126 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
125 if (err < 0) 127 if (err < 0)
126 goto out; 128 goto out;
127 129
128 err = mt9m111_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, data, 2); 130 err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
129 if (err < 0) 131 if (err < 0)
130 goto out; 132 goto out;
131 133
132 data[0] = (data[0] & 0xfe) | val; 134 data[0] = (data[0] & 0xfe) | val;
133 err = mt9m111_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, 135 err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
134 data, 2); 136 data, 2);
135out: 137out:
136 return (err < 0) ? err : 0; 138 return err;
137} 139}
138 140
139int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 141int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
@@ -142,12 +144,12 @@ int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
142 u8 data[2] = {0x00, 0x00}; 144 u8 data[2] = {0x00, 0x00};
143 struct sd *sd = (struct sd *) gspca_dev; 145 struct sd *sd = (struct sd *) gspca_dev;
144 146
145 err = mt9m111_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, 147 err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
146 data, 2); 148 data, 2);
147 *val = data[0] & MT9M111_RMB_MIRROR_COLS; 149 *val = data[0] & MT9M111_RMB_MIRROR_COLS;
148 PDEBUG(D_V4L2, "Read horizontal flip %d", *val); 150 PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
149 151
150 return (err < 0) ? err : 0; 152 return err;
151} 153}
152 154
153int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 155int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -159,19 +161,19 @@ int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
159 PDEBUG(D_V4L2, "Set horizontal flip to %d", val); 161 PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
160 162
161 /* Set the correct page map */ 163 /* Set the correct page map */
162 err = mt9m111_write_sensor(sd, MT9M111_PAGE_MAP, data, 2); 164 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
163 if (err < 0) 165 if (err < 0)
164 goto out; 166 goto out;
165 167
166 err = mt9m111_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, data, 2); 168 err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
167 if (err < 0) 169 if (err < 0)
168 goto out; 170 goto out;
169 171
170 data[0] = (data[0] & 0xfd) | ((val << 1) & 0x02); 172 data[0] = (data[0] & 0xfd) | ((val << 1) & 0x02);
171 err = mt9m111_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, 173 err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
172 data, 2); 174 data, 2);
173out: 175out:
174 return (err < 0) ? err : 0; 176 return err;
175} 177}
176 178
177int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 179int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
@@ -180,7 +182,7 @@ int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
180 u8 data[2] = {0x00, 0x00}; 182 u8 data[2] = {0x00, 0x00};
181 struct sd *sd = (struct sd *) gspca_dev; 183 struct sd *sd = (struct sd *) gspca_dev;
182 184
183 err = mt9m111_read_sensor(sd, MT9M111_SC_GLOBAL_GAIN, data, 2); 185 err = m5602_read_sensor(sd, MT9M111_SC_GLOBAL_GAIN, data, 2);
184 tmp = ((data[1] << 8) | data[0]); 186 tmp = ((data[1] << 8) | data[0]);
185 187
186 *val = ((tmp & (1 << 10)) * 2) | 188 *val = ((tmp & (1 << 10)) * 2) |
@@ -190,7 +192,7 @@ int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
190 192
191 PDEBUG(D_V4L2, "Read gain %d", *val); 193 PDEBUG(D_V4L2, "Read gain %d", *val);
192 194
193 return (err < 0) ? err : 0; 195 return err;
194} 196}
195 197
196int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val) 198int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
@@ -200,7 +202,7 @@ int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
200 struct sd *sd = (struct sd *) gspca_dev; 202 struct sd *sd = (struct sd *) gspca_dev;
201 203
202 /* Set the correct page map */ 204 /* Set the correct page map */
203 err = mt9m111_write_sensor(sd, MT9M111_PAGE_MAP, data, 2); 205 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
204 if (err < 0) 206 if (err < 0)
205 goto out; 207 goto out;
206 208
@@ -225,90 +227,13 @@ int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
225 PDEBUG(D_V4L2, "tmp=%d, data[1]=%d, data[0]=%d", tmp, 227 PDEBUG(D_V4L2, "tmp=%d, data[1]=%d, data[0]=%d", tmp,
226 data[1], data[0]); 228 data[1], data[0]);
227 229
228 err = mt9m111_write_sensor(sd, MT9M111_SC_GLOBAL_GAIN, 230 err = m5602_write_sensor(sd, MT9M111_SC_GLOBAL_GAIN,
229 data, 2); 231 data, 2);
230out: 232out:
231 return (err < 0) ? err : 0; 233 return err;
232}
233
234int mt9m111_read_sensor(struct sd *sd, const u8 address,
235 u8 *i2c_data, const u8 len) {
236 int err, i;
237
238 do {
239 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
240 } while ((*i2c_data & I2C_BUSY) && !err);
241 if (err < 0)
242 goto out;
243
244 err = m5602_write_bridge(sd, M5602_XB_I2C_DEV_ADDR,
245 sd->sensor->i2c_slave_id);
246 if (err < 0)
247 goto out;
248
249 err = m5602_write_bridge(sd, M5602_XB_I2C_REG_ADDR, address);
250 if (err < 0)
251 goto out;
252
253 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x1a);
254 if (err < 0)
255 goto out;
256
257 for (i = 0; i < len && !err; i++) {
258 err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
259
260 PDEBUG(D_CONF, "Reading sensor register "
261 "0x%x contains 0x%x ", address, *i2c_data);
262 }
263out:
264 return (err < 0) ? err : 0;
265}
266
267int mt9m111_write_sensor(struct sd *sd, const u8 address,
268 u8 *i2c_data, const u8 len)
269{
270 int err, i;
271 u8 *p;
272 struct usb_device *udev = sd->gspca_dev.dev;
273 __u8 *buf = sd->gspca_dev.usb_buf;
274
275 /* No sensor with a data width larger
276 than 16 bits has yet been seen, nor with 0 :p*/
277 if (len > 2 || !len)
278 return -EINVAL;
279
280 memcpy(buf, sensor_urb_skeleton,
281 sizeof(sensor_urb_skeleton));
282
283 buf[11] = sd->sensor->i2c_slave_id;
284 buf[15] = address;
285
286 p = buf + 16;
287
288 /* Copy a four byte write sequence for each byte to be written to */
289 for (i = 0; i < len; i++) {
290 memcpy(p, sensor_urb_skeleton + 16, 4);
291 p[3] = i2c_data[i];
292 p += 4;
293 PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
294 address, i2c_data[i]);
295 }
296
297 /* Copy the tailer */
298 memcpy(p, sensor_urb_skeleton + 20, 4);
299
300 /* Set the total length */
301 p[3] = 0x10 + len;
302
303 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
304 0x04, 0x40, 0x19,
305 0x0000, buf,
306 20 + len * 4, M5602_URB_MSG_TIMEOUT);
307
308 return (err < 0) ? err : 0;
309} 234}
310 235
311void mt9m111_dump_registers(struct sd *sd) 236static void mt9m111_dump_registers(struct sd *sd)
312{ 237{
313 u8 address, value[2] = {0x00, 0x00}; 238 u8 address, value[2] = {0x00, 0x00};
314 239
@@ -316,27 +241,27 @@ void mt9m111_dump_registers(struct sd *sd)
316 241
317 info("Dumping the mt9m111 sensor core registers"); 242 info("Dumping the mt9m111 sensor core registers");
318 value[1] = MT9M111_SENSOR_CORE; 243 value[1] = MT9M111_SENSOR_CORE;
319 mt9m111_write_sensor(sd, MT9M111_PAGE_MAP, value, 2); 244 m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
320 for (address = 0; address < 0xff; address++) { 245 for (address = 0; address < 0xff; address++) {
321 mt9m111_read_sensor(sd, address, value, 2); 246 m5602_read_sensor(sd, address, value, 2);
322 info("register 0x%x contains 0x%x%x", 247 info("register 0x%x contains 0x%x%x",
323 address, value[0], value[1]); 248 address, value[0], value[1]);
324 } 249 }
325 250
326 info("Dumping the mt9m111 color pipeline registers"); 251 info("Dumping the mt9m111 color pipeline registers");
327 value[1] = MT9M111_COLORPIPE; 252 value[1] = MT9M111_COLORPIPE;
328 mt9m111_write_sensor(sd, MT9M111_PAGE_MAP, value, 2); 253 m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
329 for (address = 0; address < 0xff; address++) { 254 for (address = 0; address < 0xff; address++) {
330 mt9m111_read_sensor(sd, address, value, 2); 255 m5602_read_sensor(sd, address, value, 2);
331 info("register 0x%x contains 0x%x%x", 256 info("register 0x%x contains 0x%x%x",
332 address, value[0], value[1]); 257 address, value[0], value[1]);
333 } 258 }
334 259
335 info("Dumping the mt9m111 camera control registers"); 260 info("Dumping the mt9m111 camera control registers");
336 value[1] = MT9M111_CAMERA_CONTROL; 261 value[1] = MT9M111_CAMERA_CONTROL;
337 mt9m111_write_sensor(sd, MT9M111_PAGE_MAP, value, 2); 262 m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
338 for (address = 0; address < 0xff; address++) { 263 for (address = 0; address < 0xff; address++) {
339 mt9m111_read_sensor(sd, address, value, 2); 264 m5602_read_sensor(sd, address, value, 2);
340 info("register 0x%x contains 0x%x%x", 265 info("register 0x%x contains 0x%x%x",
341 address, value[0], value[1]); 266 address, value[0], value[1]);
342 } 267 }
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.h b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
index 315209d5aeef..e795ab7a36c9 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
@@ -87,14 +87,6 @@ int mt9m111_probe(struct sd *sd);
87int mt9m111_init(struct sd *sd); 87int mt9m111_init(struct sd *sd);
88int mt9m111_power_down(struct sd *sd); 88int mt9m111_power_down(struct sd *sd);
89 89
90int mt9m111_read_sensor(struct sd *sd, const u8 address,
91 u8 *i2c_data, const u8 len);
92
93int mt9m111_write_sensor(struct sd *sd, const u8 address,
94 u8 *i2c_data, const u8 len);
95
96void mt9m111_dump_registers(struct sd *sd);
97
98int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val); 90int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
99int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val); 91int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
100int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val); 92int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
@@ -106,14 +98,12 @@ static struct m5602_sensor mt9m111 = {
106 .name = "MT9M111", 98 .name = "MT9M111",
107 99
108 .i2c_slave_id = 0xba, 100 .i2c_slave_id = 0xba,
101 .i2c_regW = 2,
109 102
110 .probe = mt9m111_probe, 103 .probe = mt9m111_probe,
111 .init = mt9m111_init, 104 .init = mt9m111_init,
112 .power_down = mt9m111_power_down, 105 .power_down = mt9m111_power_down,
113 106
114 .read_sensor = mt9m111_read_sensor,
115 .write_sensor = mt9m111_write_sensor,
116
117 .nctrls = 3, 107 .nctrls = 3,
118 .ctrls = { 108 .ctrls = {
119 { 109 {
@@ -1003,7 +993,7 @@ static const unsigned char init_mt9m111[][4] =
1003 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00}, 993 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
1004 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, 994 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
1005 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, 995 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
1006 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00}, 996 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00}, /* 639*/
1007 {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00}, 997 {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00},
1008 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, 998 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
1009 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, 999 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index 837c7e47661c..c908a8d6970a 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -18,77 +18,57 @@
18 18
19#include "m5602_ov9650.h" 19#include "m5602_ov9650.h"
20 20
21int ov9650_read_sensor(struct sd *sd, const u8 address, 21/* Vertically and horizontally flips the image if matched, needed for machines
22 u8 *i2c_data, const u8 len) 22 where the sensor is mounted upside down */
23{ 23static
24 int err, i; 24 const
25 25 struct dmi_system_id ov9650_flip_dmi_table[] = {
26 /* The ov9650 registers have a max depth of one byte */ 26 {
27 if (len > 1 || !len) 27 .ident = "ASUS A6VC",
28 return -EINVAL; 28 .matches = {
29 29 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
30 do { 30 DMI_MATCH(DMI_PRODUCT_NAME, "A6VC")
31 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data); 31 }
32 } while ((*i2c_data & I2C_BUSY) && !err); 32 },
33 33 {
34 m5602_write_bridge(sd, M5602_XB_I2C_DEV_ADDR, 34 .ident = "ASUS A6VM",
35 ov9650.i2c_slave_id); 35 .matches = {
36 m5602_write_bridge(sd, M5602_XB_I2C_REG_ADDR, address); 36 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
37 m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x10 + len); 37 DMI_MATCH(DMI_PRODUCT_NAME, "A6VM")
38 m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x08); 38 }
39 39 },
40 for (i = 0; i < len; i++) { 40 {
41 err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i])); 41 .ident = "ASUS A6JC",
42 42 .matches = {
43 PDEBUG(D_CONF, "Reading sensor register " 43 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
44 "0x%x containing 0x%x ", address, *i2c_data); 44 DMI_MATCH(DMI_PRODUCT_NAME, "A6JC")
45 } 45 }
46 return (err < 0) ? err : 0; 46 },
47} 47 {
48 48 .ident = "ASUS A6Ja",
49int ov9650_write_sensor(struct sd *sd, const u8 address, 49 .matches = {
50 u8 *i2c_data, const u8 len) 50 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
51{ 51 DMI_MATCH(DMI_PRODUCT_NAME, "A6J")
52 int err, i; 52 }
53 u8 *p; 53 },
54 struct usb_device *udev = sd->gspca_dev.dev; 54 {
55 __u8 *buf = sd->gspca_dev.usb_buf; 55 .ident = "ASUS A6Kt",
56 56 .matches = {
57 /* The ov9650 only supports one byte writes */ 57 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
58 if (len > 1 || !len) 58 DMI_MATCH(DMI_PRODUCT_NAME, "A6Kt")
59 return -EINVAL; 59 }
60 60 },
61 memcpy(buf, sensor_urb_skeleton, 61 {
62 sizeof(sensor_urb_skeleton)); 62 .ident = "Alienware Aurora m9700",
63 63 .matches = {
64 buf[11] = sd->sensor->i2c_slave_id; 64 DMI_MATCH(DMI_SYS_VENDOR, "alienware"),
65 buf[15] = address; 65 DMI_MATCH(DMI_PRODUCT_NAME, "Aurora m9700")
66 66 }
67 /* Special case larger sensor writes */ 67 },
68 p = buf + 16; 68 { }
69 69};
70 /* Copy a four byte write sequence for each byte to be written to */
71 for (i = 0; i < len; i++) {
72 memcpy(p, sensor_urb_skeleton + 16, 4);
73 p[3] = i2c_data[i];
74 p += 4;
75 PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
76 address, i2c_data[i]);
77 }
78
79 /* Copy the tailer */
80 memcpy(p, sensor_urb_skeleton + 20, 4);
81
82 /* Set the total length */
83 p[3] = 0x10 + len;
84
85 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
86 0x04, 0x40, 0x19,
87 0x0000, buf,
88 20 + len * 4, M5602_URB_MSG_TIMEOUT);
89 70
90 return (err < 0) ? err : 0; 71static void ov9650_dump_registers(struct sd *sd);
91}
92 72
93int ov9650_probe(struct sd *sd) 73int ov9650_probe(struct sd *sd)
94{ 74{
@@ -110,16 +90,16 @@ int ov9650_probe(struct sd *sd)
110 for (i = 0; i < ARRAY_SIZE(preinit_ov9650); i++) { 90 for (i = 0; i < ARRAY_SIZE(preinit_ov9650); i++) {
111 u8 data = preinit_ov9650[i][2]; 91 u8 data = preinit_ov9650[i][2];
112 if (preinit_ov9650[i][0] == SENSOR) 92 if (preinit_ov9650[i][0] == SENSOR)
113 ov9650_write_sensor(sd, 93 m5602_write_sensor(sd,
114 preinit_ov9650[i][1], &data, 1); 94 preinit_ov9650[i][1], &data, 1);
115 else 95 else
116 m5602_write_bridge(sd, preinit_ov9650[i][1], data); 96 m5602_write_bridge(sd, preinit_ov9650[i][1], data);
117 } 97 }
118 98
119 if (ov9650_read_sensor(sd, OV9650_PID, &prod_id, 1)) 99 if (m5602_read_sensor(sd, OV9650_PID, &prod_id, 1))
120 return -ENODEV; 100 return -ENODEV;
121 101
122 if (ov9650_read_sensor(sd, OV9650_VER, &ver_id, 1)) 102 if (m5602_read_sensor(sd, OV9650_VER, &ver_id, 1))
123 return -ENODEV; 103 return -ENODEV;
124 104
125 if ((prod_id == 0x96) && (ver_id == 0x52)) { 105 if ((prod_id == 0x96) && (ver_id == 0x52)) {
@@ -148,34 +128,90 @@ int ov9650_init(struct sd *sd)
148 for (i = 0; i < ARRAY_SIZE(init_ov9650) && !err; i++) { 128 for (i = 0; i < ARRAY_SIZE(init_ov9650) && !err; i++) {
149 data = init_ov9650[i][2]; 129 data = init_ov9650[i][2];
150 if (init_ov9650[i][0] == SENSOR) 130 if (init_ov9650[i][0] == SENSOR)
151 err = ov9650_write_sensor(sd, init_ov9650[i][1], 131 err = m5602_write_sensor(sd, init_ov9650[i][1],
152 &data, 1); 132 &data, 1);
153 else 133 else
154 err = m5602_write_bridge(sd, init_ov9650[i][1], data); 134 err = m5602_write_bridge(sd, init_ov9650[i][1], data);
155 } 135 }
156 136
157 if (!err && dmi_check_system(ov9650_flip_dmi_table)) { 137 if (dmi_check_system(ov9650_flip_dmi_table) && !err) {
158 info("vflip quirk active"); 138 info("vflip quirk active");
159 data = 0x30; 139 data = 0x30;
160 err = ov9650_write_sensor(sd, OV9650_MVFP, &data, 1); 140 err = m5602_write_sensor(sd, OV9650_MVFP, &data, 1);
161 } 141 }
142 return err;
143}
144
145int ov9650_start(struct sd *sd)
146{
147 int i, err = 0;
148 struct cam *cam = &sd->gspca_dev.cam;
162 149
163 return (err < 0) ? err : 0; 150 for (i = 0; i < ARRAY_SIZE(res_init_ov9650) && !err; i++) {
151 u8 data = res_init_ov9650[i][1];
152 err = m5602_write_bridge(sd, res_init_ov9650[i][0], data);
153 }
154 if (err < 0)
155 return err;
156
157 switch (cam->cam_mode[sd->gspca_dev.curr_mode].width)
158 {
159 case 640:
160 PDEBUG(D_V4L2, "Configuring camera for VGA mode");
161
162 for (i = 0; i < ARRAY_SIZE(VGA_ov9650) && !err; i++) {
163 u8 data = VGA_ov9650[i][2];
164 if (VGA_ov9650[i][0] == SENSOR)
165 err = m5602_write_sensor(sd,
166 VGA_ov9650[i][1], &data, 1);
167 else
168 err = m5602_write_bridge(sd, VGA_ov9650[i][1], data);
169 }
170 break;
171
172 case 352:
173 PDEBUG(D_V4L2, "Configuring camera for CIF mode");
174
175 for (i = 0; i < ARRAY_SIZE(CIF_ov9650) && !err; i++) {
176 u8 data = CIF_ov9650[i][2];
177 if (CIF_ov9650[i][0] == SENSOR)
178 err = m5602_write_sensor(sd,
179 CIF_ov9650[i][1], &data, 1);
180 else
181 err = m5602_write_bridge(sd, CIF_ov9650[i][1], data);
182 }
183 break;
184
185 case 320:
186 PDEBUG(D_V4L2, "Configuring camera for QVGA mode");
187
188 for (i = 0; i < ARRAY_SIZE(QVGA_ov9650) && !err; i++) {
189 u8 data = QVGA_ov9650[i][2];
190 if (QVGA_ov9650[i][0] == SENSOR)
191 err = m5602_write_sensor(sd,
192 QVGA_ov9650[i][1], &data, 1);
193 else
194 err = m5602_write_bridge(sd, QVGA_ov9650[i][1], data);
195 }
196 break;
197 }
198 return err;
164} 199}
165 200
166int ov9650_power_down(struct sd *sd) 201int ov9650_power_down(struct sd *sd)
167{ 202{
168 int i; 203 int i, err = 0;
169 for (i = 0; i < ARRAY_SIZE(power_down_ov9650); i++) { 204 for (i = 0; i < ARRAY_SIZE(power_down_ov9650) && !err; i++) {
170 u8 data = power_down_ov9650[i][2]; 205 u8 data = power_down_ov9650[i][2];
171 if (power_down_ov9650[i][0] == SENSOR) 206 if (power_down_ov9650[i][0] == SENSOR)
172 ov9650_write_sensor(sd, 207 err = m5602_write_sensor(sd,
173 power_down_ov9650[i][1], &data, 1); 208 power_down_ov9650[i][1], &data, 1);
174 else 209 else
175 m5602_write_bridge(sd, power_down_ov9650[i][1], data); 210 err = m5602_write_bridge(sd, power_down_ov9650[i][1],
211 data);
176 } 212 }
177 213
178 return 0; 214 return err;
179} 215}
180 216
181int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val) 217int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
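Editor's note: the probe/init/start/power_down paths in this hunk all walk a static table of {target, register, value} triplets and dispatch each row to either the sensor (over I2C) or the m5602 bridge, stopping on the first error. A minimal sketch of that pattern, assuming the m5602_write_sensor()/m5602_write_bridge() signatures and the SENSOR constant used in the calls above; the helper name is illustrative only:

/* Illustrative only: mirrors the register-table walk in ov9650_init()
 * and ov9650_start() above. */
static int ov9650_write_table(struct sd *sd,
			      const unsigned char (*table)[3], int len)
{
	int i, err = 0;

	for (i = 0; i < len && !err; i++) {
		u8 data = table[i][2];

		if (table[i][0] == SENSOR)
			/* one-byte write to an ov9650 register over I2C */
			err = m5602_write_sensor(sd, table[i][1], &data, 1);
		else
			/* write to an m5602 bridge register */
			err = m5602_write_bridge(sd, table[i][1], data);
	}
	return err;
}

Usage would then be along the lines of err = ov9650_write_table(sd, init_ov9650, ARRAY_SIZE(init_ov9650)); the per-table loops in the patch are the open-coded form of this.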
@@ -184,24 +220,24 @@ int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
184 u8 i2c_data; 220 u8 i2c_data;
185 int err; 221 int err;
186 222
187 err = ov9650_read_sensor(sd, OV9650_COM1, &i2c_data, 1); 223 err = m5602_read_sensor(sd, OV9650_COM1, &i2c_data, 1);
188 if (err < 0) 224 if (err < 0)
189 goto out; 225 goto out;
190 *val = i2c_data & 0x03; 226 *val = i2c_data & 0x03;
191 227
192 err = ov9650_read_sensor(sd, OV9650_AECH, &i2c_data, 1); 228 err = m5602_read_sensor(sd, OV9650_AECH, &i2c_data, 1);
193 if (err < 0) 229 if (err < 0)
194 goto out; 230 goto out;
195 *val |= (i2c_data << 2); 231 *val |= (i2c_data << 2);
196 232
197 err = ov9650_read_sensor(sd, OV9650_AECHM, &i2c_data, 1); 233 err = m5602_read_sensor(sd, OV9650_AECHM, &i2c_data, 1);
198 if (err < 0) 234 if (err < 0)
199 goto out; 235 goto out;
200 *val |= (i2c_data & 0x3f) << 10; 236 *val |= (i2c_data & 0x3f) << 10;
201 237
202 PDEBUG(D_V4L2, "Read exposure %d", *val); 238 PDEBUG(D_V4L2, "Read exposure %d", *val);
203out: 239out:
204 return (err < 0) ? err : 0; 240 return err;
205} 241}
206 242
207int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val) 243int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
@@ -215,24 +251,24 @@ int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
215 251
216 /* The 6 MSBs */ 252 /* The 6 MSBs */
217 i2c_data = (val >> 10) & 0x3f; 253 i2c_data = (val >> 10) & 0x3f;
218 err = ov9650_write_sensor(sd, OV9650_AECHM, 254 err = m5602_write_sensor(sd, OV9650_AECHM,
219 &i2c_data, 1); 255 &i2c_data, 1);
220 if (err < 0) 256 if (err < 0)
221 goto out; 257 goto out;
222 258
223 /* The 8 middle bits */ 259 /* The 8 middle bits */
224 i2c_data = (val >> 2) & 0xff; 260 i2c_data = (val >> 2) & 0xff;
225 err = ov9650_write_sensor(sd, OV9650_AECH, 261 err = m5602_write_sensor(sd, OV9650_AECH,
226 &i2c_data, 1); 262 &i2c_data, 1);
227 if (err < 0) 263 if (err < 0)
228 goto out; 264 goto out;
229 265
230 /* The 2 LSBs */ 266 /* The 2 LSBs */
231 i2c_data = val & 0x03; 267 i2c_data = val & 0x03;
232 err = ov9650_write_sensor(sd, OV9650_COM1, &i2c_data, 1); 268 err = m5602_write_sensor(sd, OV9650_COM1, &i2c_data, 1);
233 269
234out: 270out:
235 return (err < 0) ? err : 0; 271 return err;
236} 272}
237 273
238int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 274int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
@@ -241,13 +277,13 @@ int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
241 u8 i2c_data; 277 u8 i2c_data;
242 struct sd *sd = (struct sd *) gspca_dev; 278 struct sd *sd = (struct sd *) gspca_dev;
243 279
244 ov9650_read_sensor(sd, OV9650_VREF, &i2c_data, 1); 280 m5602_read_sensor(sd, OV9650_VREF, &i2c_data, 1);
245 *val = (i2c_data & 0x03) << 8; 281 *val = (i2c_data & 0x03) << 8;
246 282
247 err = ov9650_read_sensor(sd, OV9650_GAIN, &i2c_data, 1); 283 err = m5602_read_sensor(sd, OV9650_GAIN, &i2c_data, 1);
248 *val |= i2c_data; 284 *val |= i2c_data;
249 PDEBUG(D_V4L2, "Read gain %d", *val); 285 PDEBUG(D_V4L2, "Read gain %d", *val);
250 return (err < 0) ? err : 0; 286 return err;
251} 287}
252 288
253int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val) 289int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val)
@@ -259,16 +295,16 @@ int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val)
259 /* The 2 MSB */ 295 /* The 2 MSB */
260 /* Read the OV9650_VREF register first to avoid 296 /* Read the OV9650_VREF register first to avoid
261 corrupting the VREF high and low bits */ 297 corrupting the VREF high and low bits */
262 ov9650_read_sensor(sd, OV9650_VREF, &i2c_data, 1); 298 m5602_read_sensor(sd, OV9650_VREF, &i2c_data, 1);
263 /* Mask away all uninteresting bits */ 299 /* Mask away all uninteresting bits */
264 i2c_data = ((val & 0x0300) >> 2) | 300 i2c_data = ((val & 0x0300) >> 2) |
265 (i2c_data & 0x3F); 301 (i2c_data & 0x3F);
266 err = ov9650_write_sensor(sd, OV9650_VREF, &i2c_data, 1); 302 err = m5602_write_sensor(sd, OV9650_VREF, &i2c_data, 1);
267 303
268 /* The 8 LSBs */ 304 /* The 8 LSBs */
269 i2c_data = val & 0xff; 305 i2c_data = val & 0xff;
270 err = ov9650_write_sensor(sd, OV9650_GAIN, &i2c_data, 1); 306 err = m5602_write_sensor(sd, OV9650_GAIN, &i2c_data, 1);
271 return (err < 0) ? err : 0; 307 return err;
272} 308}
273 309
274int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val) 310int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
@@ -277,12 +313,12 @@ int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
277 u8 i2c_data; 313 u8 i2c_data;
278 struct sd *sd = (struct sd *) gspca_dev; 314 struct sd *sd = (struct sd *) gspca_dev;
279 315
280 err = ov9650_read_sensor(sd, OV9650_RED, &i2c_data, 1); 316 err = m5602_read_sensor(sd, OV9650_RED, &i2c_data, 1);
281 *val = i2c_data; 317 *val = i2c_data;
282 318
283 PDEBUG(D_V4L2, "Read red gain %d", *val); 319 PDEBUG(D_V4L2, "Read red gain %d", *val);
284 320
285 return (err < 0) ? err : 0; 321 return err;
286} 322}
287 323
288int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) 324int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
@@ -295,9 +331,9 @@ int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
295 val & 0xff); 331 val & 0xff);
296 332
297 i2c_data = val & 0xff; 333 i2c_data = val & 0xff;
298 err = ov9650_write_sensor(sd, OV9650_RED, &i2c_data, 1); 334 err = m5602_write_sensor(sd, OV9650_RED, &i2c_data, 1);
299 335
300 return (err < 0) ? err : 0; 336 return err;
301} 337}
302 338
303int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val) 339int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
@@ -306,12 +342,12 @@ int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
306 u8 i2c_data; 342 u8 i2c_data;
307 struct sd *sd = (struct sd *) gspca_dev; 343 struct sd *sd = (struct sd *) gspca_dev;
308 344
309 err = ov9650_read_sensor(sd, OV9650_BLUE, &i2c_data, 1); 345 err = m5602_read_sensor(sd, OV9650_BLUE, &i2c_data, 1);
310 *val = i2c_data; 346 *val = i2c_data;
311 347
312 PDEBUG(D_V4L2, "Read blue gain %d", *val); 348 PDEBUG(D_V4L2, "Read blue gain %d", *val);
313 349
314 return (err < 0) ? err : 0; 350 return err;
315} 351}
316 352
317int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) 353int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
@@ -324,9 +360,9 @@ int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
324 val & 0xff); 360 val & 0xff);
325 361
326 i2c_data = val & 0xff; 362 i2c_data = val & 0xff;
327 err = ov9650_write_sensor(sd, OV9650_BLUE, &i2c_data, 1); 363 err = m5602_write_sensor(sd, OV9650_BLUE, &i2c_data, 1);
328 364
329 return (err < 0) ? err : 0; 365 return err;
330} 366}
331 367
332int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 368int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
@@ -335,14 +371,14 @@ int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
335 u8 i2c_data; 371 u8 i2c_data;
336 struct sd *sd = (struct sd *) gspca_dev; 372 struct sd *sd = (struct sd *) gspca_dev;
337 373
338 err = ov9650_read_sensor(sd, OV9650_MVFP, &i2c_data, 1); 374 err = m5602_read_sensor(sd, OV9650_MVFP, &i2c_data, 1);
339 if (dmi_check_system(ov9650_flip_dmi_table)) 375 if (dmi_check_system(ov9650_flip_dmi_table))
340 *val = ((i2c_data & OV9650_HFLIP) >> 5) ? 0 : 1; 376 *val = ((i2c_data & OV9650_HFLIP) >> 5) ? 0 : 1;
341 else 377 else
342 *val = (i2c_data & OV9650_HFLIP) >> 5; 378 *val = (i2c_data & OV9650_HFLIP) >> 5;
343 PDEBUG(D_V4L2, "Read horizontal flip %d", *val); 379 PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
344 380
345 return (err < 0) ? err : 0; 381 return err;
346} 382}
347 383
348int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 384int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -352,20 +388,20 @@ int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
352 struct sd *sd = (struct sd *) gspca_dev; 388 struct sd *sd = (struct sd *) gspca_dev;
353 389
354 PDEBUG(D_V4L2, "Set horizontal flip to %d", val); 390 PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
355 err = ov9650_read_sensor(sd, OV9650_MVFP, &i2c_data, 1); 391 err = m5602_read_sensor(sd, OV9650_MVFP, &i2c_data, 1);
356 if (err < 0) 392 if (err < 0)
357 goto out; 393 goto out;
358 394
359 if (dmi_check_system(ov9650_flip_dmi_table)) 395 if (dmi_check_system(ov9650_flip_dmi_table))
360 i2c_data = ((i2c_data & 0xdf) | 396 i2c_data = ((i2c_data & 0xdf) |
361 (((val ? 0 : 1) & 0x01) << 5)); 397 (((val ? 0 : 1) & 0x01) << 5));
362 else 398 else
363 i2c_data = ((i2c_data & 0xdf) | 399 i2c_data = ((i2c_data & 0xdf) |
364 ((val & 0x01) << 5)); 400 ((val & 0x01) << 5));
365 401
366 err = ov9650_write_sensor(sd, OV9650_MVFP, &i2c_data, 1); 402 err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1);
367out: 403out:
368 return (err < 0) ? err : 0; 404 return err;
369} 405}
370 406
371int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 407int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
@@ -374,14 +410,14 @@ int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
374 u8 i2c_data; 410 u8 i2c_data;
375 struct sd *sd = (struct sd *) gspca_dev; 411 struct sd *sd = (struct sd *) gspca_dev;
376 412
377 err = ov9650_read_sensor(sd, OV9650_MVFP, &i2c_data, 1); 413 err = m5602_read_sensor(sd, OV9650_MVFP, &i2c_data, 1);
378 if (dmi_check_system(ov9650_flip_dmi_table)) 414 if (dmi_check_system(ov9650_flip_dmi_table))
379 *val = ((i2c_data & 0x10) >> 4) ? 0 : 1; 415 *val = ((i2c_data & 0x10) >> 4) ? 0 : 1;
380 else 416 else
381 *val = (i2c_data & 0x10) >> 4; 417 *val = (i2c_data & 0x10) >> 4;
382 PDEBUG(D_V4L2, "Read vertical flip %d", *val); 418 PDEBUG(D_V4L2, "Read vertical flip %d", *val);
383 419
384 return (err < 0) ? err : 0; 420 return err;
385} 421}
386 422
387int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 423int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -391,7 +427,7 @@ int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
391 struct sd *sd = (struct sd *) gspca_dev; 427 struct sd *sd = (struct sd *) gspca_dev;
392 428
393 PDEBUG(D_V4L2, "Set vertical flip to %d", val); 429 PDEBUG(D_V4L2, "Set vertical flip to %d", val);
394 err = ov9650_read_sensor(sd, OV9650_MVFP, &i2c_data, 1); 430 err = m5602_read_sensor(sd, OV9650_MVFP, &i2c_data, 1);
395 if (err < 0) 431 if (err < 0)
396 goto out; 432 goto out;
397 433
@@ -402,9 +438,9 @@ int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
402 i2c_data = ((i2c_data & 0xef) | 438 i2c_data = ((i2c_data & 0xef) |
403 ((val & 0x01) << 4)); 439 ((val & 0x01) << 4));
404 440
405 err = ov9650_write_sensor(sd, OV9650_MVFP, &i2c_data, 1); 441 err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1);
406out: 442out:
407 return (err < 0) ? err : 0; 443 return err;
408} 444}
409 445
410int ov9650_get_brightness(struct gspca_dev *gspca_dev, __s32 *val) 446int ov9650_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -413,16 +449,16 @@ int ov9650_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
413 u8 i2c_data; 449 u8 i2c_data;
414 struct sd *sd = (struct sd *) gspca_dev; 450 struct sd *sd = (struct sd *) gspca_dev;
415 451
416 err = ov9650_read_sensor(sd, OV9650_VREF, &i2c_data, 1); 452 err = m5602_read_sensor(sd, OV9650_VREF, &i2c_data, 1);
417 if (err < 0) 453 if (err < 0)
418 goto out; 454 goto out;
419 *val = (i2c_data & 0x03) << 8; 455 *val = (i2c_data & 0x03) << 8;
420 456
421 err = ov9650_read_sensor(sd, OV9650_GAIN, &i2c_data, 1); 457 err = m5602_read_sensor(sd, OV9650_GAIN, &i2c_data, 1);
422 *val |= i2c_data; 458 *val |= i2c_data;
423 PDEBUG(D_V4L2, "Read gain %d", *val); 459 PDEBUG(D_V4L2, "Read gain %d", *val);
424out: 460out:
425 return (err < 0) ? err : 0; 461 return err;
426} 462}
427 463
428int ov9650_set_brightness(struct gspca_dev *gspca_dev, __s32 val) 464int ov9650_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
@@ -435,22 +471,22 @@ int ov9650_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
435 471
436 /* Read the OV9650_VREF register first to avoid 472 /* Read the OV9650_VREF register first to avoid
437 corrupting the VREF high and low bits */ 473 corrupting the VREF high and low bits */
438 err = ov9650_read_sensor(sd, OV9650_VREF, &i2c_data, 1); 474 err = m5602_read_sensor(sd, OV9650_VREF, &i2c_data, 1);
439 if (err < 0) 475 if (err < 0)
440 goto out; 476 goto out;
441 477
442 /* Mask away all uninteresting bits */ 478 /* Mask away all uninteresting bits */
443 i2c_data = ((val & 0x0300) >> 2) | (i2c_data & 0x3F); 479 i2c_data = ((val & 0x0300) >> 2) | (i2c_data & 0x3F);
444 err = ov9650_write_sensor(sd, OV9650_VREF, &i2c_data, 1); 480 err = m5602_write_sensor(sd, OV9650_VREF, &i2c_data, 1);
445 if (err < 0) 481 if (err < 0)
446 goto out; 482 goto out;
447 483
448 /* The 8 LSBs */ 484 /* The 8 LSBs */
449 i2c_data = val & 0xff; 485 i2c_data = val & 0xff;
450 err = ov9650_write_sensor(sd, OV9650_GAIN, &i2c_data, 1); 486 err = m5602_write_sensor(sd, OV9650_GAIN, &i2c_data, 1);
451 487
452out: 488out:
453 return (err < 0) ? err : 0; 489 return err;
454} 490}
455 491
456int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val) 492int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val)
@@ -459,11 +495,11 @@ int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val)
459 u8 i2c_data; 495 u8 i2c_data;
460 struct sd *sd = (struct sd *) gspca_dev; 496 struct sd *sd = (struct sd *) gspca_dev;
461 497
462 err = ov9650_read_sensor(sd, OV9650_COM8, &i2c_data, 1); 498 err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
463 *val = (i2c_data & OV9650_AWB_EN) >> 1; 499 *val = (i2c_data & OV9650_AWB_EN) >> 1;
464 PDEBUG(D_V4L2, "Read auto white balance %d", *val); 500 PDEBUG(D_V4L2, "Read auto white balance %d", *val);
465 501
466 return (err < 0) ? err : 0; 502 return err;
467} 503}
468 504
469int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val) 505int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val)
@@ -473,14 +509,14 @@ int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val)
473 struct sd *sd = (struct sd *) gspca_dev; 509 struct sd *sd = (struct sd *) gspca_dev;
474 510
475 PDEBUG(D_V4L2, "Set auto white balance to %d", val); 511 PDEBUG(D_V4L2, "Set auto white balance to %d", val);
476 err = ov9650_read_sensor(sd, OV9650_COM8, &i2c_data, 1); 512 err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
477 if (err < 0) 513 if (err < 0)
478 goto out; 514 goto out;
479 515
480 i2c_data = ((i2c_data & 0xfd) | ((val & 0x01) << 1)); 516 i2c_data = ((i2c_data & 0xfd) | ((val & 0x01) << 1));
481 err = ov9650_write_sensor(sd, OV9650_COM8, &i2c_data, 1); 517 err = m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
482out: 518out:
483 return (err < 0) ? err : 0; 519 return err;
484} 520}
485 521
486int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val) 522int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val)
@@ -489,11 +525,11 @@ int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val)
489 u8 i2c_data; 525 u8 i2c_data;
490 struct sd *sd = (struct sd *) gspca_dev; 526 struct sd *sd = (struct sd *) gspca_dev;
491 527
492 err = ov9650_read_sensor(sd, OV9650_COM8, &i2c_data, 1); 528 err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
493 *val = (i2c_data & OV9650_AGC_EN) >> 2; 529 *val = (i2c_data & OV9650_AGC_EN) >> 2;
494 PDEBUG(D_V4L2, "Read auto gain control %d", *val); 530 PDEBUG(D_V4L2, "Read auto gain control %d", *val);
495 531
496 return (err < 0) ? err : 0; 532 return err;
497} 533}
498 534
499int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val) 535int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
@@ -503,23 +539,23 @@ int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
503 struct sd *sd = (struct sd *) gspca_dev; 539 struct sd *sd = (struct sd *) gspca_dev;
504 540
505 PDEBUG(D_V4L2, "Set auto gain control to %d", val); 541 PDEBUG(D_V4L2, "Set auto gain control to %d", val);
506 err = ov9650_read_sensor(sd, OV9650_COM8, &i2c_data, 1); 542 err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
507 if (err < 0) 543 if (err < 0)
508 goto out; 544 goto out;
509 545
510 i2c_data = ((i2c_data & 0xfb) | ((val & 0x01) << 2)); 546 i2c_data = ((i2c_data & 0xfb) | ((val & 0x01) << 2));
511 err = ov9650_write_sensor(sd, OV9650_COM8, &i2c_data, 1); 547 err = m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
512out: 548out:
513 return (err < 0) ? err : 0; 549 return err;
514} 550}
515 551
516void ov9650_dump_registers(struct sd *sd) 552static void ov9650_dump_registers(struct sd *sd)
517{ 553{
518 int address; 554 int address;
519 info("Dumping the ov9650 register state"); 555 info("Dumping the ov9650 register state");
520 for (address = 0; address < 0xa9; address++) { 556 for (address = 0; address < 0xa9; address++) {
521 u8 value; 557 u8 value;
522 ov9650_read_sensor(sd, address, &value, 1); 558 m5602_read_sensor(sd, address, &value, 1);
523 info("register 0x%x contains 0x%x", 559 info("register 0x%x contains 0x%x",
524 address, value); 560 address, value);
525 } 561 }
@@ -531,9 +567,9 @@ void ov9650_dump_registers(struct sd *sd)
531 u8 old_value, ctrl_value; 567 u8 old_value, ctrl_value;
532 u8 test_value[2] = {0xff, 0xff}; 568 u8 test_value[2] = {0xff, 0xff};
533 569
534 ov9650_read_sensor(sd, address, &old_value, 1); 570 m5602_read_sensor(sd, address, &old_value, 1);
535 ov9650_write_sensor(sd, address, test_value, 1); 571 m5602_write_sensor(sd, address, test_value, 1);
536 ov9650_read_sensor(sd, address, &ctrl_value, 1); 572 m5602_read_sensor(sd, address, &ctrl_value, 1);
537 573
538 if (ctrl_value == test_value[0]) 574 if (ctrl_value == test_value[0])
539 info("register 0x%x is writeable", address); 575 info("register 0x%x is writeable", address);
@@ -541,6 +577,6 @@ void ov9650_dump_registers(struct sd *sd)
541 info("register 0x%x is read only", address); 577 info("register 0x%x is read only", address);
542 578
543 /* Restore original value */ 579 /* Restore original value */
544 ov9650_write_sensor(sd, address, &old_value, 1); 580 m5602_write_sensor(sd, address, &old_value, 1);
545 } 581 }
546} 582}
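Editor's note: the ov9650_flip_dmi_table moved to the top of this file drives the vflip/hflip quirk seen in ov9650_init() and the get/set flip handlers: dmi_check_system() returns the number of matching entries, so a non-zero result means the sensor is mounted upside down on that machine and the MVFP flip bits are inverted. A minimal sketch of the mechanism with a hypothetical table entry (the vendor/product strings below are placeholders, not from the driver):

#include <linux/dmi.h>

static const struct dmi_system_id example_flip_dmi_table[] = {
	{
		.ident = "Example laptop with inverted sensor",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model")
		}
	},
	{ }	/* terminating empty entry */
};

static int sensor_is_upside_down(void)
{
	/* non-zero when the running machine matches any table entry */
	return dmi_check_system(example_flip_dmi_table);
}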
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.h b/drivers/media/video/gspca/m5602/m5602_ov9650.h
index 065632f0378e..f4b33b8e8dae 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.h
@@ -20,7 +20,6 @@
20#define M5602_OV9650_H_ 20#define M5602_OV9650_H_
21 21
22#include <linux/dmi.h> 22#include <linux/dmi.h>
23
24#include "m5602_sensor.h" 23#include "m5602_sensor.h"
25 24
26/*****************************************************************************/ 25/*****************************************************************************/
@@ -36,6 +35,7 @@
36#define OV9650_PID 0x0a 35#define OV9650_PID 0x0a
37#define OV9650_VER 0x0b 36#define OV9650_VER 0x0b
38#define OV9650_COM3 0x0c 37#define OV9650_COM3 0x0c
38#define OV9650_COM4 0x0d
39#define OV9650_COM5 0x0e 39#define OV9650_COM5 0x0e
40#define OV9650_COM6 0x0f 40#define OV9650_COM6 0x0f
41#define OV9650_AECH 0x10 41#define OV9650_AECH 0x10
@@ -94,6 +94,8 @@
94 94
95#define OV9650_REGISTER_RESET (1 << 7) 95#define OV9650_REGISTER_RESET (1 << 7)
96#define OV9650_VGA_SELECT (1 << 6) 96#define OV9650_VGA_SELECT (1 << 6)
97#define OV9650_CIF_SELECT (1 << 5)
98#define OV9650_QVGA_SELECT (1 << 4)
97#define OV9650_RGB_SELECT (1 << 2) 99#define OV9650_RGB_SELECT (1 << 2)
98#define OV9650_RAW_RGB_SELECT (1 << 0) 100#define OV9650_RAW_RGB_SELECT (1 << 0)
99 101
@@ -108,6 +110,8 @@
108#define OV9650_SYSTEM_CLK_SEL (1 << 7) 110#define OV9650_SYSTEM_CLK_SEL (1 << 7)
109#define OV9650_SLAM_MODE (1 << 4) 111#define OV9650_SLAM_MODE (1 << 4)
110 112
113#define OV9650_QVGA_VARIOPIXEL (1 << 7)
114
111#define OV9650_VFLIP (1 << 4) 115#define OV9650_VFLIP (1 << 4)
112#define OV9650_HFLIP (1 << 5) 116#define OV9650_HFLIP (1 << 5)
113 117
@@ -124,15 +128,9 @@ extern int dump_sensor;
124 128
125int ov9650_probe(struct sd *sd); 129int ov9650_probe(struct sd *sd);
126int ov9650_init(struct sd *sd); 130int ov9650_init(struct sd *sd);
131int ov9650_start(struct sd *sd);
127int ov9650_power_down(struct sd *sd); 132int ov9650_power_down(struct sd *sd);
128 133
129int ov9650_read_sensor(struct sd *sd, const u8 address,
130 u8 *i2c_data, const u8 len);
131int ov9650_write_sensor(struct sd *sd, const u8 address,
132 u8 *i2c_data, const u8 len);
133
134void ov9650_dump_registers(struct sd *sd);
135
136int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val); 134int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
137int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val); 135int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
138int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val); 136int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
@@ -155,11 +153,11 @@ int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val);
155static struct m5602_sensor ov9650 = { 153static struct m5602_sensor ov9650 = {
156 .name = "OV9650", 154 .name = "OV9650",
157 .i2c_slave_id = 0x60, 155 .i2c_slave_id = 0x60,
156 .i2c_regW = 1,
158 .probe = ov9650_probe, 157 .probe = ov9650_probe,
159 .init = ov9650_init, 158 .init = ov9650_init,
159 .start = ov9650_start,
160 .power_down = ov9650_power_down, 160 .power_down = ov9650_power_down,
161 .read_sensor = ov9650_read_sensor,
162 .write_sensor = ov9650_write_sensor,
163 161
164 .nctrls = 8, 162 .nctrls = 8,
165 .ctrls = { 163 .ctrls = {
@@ -264,18 +262,38 @@ static struct m5602_sensor ov9650 = {
264 } 262 }
265 }, 263 },
266 264
267 .nmodes = 1, 265 .nmodes = 3,
268 .modes = { 266 .modes = {
269 { 267 {
270 M5602_DEFAULT_FRAME_WIDTH, 268 320,
271 M5602_DEFAULT_FRAME_HEIGHT, 269 240,
270 V4L2_PIX_FMT_SBGGR8,
271 V4L2_FIELD_NONE,
272 .sizeimage =
273 320 * 240,
274 .bytesperline = 320,
275 .colorspace = V4L2_COLORSPACE_SRGB,
276 .priv = 0
277 }, {
278 352,
279 288,
280 V4L2_PIX_FMT_SBGGR8,
281 V4L2_FIELD_NONE,
282 .sizeimage =
283 352 * 288,
284 .bytesperline = 352,
285 .colorspace = V4L2_COLORSPACE_SRGB,
286 .priv = 0
287 }, {
288 640,
289 480,
272 V4L2_PIX_FMT_SBGGR8, 290 V4L2_PIX_FMT_SBGGR8,
273 V4L2_FIELD_NONE, 291 V4L2_FIELD_NONE,
274 .sizeimage = 292 .sizeimage =
275 M5602_DEFAULT_FRAME_WIDTH * M5602_DEFAULT_FRAME_HEIGHT, 293 640 * 480,
276 .bytesperline = M5602_DEFAULT_FRAME_WIDTH, 294 .bytesperline = 640,
277 .colorspace = V4L2_COLORSPACE_SRGB, 295 .colorspace = V4L2_COLORSPACE_SRGB,
278 .priv = 1 296 .priv = 0
279 } 297 }
280 } 298 }
281}; 299};
@@ -324,6 +342,7 @@ static const unsigned char init_ov9650[][3] =
324 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00}, 342 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
325 {BRIDGE, M5602_XB_GPIO_DAT, 0x00}, 343 {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
326 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a}, 344 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a},
345
327 /* Reset chip */ 346 /* Reset chip */
328 {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET}, 347 {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
329 /* Enable double clock */ 348 /* Enable double clock */
@@ -331,8 +350,6 @@ static const unsigned char init_ov9650[][3] =
331 /* Do something out of spec with the power */ 350 /* Do something out of spec with the power */
332 {SENSOR, OV9650_OFON, 0x40}, 351 {SENSOR, OV9650_OFON, 0x40},
333 352
334 /* Set QQVGA */
335 {SENSOR, OV9650_COM1, 0x20},
336 /* Set fast AGC/AEC algorithm with unlimited step size */ 353 /* Set fast AGC/AEC algorithm with unlimited step size */
337 {SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC | 354 {SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC |
338 OV9650_AEC_UNLIM_STEP_SIZE | 355 OV9650_AEC_UNLIM_STEP_SIZE |
@@ -343,7 +360,7 @@ static const unsigned char init_ov9650[][3] =
343 {SENSOR, OV9650_ACOM38, 0x81}, 360 {SENSOR, OV9650_ACOM38, 0x81},
344 /* Turn off color matrix coefficient double option */ 361 /* Turn off color matrix coefficient double option */
345 {SENSOR, OV9650_COM16, 0x00}, 362 {SENSOR, OV9650_COM16, 0x00},
346 /* Enable color matrix for RGB/YUV, Delay Y channel, 363 /* Enable color matrix for RGB/YUV, Delay Y channel,
347 set output Y/UV delay to 1 */ 364 set output Y/UV delay to 1 */
348 {SENSOR, OV9650_COM13, 0x19}, 365 {SENSOR, OV9650_COM13, 0x19},
349 /* Enable digital BLC, Set output mode to U Y V Y */ 366 /* Enable digital BLC, Set output mode to U Y V Y */
@@ -352,7 +369,7 @@ static const unsigned char init_ov9650[][3] =
352 {SENSOR, OV9650_COM24, 0x00}, 369 {SENSOR, OV9650_COM24, 0x00},
353 /* Enable HREF and some out of spec things */ 370 /* Enable HREF and some out of spec things */
354 {SENSOR, OV9650_COM12, 0x73}, 371 {SENSOR, OV9650_COM12, 0x73},
355 /* Set all DBLC offset signs to positive and 372 /* Set all DBLC offset signs to positive and
356 do some out of spec stuff */ 373 do some out of spec stuff */
357 {SENSOR, OV9650_DBLC1, 0xdf}, 374 {SENSOR, OV9650_DBLC1, 0xdf},
358 {SENSOR, OV9650_COM21, 0x06}, 375 {SENSOR, OV9650_COM21, 0x06},
@@ -364,7 +381,7 @@ static const unsigned char init_ov9650[][3] =
364 {SENSOR, OV9650_RSVD96, 0x04}, 381 {SENSOR, OV9650_RSVD96, 0x04},
365 /* Enable full range output */ 382 /* Enable full range output */
366 {SENSOR, OV9650_COM15, 0x0}, 383 {SENSOR, OV9650_COM15, 0x0},
367 /* Enable HREF at optical black, enable ADBLC bias, 384 /* Enable HREF at optical black, enable ADBLC bias,
368 enable ADBLC, reset timings at format change */ 385 enable ADBLC, reset timings at format change */
369 {SENSOR, OV9650_COM6, 0x4b}, 386 {SENSOR, OV9650_COM6, 0x4b},
370 /* Subtract 32 from the B channel bias */ 387 /* Subtract 32 from the B channel bias */
@@ -385,7 +402,7 @@ static const unsigned char init_ov9650[][3] =
385 {SENSOR, OV9650_AEB, 0x5c}, 402 {SENSOR, OV9650_AEB, 0x5c},
386 /* Set the high and low limit nibbles to 3 */ 403 /* Set the high and low limit nibbles to 3 */
387 {SENSOR, OV9650_VPT, 0xc3}, 404 {SENSOR, OV9650_VPT, 0xc3},
388 /* Set the Automatic Gain Ceiling (AGC) to 128x, 405 /* Set the Automatic Gain Ceiling (AGC) to 128x,
389 drop VSYNC at frame drop, 406 drop VSYNC at frame drop,
390 limit exposure timing, 407 limit exposure timing,
391 drop frame when the AEC step is larger than the exposure gap */ 408 drop frame when the AEC step is larger than the exposure gap */
@@ -394,9 +411,9 @@ static const unsigned char init_ov9650[][3] =
394 and set PWDN to SLVS (slave mode vertical sync) */ 411 and set PWDN to SLVS (slave mode vertical sync) */
395 {SENSOR, OV9650_COM10, 0x42}, 412 {SENSOR, OV9650_COM10, 0x42},
396 /* Set horizontal column start high to default value */ 413 /* Set horizontal column start high to default value */
397 {SENSOR, OV9650_HSTART, 0x1a}, 414 {SENSOR, OV9650_HSTART, 0x1a}, /* 210 */
398 /* Set horizontal column end */ 415 /* Set horizontal column end */
399 {SENSOR, OV9650_HSTOP, 0xbf}, 416 {SENSOR, OV9650_HSTOP, 0xbf}, /* 1534 */
400 /* Complementing register to the two writes above */ 417 /* Complementing register to the two writes above */
401 {SENSOR, OV9650_HREF, 0xb2}, 418 {SENSOR, OV9650_HREF, 0xb2},
402 /* Set vertical row start high bits */ 419 /* Set vertical row start high bits */
@@ -405,10 +422,6 @@ static const unsigned char init_ov9650[][3] =
405 {SENSOR, OV9650_VSTOP, 0x7e}, 422 {SENSOR, OV9650_VSTOP, 0x7e},
406 /* Set complementing vertical frame control */ 423 /* Set complementing vertical frame control */
407 {SENSOR, OV9650_VREF, 0x10}, 424 {SENSOR, OV9650_VREF, 0x10},
408 /* Set raw RGB output format with VGA resolution */
409 {SENSOR, OV9650_COM7, OV9650_VGA_SELECT |
410 OV9650_RGB_SELECT |
411 OV9650_RAW_RGB_SELECT},
412 {SENSOR, OV9650_ADC, 0x04}, 425 {SENSOR, OV9650_ADC, 0x04},
413 {SENSOR, OV9650_HV, 0x40}, 426 {SENSOR, OV9650_HV, 0x40},
414 /* Enable denoise, and white-pixel erase */ 427 /* Enable denoise, and white-pixel erase */
@@ -417,30 +430,15 @@ static const unsigned char init_ov9650[][3] =
417 /* Set the high bits of the exposure value */ 430 /* Set the high bits of the exposure value */
418 {SENSOR, OV9650_AECH, ((EXPOSURE_DEFAULT & 0xff00) >> 8)}, 431 {SENSOR, OV9650_AECH, ((EXPOSURE_DEFAULT & 0xff00) >> 8)},
419 432
433 /* Enable VARIOPIXEL */
434 {SENSOR, OV9650_COM3, OV9650_VARIOPIXEL},
435 {SENSOR, OV9650_COM4, OV9650_QVGA_VARIOPIXEL},
436
420 /* Set the low bits of the exposure value */ 437 /* Set the low bits of the exposure value */
421 {SENSOR, OV9650_COM1, (EXPOSURE_DEFAULT & 0xff)}, 438 {SENSOR, OV9650_COM1, (EXPOSURE_DEFAULT & 0xff)},
422 {SENSOR, OV9650_GAIN, GAIN_DEFAULT}, 439 {SENSOR, OV9650_GAIN, GAIN_DEFAULT},
423 {SENSOR, OV9650_BLUE, BLUE_GAIN_DEFAULT}, 440 {SENSOR, OV9650_BLUE, BLUE_GAIN_DEFAULT},
424 {SENSOR, OV9650_RED, RED_GAIN_DEFAULT}, 441 {SENSOR, OV9650_RED, RED_GAIN_DEFAULT},
425
426 {SENSOR, OV9650_COM3, OV9650_VARIOPIXEL},
427 {SENSOR, OV9650_COM5, OV9650_SYSTEM_CLK_SEL},
428
429 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x82},
430 {BRIDGE, M5602_XB_LINE_OF_FRAME_L, 0x00},
431 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
432 {BRIDGE, M5602_XB_PIX_OF_LINE_L, 0x00},
433 {BRIDGE, M5602_XB_SIG_INI, 0x01},
434 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
435 {BRIDGE, M5602_XB_VSYNC_PARA, 0x09},
436 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
437 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
438 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0},
439 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
440 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
441 {BRIDGE, M5602_XB_HSYNC_PARA, 0x5e},
442 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
443 {BRIDGE, M5602_XB_HSYNC_PARA, 0xde}
444}; 442};
445 443
446static const unsigned char power_down_ov9650[][3] = 444static const unsigned char power_down_ov9650[][3] =
@@ -460,43 +458,76 @@ static const unsigned char power_down_ov9650[][3] =
460 {BRIDGE, M5602_XB_GPIO_EN_L, 0x06}, 458 {BRIDGE, M5602_XB_GPIO_EN_L, 0x06},
461 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02}, 459 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
462 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04}, 460 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
463 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0} 461 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
464}; 462};
465 463
466/* Vertically and horizontally flips the image if matched, needed for machines 464static const unsigned char res_init_ov9650[][2] =
467 where the sensor is mounted upside down */ 465{
468static 466 {M5602_XB_LINE_OF_FRAME_H, 0x82},
469 const 467 {M5602_XB_LINE_OF_FRAME_L, 0x00},
470 struct dmi_system_id ov9650_flip_dmi_table[] = { 468 {M5602_XB_PIX_OF_LINE_H, 0x82},
471 { 469 {M5602_XB_PIX_OF_LINE_L, 0x00},
472 .ident = "ASUS A6VC", 470 {M5602_XB_SIG_INI, 0x01}
473 .matches = { 471};
474 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 472
475 DMI_MATCH(DMI_PRODUCT_NAME, "A6VC") 473static const unsigned char VGA_ov9650[][3] =
476 } 474{
477 }, 475 /* Moves the view window in a vertical orientation */
478 { 476 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
479 .ident = "ASUS A6VM", 477 {BRIDGE, M5602_XB_VSYNC_PARA, 0x09},
480 .matches = { 478 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
481 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 479 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
482 DMI_MATCH(DMI_PRODUCT_NAME, "A6VM") 480 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0}, /* 480 */
483 } 481 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
484 }, 482 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
485 { 483 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
486 .ident = "ASUS A6JC", 484 {BRIDGE, M5602_XB_HSYNC_PARA, 0x62}, /* 98 */
487 .matches = { 485 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02}, /* 640 + 98 */
488 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 486 {BRIDGE, M5602_XB_HSYNC_PARA, 0xe2},
489 DMI_MATCH(DMI_PRODUCT_NAME, "A6JC") 487
490 } 488 {SENSOR, OV9650_COM7, OV9650_VGA_SELECT |
491 }, 489 OV9650_RGB_SELECT |
492 { 490 OV9650_RAW_RGB_SELECT},
493 .ident = "ASUS A6Kt", 491};
494 .matches = { 492
495 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 493static const unsigned char CIF_ov9650[][3] =
496 DMI_MATCH(DMI_PRODUCT_NAME, "A6Kt") 494{
497 } 495 /* Moves the view window in a vertical orientation */
498 }, 496 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
499 { } 497 {BRIDGE, M5602_XB_VSYNC_PARA, 0x09},
498 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
499 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
500 {BRIDGE, M5602_XB_VSYNC_PARA, 0x20}, /* 288 */
501 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
502 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
503 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
504 {BRIDGE, M5602_XB_HSYNC_PARA, 0x62}, /* 98 */
505 {BRIDGE, M5602_XB_HSYNC_PARA, 0x01}, /* 352 + 98 */
506 {BRIDGE, M5602_XB_HSYNC_PARA, 0xc2},
507
508 {SENSOR, OV9650_COM7, OV9650_CIF_SELECT |
509 OV9650_RGB_SELECT |
510 OV9650_RAW_RGB_SELECT},
511};
512
513static const unsigned char QVGA_ov9650[][3] =
514{
515 /* Moves the view window in a vertical orientation */
516 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
517 {BRIDGE, M5602_XB_VSYNC_PARA, 0x08},
518 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
519 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
520 {BRIDGE, M5602_XB_VSYNC_PARA, 0xf0}, /* 240 */
521 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
522 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
523 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
524 {BRIDGE, M5602_XB_HSYNC_PARA, 0x31}, /* 50 */
525 {BRIDGE, M5602_XB_HSYNC_PARA, 0x01}, /* 320 + 50 */
526 {BRIDGE, M5602_XB_HSYNC_PARA, 0x71},
527
528 {SENSOR, OV9650_COM7, OV9650_QVGA_SELECT |
529 OV9650_RGB_SELECT |
530 OV9650_RAW_RGB_SELECT},
500}; 531};
501 532
502#endif 533#endif
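Editor's note: in the new per-mode tables above, the capture window is programmed as big-endian byte pairs written back-to-back to M5602_XB_HSYNC_PARA / M5602_XB_VSYNC_PARA: first the start coordinate, then the stop coordinate, which the in-table comments describe as roughly start offset plus the frame dimension (VGA: 98 and 98 + 640 = 738 = 0x02e2). A rough sketch of that encoding, under those assumptions; the helper is hypothetical and the exact boundary convention (the QVGA comments look off by one against the hex) is uncertain:

/* Illustrative only: decomposes a 16-bit horizontal window into the
 * byte sequence the VGA/CIF/QVGA tables above write to the bridge. */
static int ov9650_write_hsync_window(struct sd *sd, u16 start, u16 width)
{
	u16 stop = start + width;	/* e.g. VGA: 98 + 640 = 738 */
	int err;

	err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, start >> 8);
	if (!err)
		err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, start & 0xff);
	if (!err)
		err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, stop >> 8);
	if (!err)
		err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, stop & 0xff);
	return err;
}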
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.c b/drivers/media/video/gspca/m5602/m5602_po1030.c
index d17ac52566e6..2e7fb91673cf 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.c
@@ -18,6 +18,8 @@
18 18
19#include "m5602_po1030.h" 19#include "m5602_po1030.h"
20 20
21static void po1030_dump_registers(struct sd *sd);
22
21int po1030_probe(struct sd *sd) 23int po1030_probe(struct sd *sd)
22{ 24{
23 u8 prod_id = 0, ver_id = 0, i; 25 u8 prod_id = 0, ver_id = 0, i;
@@ -38,16 +40,16 @@ int po1030_probe(struct sd *sd)
38 for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) { 40 for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
39 u8 data = preinit_po1030[i][2]; 41 u8 data = preinit_po1030[i][2];
40 if (preinit_po1030[i][0] == SENSOR) 42 if (preinit_po1030[i][0] == SENSOR)
41 po1030_write_sensor(sd, 43 m5602_write_sensor(sd,
42 preinit_po1030[i][1], &data, 1); 44 preinit_po1030[i][1], &data, 1);
43 else 45 else
44 m5602_write_bridge(sd, preinit_po1030[i][1], data); 46 m5602_write_bridge(sd, preinit_po1030[i][1], data);
45 } 47 }
46 48
47 if (po1030_read_sensor(sd, 0x3, &prod_id, 1)) 49 if (m5602_read_sensor(sd, 0x3, &prod_id, 1))
48 return -ENODEV; 50 return -ENODEV;
49 51
50 if (po1030_read_sensor(sd, 0x4, &ver_id, 1)) 52 if (m5602_read_sensor(sd, 0x4, &ver_id, 1))
51 return -ENODEV; 53 return -ENODEV;
52 54
53 if ((prod_id == 0x02) && (ver_id == 0xef)) { 55 if ((prod_id == 0x02) && (ver_id == 0xef)) {
@@ -64,78 +66,12 @@ sensor_found:
64 return 0; 66 return 0;
65} 67}
66 68
67int po1030_read_sensor(struct sd *sd, const u8 address,
68 u8 *i2c_data, const u8 len)
69{
70 int err, i;
71
72 do {
73 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
74 } while ((*i2c_data & I2C_BUSY) && !err);
75
76 m5602_write_bridge(sd, M5602_XB_I2C_DEV_ADDR,
77 sd->sensor->i2c_slave_id);
78 m5602_write_bridge(sd, M5602_XB_I2C_REG_ADDR, address);
79 m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x10 + len);
80 m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x08);
81
82 for (i = 0; i < len; i++) {
83 err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
84
85 PDEBUG(D_CONF, "Reading sensor register "
86 "0x%x containing 0x%x ", address, *i2c_data);
87 }
88 return (err < 0) ? err : 0;
89}
90
91int po1030_write_sensor(struct sd *sd, const u8 address,
92 u8 *i2c_data, const u8 len)
93{
94 int err, i;
95 u8 *p;
96 struct usb_device *udev = sd->gspca_dev.dev;
97 __u8 *buf = sd->gspca_dev.usb_buf;
98
99 /* The po1030 only supports one byte writes */
100 if (len > 1 || !len)
101 return -EINVAL;
102
103 memcpy(buf, sensor_urb_skeleton, sizeof(sensor_urb_skeleton));
104
105 buf[11] = sd->sensor->i2c_slave_id;
106 buf[15] = address;
107
108 p = buf + 16;
109
110 /* Copy a four byte write sequence for each byte to be written to */
111 for (i = 0; i < len; i++) {
112 memcpy(p, sensor_urb_skeleton + 16, 4);
113 p[3] = i2c_data[i];
114 p += 4;
115 PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
116 address, i2c_data[i]);
117 }
118
119 /* Copy the footer */
120 memcpy(p, sensor_urb_skeleton + 20, 4);
121
122 /* Set the total length */
123 p[3] = 0x10 + len;
124
125 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
126 0x04, 0x40, 0x19,
127 0x0000, buf,
128 20 + len * 4, M5602_URB_MSG_TIMEOUT);
129
130 return (err < 0) ? err : 0;
131}
132
133int po1030_init(struct sd *sd) 69int po1030_init(struct sd *sd)
134{ 70{
135 int i, err = 0; 71 int i, err = 0;
136 72
137 /* Init the sensor */ 73 /* Init the sensor */
138 for (i = 0; i < ARRAY_SIZE(init_po1030); i++) { 74 for (i = 0; i < ARRAY_SIZE(init_po1030) && !err; i++) {
139 u8 data[2] = {0x00, 0x00}; 75 u8 data[2] = {0x00, 0x00};
140 76
141 switch (init_po1030[i][0]) { 77 switch (init_po1030[i][0]) {
@@ -147,16 +83,10 @@ int po1030_init(struct sd *sd)
147 83
148 case SENSOR: 84 case SENSOR:
149 data[0] = init_po1030[i][2]; 85 data[0] = init_po1030[i][2];
150 err = po1030_write_sensor(sd, 86 err = m5602_write_sensor(sd,
151 init_po1030[i][1], data, 1); 87 init_po1030[i][1], data, 1);
152 break; 88 break;
153 89
154 case SENSOR_LONG:
155 data[0] = init_po1030[i][2];
156 data[1] = init_po1030[i][3];
157 err = po1030_write_sensor(sd,
158 init_po1030[i][1], data, 2);
159 break;
160 default: 90 default:
161 info("Invalid stream command, exiting init"); 91 info("Invalid stream command, exiting init");
162 return -EINVAL; 92 return -EINVAL;
@@ -166,7 +96,7 @@ int po1030_init(struct sd *sd)
166 if (dump_sensor) 96 if (dump_sensor)
167 po1030_dump_registers(sd); 97 po1030_dump_registers(sd);
168 98
169 return (err < 0) ? err : 0; 99 return err;
170} 100}
171 101
172int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val) 102int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
@@ -175,19 +105,19 @@ int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
175 u8 i2c_data; 105 u8 i2c_data;
176 int err; 106 int err;
177 107
178 err = po1030_read_sensor(sd, PO1030_REG_INTEGLINES_H, 108 err = m5602_read_sensor(sd, PO1030_REG_INTEGLINES_H,
179 &i2c_data, 1); 109 &i2c_data, 1);
180 if (err < 0) 110 if (err < 0)
181 goto out; 111 goto out;
182 *val = (i2c_data << 8); 112 *val = (i2c_data << 8);
183 113
184 err = po1030_read_sensor(sd, PO1030_REG_INTEGLINES_M, 114 err = m5602_read_sensor(sd, PO1030_REG_INTEGLINES_M,
185 &i2c_data, 1); 115 &i2c_data, 1);
186 *val |= i2c_data; 116 *val |= i2c_data;
187 117
188 PDEBUG(D_V4L2, "Exposure read as %d", *val); 118 PDEBUG(D_V4L2, "Exposure read as %d", *val);
189out: 119out:
190 return (err < 0) ? err : 0; 120 return err;
191} 121}
192 122
193int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val) 123int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
@@ -202,7 +132,7 @@ int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
202 PDEBUG(D_V4L2, "Set exposure to high byte to 0x%x", 132 PDEBUG(D_V4L2, "Set exposure to high byte to 0x%x",
203 i2c_data); 133 i2c_data);
204 134
205 err = po1030_write_sensor(sd, PO1030_REG_INTEGLINES_H, 135 err = m5602_write_sensor(sd, PO1030_REG_INTEGLINES_H,
206 &i2c_data, 1); 136 &i2c_data, 1);
207 if (err < 0) 137 if (err < 0)
208 goto out; 138 goto out;
@@ -210,11 +140,11 @@ int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
210 i2c_data = (val & 0xff); 140 i2c_data = (val & 0xff);
211 PDEBUG(D_V4L2, "Set exposure to low byte to 0x%x", 141 PDEBUG(D_V4L2, "Set exposure to low byte to 0x%x",
212 i2c_data); 142 i2c_data);
213 err = po1030_write_sensor(sd, PO1030_REG_INTEGLINES_M, 143 err = m5602_write_sensor(sd, PO1030_REG_INTEGLINES_M,
214 &i2c_data, 1); 144 &i2c_data, 1);
215 145
216out: 146out:
217 return (err < 0) ? err : 0; 147 return err;
218} 148}
219 149
220int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 150int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
@@ -223,12 +153,12 @@ int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
223 u8 i2c_data; 153 u8 i2c_data;
224 int err; 154 int err;
225 155
226 err = po1030_read_sensor(sd, PO1030_REG_GLOBALGAIN, 156 err = m5602_read_sensor(sd, PO1030_REG_GLOBALGAIN,
227 &i2c_data, 1); 157 &i2c_data, 1);
228 *val = i2c_data; 158 *val = i2c_data;
229 PDEBUG(D_V4L2, "Read global gain %d", *val); 159 PDEBUG(D_V4L2, "Read global gain %d", *val);
230 160
231 return (err < 0) ? err : 0; 161 return err;
232} 162}
233 163
234int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 164int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
@@ -237,14 +167,14 @@ int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
237 u8 i2c_data; 167 u8 i2c_data;
238 int err; 168 int err;
239 169
240 err = po1030_read_sensor(sd, PO1030_REG_CONTROL2, 170 err = m5602_read_sensor(sd, PO1030_REG_CONTROL2,
241 &i2c_data, 1); 171 &i2c_data, 1);
242 172
243 *val = (i2c_data >> 7) & 0x01 ; 173 *val = (i2c_data >> 7) & 0x01 ;
244 174
245 PDEBUG(D_V4L2, "Read hflip %d", *val); 175 PDEBUG(D_V4L2, "Read hflip %d", *val);
246 176
247 return (err < 0) ? err : 0; 177 return err;
248} 178}
249 179
250int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 180int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -254,13 +184,17 @@ int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
254 int err; 184 int err;
255 185
256 PDEBUG(D_V4L2, "Set hflip %d", val); 186 PDEBUG(D_V4L2, "Set hflip %d", val);
187 err = m5602_read_sensor(sd, PO1030_REG_CONTROL2, &i2c_data, 1);
188 if (err < 0)
189 goto out;
257 190
258 i2c_data = (val & 0x01) << 7; 191 i2c_data = (0x7f & i2c_data) | ((val & 0x01) << 7);
259 192
260 err = po1030_write_sensor(sd, PO1030_REG_CONTROL2, 193 err = m5602_write_sensor(sd, PO1030_REG_CONTROL2,
261 &i2c_data, 1); 194 &i2c_data, 1);
262 195
263 return (err < 0) ? err : 0; 196out:
197 return err;
264} 198}
265 199
266int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 200int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
@@ -269,14 +203,14 @@ int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
269 u8 i2c_data; 203 u8 i2c_data;
270 int err; 204 int err;
271 205
272 err = po1030_read_sensor(sd, PO1030_REG_GLOBALGAIN, 206 err = m5602_read_sensor(sd, PO1030_REG_GLOBALGAIN,
273 &i2c_data, 1); 207 &i2c_data, 1);
274 208
275 *val = (i2c_data >> 6) & 0x01; 209 *val = (i2c_data >> 6) & 0x01;
276 210
277 PDEBUG(D_V4L2, "Read vflip %d", *val); 211 PDEBUG(D_V4L2, "Read vflip %d", *val);
278 212
279 return (err < 0) ? err : 0; 213 return err;
280} 214}
281 215
282int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 216int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -286,13 +220,17 @@ int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
286 int err; 220 int err;
287 221
288 PDEBUG(D_V4L2, "Set vflip %d", val); 222 PDEBUG(D_V4L2, "Set vflip %d", val);
223 err = m5602_read_sensor(sd, PO1030_REG_CONTROL2, &i2c_data, 1);
224 if (err < 0)
225 goto out;
289 226
290 i2c_data = (val & 0x01) << 6; 227 i2c_data = (i2c_data & 0xbf) | ((val & 0x01) << 6);
291 228
292 err = po1030_write_sensor(sd, PO1030_REG_CONTROL2, 229 err = m5602_write_sensor(sd, PO1030_REG_CONTROL2,
293 &i2c_data, 1); 230 &i2c_data, 1);
294 231
295 return (err < 0) ? err : 0; 232out:
233 return err;
296} 234}
297 235
298int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val) 236int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val)
@@ -303,9 +241,9 @@ int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val)
303 241
304 i2c_data = val & 0xff; 242 i2c_data = val & 0xff;
305 PDEBUG(D_V4L2, "Set global gain to %d", i2c_data); 243 PDEBUG(D_V4L2, "Set global gain to %d", i2c_data);
306 err = po1030_write_sensor(sd, PO1030_REG_GLOBALGAIN, 244 err = m5602_write_sensor(sd, PO1030_REG_GLOBALGAIN,
307 &i2c_data, 1); 245 &i2c_data, 1);
308 return (err < 0) ? err : 0; 246 return err;
309} 247}
310 248
311int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val) 249int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
@@ -314,11 +252,11 @@ int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
314 u8 i2c_data; 252 u8 i2c_data;
315 int err; 253 int err;
316 254
317 err = po1030_read_sensor(sd, PO1030_REG_RED_GAIN, 255 err = m5602_read_sensor(sd, PO1030_REG_RED_GAIN,
318 &i2c_data, 1); 256 &i2c_data, 1);
319 *val = i2c_data; 257 *val = i2c_data;
320 PDEBUG(D_V4L2, "Read red gain %d", *val); 258 PDEBUG(D_V4L2, "Read red gain %d", *val);
321 return (err < 0) ? err : 0; 259 return err;
322} 260}
323 261
324int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) 262int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
@@ -329,9 +267,9 @@ int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
329 267
330 i2c_data = val & 0xff; 268 i2c_data = val & 0xff;
331 PDEBUG(D_V4L2, "Set red gain to %d", i2c_data); 269 PDEBUG(D_V4L2, "Set red gain to %d", i2c_data);
332 err = po1030_write_sensor(sd, PO1030_REG_RED_GAIN, 270 err = m5602_write_sensor(sd, PO1030_REG_RED_GAIN,
333 &i2c_data, 1); 271 &i2c_data, 1);
334 return (err < 0) ? err : 0; 272 return err;
335} 273}
336 274
337int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val) 275int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
@@ -340,12 +278,12 @@ int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
340 u8 i2c_data; 278 u8 i2c_data;
341 int err; 279 int err;
342 280
343 err = po1030_read_sensor(sd, PO1030_REG_BLUE_GAIN, 281 err = m5602_read_sensor(sd, PO1030_REG_BLUE_GAIN,
344 &i2c_data, 1); 282 &i2c_data, 1);
345 *val = i2c_data; 283 *val = i2c_data;
346 PDEBUG(D_V4L2, "Read blue gain %d", *val); 284 PDEBUG(D_V4L2, "Read blue gain %d", *val);
347 285
348 return (err < 0) ? err : 0; 286 return err;
349} 287}
350 288
351int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) 289int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
@@ -355,10 +293,10 @@ int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
355 int err; 293 int err;
356 i2c_data = val & 0xff; 294 i2c_data = val & 0xff;
357 PDEBUG(D_V4L2, "Set blue gain to %d", i2c_data); 295 PDEBUG(D_V4L2, "Set blue gain to %d", i2c_data);
358 err = po1030_write_sensor(sd, PO1030_REG_BLUE_GAIN, 296 err = m5602_write_sensor(sd, PO1030_REG_BLUE_GAIN,
359 &i2c_data, 1); 297 &i2c_data, 1);
360 298
361 return (err < 0) ? err : 0; 299 return err;
362} 300}
363 301
364int po1030_power_down(struct sd *sd) 302int po1030_power_down(struct sd *sd)
@@ -366,14 +304,14 @@ int po1030_power_down(struct sd *sd)
366 return 0; 304 return 0;
367} 305}
368 306
369void po1030_dump_registers(struct sd *sd) 307static void po1030_dump_registers(struct sd *sd)
370{ 308{
371 int address; 309 int address;
372 u8 value = 0; 310 u8 value = 0;
373 311
374 info("Dumping the po1030 sensor core registers"); 312 info("Dumping the po1030 sensor core registers");
375 for (address = 0; address < 0x7f; address++) { 313 for (address = 0; address < 0x7f; address++) {
376 po1030_read_sensor(sd, address, &value, 1); 314 m5602_read_sensor(sd, address, &value, 1);
377 info("register 0x%x contains 0x%x", 315 info("register 0x%x contains 0x%x",
378 address, value); 316 address, value);
379 } 317 }
@@ -385,9 +323,9 @@ void po1030_dump_registers(struct sd *sd)
385 u8 old_value, ctrl_value; 323 u8 old_value, ctrl_value;
386 u8 test_value[2] = {0xff, 0xff}; 324 u8 test_value[2] = {0xff, 0xff};
387 325
388 po1030_read_sensor(sd, address, &old_value, 1); 326 m5602_read_sensor(sd, address, &old_value, 1);
389 po1030_write_sensor(sd, address, test_value, 1); 327 m5602_write_sensor(sd, address, test_value, 1);
390 po1030_read_sensor(sd, address, &ctrl_value, 1); 328 m5602_read_sensor(sd, address, &ctrl_value, 1);
391 329
392 if (ctrl_value == test_value[0]) 330 if (ctrl_value == test_value[0])
393 info("register 0x%x is writeable", address); 331 info("register 0x%x is writeable", address);
@@ -395,6 +333,6 @@ void po1030_dump_registers(struct sd *sd)
395 info("register 0x%x is read only", address); 333 info("register 0x%x is read only", address);
396 334
397 /* Restore original value */ 335 /* Restore original value */
398 po1030_write_sensor(sd, address, &old_value, 1); 336 m5602_write_sensor(sd, address, &old_value, 1);
399 } 337 }
400} 338}
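Editor's note: the po1030_set_hflip()/po1030_set_vflip() changes above replace a blind register write with a read-modify-write of PO1030_REG_CONTROL2, so toggling one flip bit no longer clobbers the other bits of the register. A minimal sketch of that pattern, assuming the m5602_read_sensor()/m5602_write_sensor() signatures used in the patch; the helper name is illustrative only:

/* Illustrative only: update a single CONTROL2 bit while preserving
 * the rest of the register, as the flip fixes above now do. */
static int po1030_update_control2_bit(struct sd *sd, u8 bit, __s32 val)
{
	u8 i2c_data;
	int err;

	err = m5602_read_sensor(sd, PO1030_REG_CONTROL2, &i2c_data, 1);
	if (err < 0)
		return err;

	i2c_data = (i2c_data & ~(1 << bit)) | ((val & 0x01) << bit);

	return m5602_write_sensor(sd, PO1030_REG_CONTROL2, &i2c_data, 1);
}

Per the patch, bit 7 corresponds to hflip and bit 6 to vflip.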
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.h b/drivers/media/video/gspca/m5602/m5602_po1030.h
index a0b75ff61d79..def39d5bcec6 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.h
@@ -10,7 +10,7 @@
10 * v4l2 interface modeled after the V4L2 driver 10 * v4l2 interface modeled after the V4L2 driver
11 * for SN9C10x PC Camera Controllers 11 * for SN9C10x PC Camera Controllers
12 * 12 *
13 * Register defines taken from Pascal Stangs Proxycon Armlib 13 * Register defines taken from Pascal Stangs Procyon Armlib
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as 16 * modify it under the terms of the GNU General Public License as
@@ -128,13 +128,6 @@ int po1030_probe(struct sd *sd);
128int po1030_init(struct sd *sd); 128int po1030_init(struct sd *sd);
129int po1030_power_down(struct sd *sd); 129int po1030_power_down(struct sd *sd);
130 130
131void po1030_dump_registers(struct sd *sd);
132
133int po1030_read_sensor(struct sd *sd, const u8 address,
134 u8 *i2c_data, const u8 len);
135int po1030_write_sensor(struct sd *sd, const u8 address,
136 u8 *i2c_data, const u8 len);
137
138int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val); 131int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
139int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val); 132int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
140int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val); 133int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
@@ -152,6 +145,7 @@ static struct m5602_sensor po1030 = {
152 .name = "PO1030", 145 .name = "PO1030",
153 146
154 .i2c_slave_id = 0xdc, 147 .i2c_slave_id = 0xdc,
148 .i2c_regW = 1,
155 149
156 .probe = po1030_probe, 150 .probe = po1030_probe,
157 .init = po1030_init, 151 .init = po1030_init,
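Editor's note: the new .i2c_regW member (set to 1 here and for the ov9650) presumably records the sensor's register data width in bytes, so the shared m5602 read/write helpers can serve both the one-byte sensors and the two-byte s5k4aa writes that previously needed SENSOR_LONG and per-sensor copies. A purely hypothetical sketch of how such a field might be consumed, not the driver's actual helper:

/* Assumption: branch on the per-sensor register width when packing a
 * value for the shared write helper. */
static int write_sensor_regW(struct sd *sd, const u8 address, u16 value)
{
	u8 buf[2];

	if (sd->sensor->i2c_regW == 2) {
		buf[0] = value >> 8;	/* 16-bit registers, e.g. s5k4aa */
		buf[1] = value & 0xff;
		return m5602_write_sensor(sd, address, buf, 2);
	}
	buf[0] = value & 0xff;		/* 8-bit registers, e.g. po1030 */
	return m5602_write_sensor(sd, address, buf, 1);
}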
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index 14b1eac5b812..e564a61a72d7 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -18,6 +18,40 @@
18 18
19#include "m5602_s5k4aa.h" 19#include "m5602_s5k4aa.h"
20 20
21static
22 const
23 struct dmi_system_id s5k4aa_vflip_dmi_table[] = {
24 {
25 .ident = "Fujitsu-Siemens Amilo Xa 2528",
26 .matches = {
27 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
28 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xa 2528")
29 }
30 }, {
31 .ident = "Fujitsu-Siemens Amilo Xi 2550",
32 .matches = {
33 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
34 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 2550")
35 }
36 }, {
37 .ident = "MSI GX700",
38 .matches = {
39 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
40 DMI_MATCH(DMI_PRODUCT_NAME, "GX700"),
41 DMI_MATCH(DMI_BIOS_DATE, "07/26/2007")
42 }
43 }, {
44 .ident = "MSI GX700/GX705/EX700",
45 .matches = {
46 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
47 DMI_MATCH(DMI_PRODUCT_NAME, "GX700/GX705/EX700")
48 }
49 },
50 { }
51};
52
53static void s5k4aa_dump_registers(struct sd *sd);
54
21int s5k4aa_probe(struct sd *sd) 55int s5k4aa_probe(struct sd *sd)
22{ 56{
23 u8 prod_id[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 57 u8 prod_id[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
@@ -49,7 +83,7 @@ int s5k4aa_probe(struct sd *sd)
49 83
50 case SENSOR: 84 case SENSOR:
51 data[0] = preinit_s5k4aa[i][2]; 85 data[0] = preinit_s5k4aa[i][2];
52 err = s5k4aa_write_sensor(sd, 86 err = m5602_write_sensor(sd,
53 preinit_s5k4aa[i][1], 87 preinit_s5k4aa[i][1],
54 data, 1); 88 data, 1);
55 break; 89 break;
@@ -57,7 +91,7 @@ int s5k4aa_probe(struct sd *sd)
57 case SENSOR_LONG: 91 case SENSOR_LONG:
58 data[0] = preinit_s5k4aa[i][2]; 92 data[0] = preinit_s5k4aa[i][2];
59 data[1] = preinit_s5k4aa[i][3]; 93 data[1] = preinit_s5k4aa[i][3];
60 err = s5k4aa_write_sensor(sd, 94 err = m5602_write_sensor(sd,
61 preinit_s5k4aa[i][1], 95 preinit_s5k4aa[i][1],
62 data, 2); 96 data, 2);
63 break; 97 break;
@@ -68,13 +102,14 @@ int s5k4aa_probe(struct sd *sd)
68 } 102 }
69 103
70 /* Test some registers, but we don't know their exact meaning yet */ 104 /* Test some registers, but we don't know their exact meaning yet */
71 if (s5k4aa_read_sensor(sd, 0x00, prod_id, sizeof(prod_id))) 105 if (m5602_read_sensor(sd, 0x00, prod_id, sizeof(prod_id)))
72 return -ENODEV; 106 return -ENODEV;
73 107
74 if (memcmp(prod_id, expected_prod_id, sizeof(prod_id))) 108 if (memcmp(prod_id, expected_prod_id, sizeof(prod_id)))
75 return -ENODEV; 109 return -ENODEV;
76 else 110 else
77 info("Detected a s5k4aa sensor"); 111 info("Detected a s5k4aa sensor");
112
78sensor_found: 113sensor_found:
79 sd->gspca_dev.cam.cam_mode = s5k4aa.modes; 114 sd->gspca_dev.cam.cam_mode = s5k4aa.modes;
80 sd->gspca_dev.cam.nmodes = s5k4aa.nmodes; 115 sd->gspca_dev.cam.nmodes = s5k4aa.nmodes;
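
The probe path reads a block of ID registers through the shared m5602_read_sensor() helper and rejects the device unless the bytes match the expected product ID. A compact model of that detect-and-compare step — the ID bytes used here are placeholders, not the real S5K4AA values:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define ENODEV 19

    /* Stand-in for m5602_read_sensor(): returns 0 on success and fills
     * the buffer with placeholder ID bytes. */
    static int read_id(uint8_t *buf, size_t len)
    {
            static const uint8_t fake[6] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };

            memcpy(buf, fake, len); /* pretend the bus transfer worked */
            return 0;
    }

    int main(void)
    {
            const uint8_t expected[6] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };
            uint8_t prod_id[6] = { 0 };

            if (read_id(prod_id, sizeof(prod_id)))
                    return -ENODEV;
            if (memcmp(prod_id, expected, sizeof(prod_id)))
                    return -ENODEV;
            printf("Detected a s5k4aa sensor\n");
            return 0;
    }
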
@@ -84,90 +119,6 @@ sensor_found:
84 return 0; 119 return 0;
85} 120}
86 121
87int s5k4aa_read_sensor(struct sd *sd, const u8 address,
88 u8 *i2c_data, const u8 len)
89{
90 int err, i;
91
92 do {
93 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
94 } while ((*i2c_data & I2C_BUSY) && !err);
95 if (err < 0)
96 goto out;
97
98 err = m5602_write_bridge(sd, M5602_XB_I2C_DEV_ADDR,
99 sd->sensor->i2c_slave_id);
100 if (err < 0)
101 goto out;
102
103 err = m5602_write_bridge(sd, M5602_XB_I2C_REG_ADDR, address);
104 if (err < 0)
105 goto out;
106
107 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x18 + len);
108 if (err < 0)
109 goto out;
110
111 do {
112 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
113 } while ((*i2c_data & I2C_BUSY) && !err);
114 if (err < 0)
115 goto out;
116
117 for (i = 0; (i < len) & !err; i++) {
118 err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
119
120 PDEBUG(D_CONF, "Reading sensor register "
121 "0x%x containing 0x%x ", address, *i2c_data);
122 }
123out:
124 return (err < 0) ? err : 0;
125}
126
127int s5k4aa_write_sensor(struct sd *sd, const u8 address,
128 u8 *i2c_data, const u8 len)
129{
130 int err, i;
131 u8 *p;
132 struct usb_device *udev = sd->gspca_dev.dev;
133 __u8 *buf = sd->gspca_dev.usb_buf;
134
135 /* No sensor with a data width larger than 16 bits has yet been seen */
136 if (len > 2 || !len)
137 return -EINVAL;
138
139 memcpy(buf, sensor_urb_skeleton,
140 sizeof(sensor_urb_skeleton));
141
142 buf[11] = sd->sensor->i2c_slave_id;
143 buf[15] = address;
144
145 /* Special case larger sensor writes */
146 p = buf + 16;
147
148 /* Copy a four byte write sequence for each byte to be written to */
149 for (i = 0; i < len; i++) {
150 memcpy(p, sensor_urb_skeleton + 16, 4);
151 p[3] = i2c_data[i];
152 p += 4;
153 PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
154 address, i2c_data[i]);
155 }
156
157 /* Copy the tailer */
158 memcpy(p, sensor_urb_skeleton + 20, 4);
159
160 /* Set the total length */
161 p[3] = 0x10 + len;
162
163 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
164 0x04, 0x40, 0x19,
165 0x0000, buf,
166 20 + len * 4, M5602_URB_MSG_TIMEOUT);
167
168 return (err < 0) ? err : 0;
169}
170
171int s5k4aa_init(struct sd *sd) 122int s5k4aa_init(struct sd *sd)
172{ 123{
173 int i, err = 0; 124 int i, err = 0;
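
The two functions deleted above were near-verbatim per-sensor copies of the same I2C routine; the calls now go through the shared m5602_read_sensor()/m5602_write_sensor() helpers, presumably living in the common bridge code. The read side polls the bridge's I2C status register until the BUSY bit clears, programs the slave id, register address and a control word (0x18 + len), waits again, then drains the data register. A self-contained model of that sequence — the bridge register offsets are made up for the example, and the deleted copy's `(i < len) & !err` bitwise AND is written here as the intended logical `&&`:

    #include <stdio.h>
    #include <stdint.h>

    #define I2C_BUSY 0x80

    /* Fake bridge: reports BUSY twice, then idle; data reads return 0x42. */
    static int read_bridge(uint8_t reg, uint8_t *val)
    {
            static int polls;

            *val = ((reg == 0x08 /* I2C_STATUS, offset assumed */) && polls++ < 2)
                    ? I2C_BUSY : 0x42;
            return 0;
    }

    static int write_bridge(uint8_t reg, uint8_t val)
    {
            printf("bridge[0x%02x] <= 0x%02x\n", reg, val);
            return 0;
    }

    static int read_sensor(uint8_t slave, uint8_t address, uint8_t *data, uint8_t len)
    {
            uint8_t status, i;
            int err;

            do {                    /* wait until the I2C engine is idle */
                    err = read_bridge(0x08, &status);
            } while ((status & I2C_BUSY) && !err);
            if (err)
                    return err;

            write_bridge(0x09, slave);      /* I2C_DEV_ADDR, offset assumed */
            write_bridge(0x0a, address);    /* I2C_REG_ADDR */
            write_bridge(0x0b, 0x18 + len); /* I2C_CTRL: kick off a read */

            do {                    /* wait for the transfer to finish */
                    err = read_bridge(0x08, &status);
            } while ((status & I2C_BUSY) && !err);

            for (i = 0; i < len && !err; i++)
                    err = read_bridge(0x0c, &data[i]);      /* I2C_DATA */
            return err;
    }

    int main(void)
    {
            uint8_t id[2];

            if (!read_sensor(0x5a, 0x00, id, sizeof(id)))
                    printf("read 0x%02x 0x%02x\n", id[0], id[1]);
            return 0;
    }
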
@@ -184,14 +135,14 @@ int s5k4aa_init(struct sd *sd)
184 135
185 case SENSOR: 136 case SENSOR:
186 data[0] = init_s5k4aa[i][2]; 137 data[0] = init_s5k4aa[i][2];
187 err = s5k4aa_write_sensor(sd, 138 err = m5602_write_sensor(sd,
188 init_s5k4aa[i][1], data, 1); 139 init_s5k4aa[i][1], data, 1);
189 break; 140 break;
190 141
191 case SENSOR_LONG: 142 case SENSOR_LONG:
192 data[0] = init_s5k4aa[i][2]; 143 data[0] = init_s5k4aa[i][2];
193 data[1] = init_s5k4aa[i][3]; 144 data[1] = init_s5k4aa[i][3];
194 err = s5k4aa_write_sensor(sd, 145 err = m5602_write_sensor(sd,
195 init_s5k4aa[i][1], data, 2); 146 init_s5k4aa[i][1], data, 2);
196 break; 147 break;
197 default: 148 default:
@@ -206,21 +157,21 @@ int s5k4aa_init(struct sd *sd)
206 if (!err && dmi_check_system(s5k4aa_vflip_dmi_table)) { 157 if (!err && dmi_check_system(s5k4aa_vflip_dmi_table)) {
207 u8 data = 0x02; 158 u8 data = 0x02;
208 info("vertical flip quirk active"); 159 info("vertical flip quirk active");
209 s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 160 m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
210 s5k4aa_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); 161 m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
211 data |= S5K4AA_RM_V_FLIP; 162 data |= S5K4AA_RM_V_FLIP;
212 data &= ~S5K4AA_RM_H_FLIP; 163 data &= ~S5K4AA_RM_H_FLIP;
213 s5k4aa_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); 164 m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
214 165
215 /* Decrement COLSTART to preserve color order (BGGR) */ 166 /* Decrement COLSTART to preserve color order (BGGR) */
216 s5k4aa_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 167 m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
217 data--; 168 data--;
218 s5k4aa_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 169 m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
219 170
220 /* Increment ROWSTART to preserve color order (BGGR) */ 171 /* Increment ROWSTART to preserve color order (BGGR) */
221 s5k4aa_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 172 m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
222 data++; 173 data++;
223 s5k4aa_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 174 m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
224 } 175 }
225 176
226 return (err < 0) ? err : 0; 177 return (err < 0) ? err : 0;
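
When the DMI quirk fires, the code above does a read-modify-write of READ_MODE (V_FLIP is bit 7 and H_FLIP bit 6, per the shifts used later in this file) and then moves the readout window by one row and one column so the Bayer mosaic still starts on the same colour. The bit manipulation in isolation:

    #include <stdio.h>
    #include <stdint.h>

    #define RM_V_FLIP 0x80
    #define RM_H_FLIP 0x40

    int main(void)
    {
            uint8_t read_mode = 0x40;       /* pretend H_FLIP was set */
            uint8_t rowstart = 0x10, colstart = 0x10;

            read_mode |= RM_V_FLIP;         /* enable vertical flip */
            read_mode &= ~RM_H_FLIP;        /* make sure hflip is off */

            colstart--;     /* keep BGGR order after mirroring */
            rowstart++;

            printf("read_mode=0x%02x row=0x%02x col=0x%02x\n",
                   read_mode, rowstart, colstart);
            return 0;
    }
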
@@ -237,20 +188,20 @@ int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
237 u8 data = S5K4AA_PAGE_MAP_2; 188 u8 data = S5K4AA_PAGE_MAP_2;
238 int err; 189 int err;
239 190
240 err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 191 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
241 if (err < 0) 192 if (err < 0)
242 goto out; 193 goto out;
243 194
244 err = s5k4aa_read_sensor(sd, S5K4AA_EXPOSURE_HI, &data, 1); 195 err = m5602_read_sensor(sd, S5K4AA_EXPOSURE_HI, &data, 1);
245 if (err < 0) 196 if (err < 0)
246 goto out; 197 goto out;
247 198
248 *val = data << 8; 199 *val = data << 8;
249 err = s5k4aa_read_sensor(sd, S5K4AA_EXPOSURE_LO, &data, 1); 200 err = m5602_read_sensor(sd, S5K4AA_EXPOSURE_LO, &data, 1);
250 *val |= data; 201 *val |= data;
251 PDEBUG(D_V4L2, "Read exposure %d", *val); 202 PDEBUG(D_V4L2, "Read exposure %d", *val);
252out: 203out:
253 return (err < 0) ? err : 0; 204 return err;
254} 205}
255 206
256int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val) 207int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
@@ -260,17 +211,17 @@ int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
260 int err; 211 int err;
261 212
262 PDEBUG(D_V4L2, "Set exposure to %d", val); 213 PDEBUG(D_V4L2, "Set exposure to %d", val);
263 err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 214 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
264 if (err < 0) 215 if (err < 0)
265 goto out; 216 goto out;
266 data = (val >> 8) & 0xff; 217 data = (val >> 8) & 0xff;
267 err = s5k4aa_write_sensor(sd, S5K4AA_EXPOSURE_HI, &data, 1); 218 err = m5602_write_sensor(sd, S5K4AA_EXPOSURE_HI, &data, 1);
268 if (err < 0) 219 if (err < 0)
269 goto out; 220 goto out;
270 data = val & 0xff; 221 data = val & 0xff;
271 err = s5k4aa_write_sensor(sd, S5K4AA_EXPOSURE_LO, &data, 1); 222 err = m5602_write_sensor(sd, S5K4AA_EXPOSURE_LO, &data, 1);
272out: 223out:
273 return (err < 0) ? err : 0; 224 return err;
274} 225}
275 226
276int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 227int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
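
Exposure is a 16-bit quantity split across EXPOSURE_HI and EXPOSURE_LO: the getter above reassembles it as (hi << 8) | lo and the setter writes val >> 8 and val & 0xff. A short round-trip check of that encoding:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int32_t val = 0x1a2b;
            uint8_t hi = (val >> 8) & 0xff; /* written to EXPOSURE_HI */
            uint8_t lo = val & 0xff;        /* written to EXPOSURE_LO */
            int32_t readback = (hi << 8) | lo;

            printf("wrote 0x%04x, read back 0x%04x\n",
                   (unsigned)val, (unsigned)readback);
            return 0;
    }
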
@@ -279,16 +230,16 @@ int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
279 u8 data = S5K4AA_PAGE_MAP_2; 230 u8 data = S5K4AA_PAGE_MAP_2;
280 int err; 231 int err;
281 232
282 err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 233 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
283 if (err < 0) 234 if (err < 0)
284 goto out; 235 goto out;
285 236
286 err = s5k4aa_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 237 err = m5602_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
287 *val = (data & S5K4AA_RM_V_FLIP) >> 7; 238 *val = (data & S5K4AA_RM_V_FLIP) >> 7;
288 PDEBUG(D_V4L2, "Read vertical flip %d", *val); 239 PDEBUG(D_V4L2, "Read vertical flip %d", *val);
289 240
290out: 241out:
291 return (err < 0) ? err : 0; 242 return err;
292} 243}
293 244
294int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 245int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -298,35 +249,35 @@ int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
298 int err; 249 int err;
299 250
300 PDEBUG(D_V4L2, "Set vertical flip to %d", val); 251 PDEBUG(D_V4L2, "Set vertical flip to %d", val);
301 err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 252 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
302 if (err < 0) 253 if (err < 0)
303 goto out; 254 goto out;
304 err = s5k4aa_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); 255 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
305 if (err < 0) 256 if (err < 0)
306 goto out; 257 goto out;
307 data = ((data & ~S5K4AA_RM_V_FLIP) 258 data = ((data & ~S5K4AA_RM_V_FLIP)
308 | ((val & 0x01) << 7)); 259 | ((val & 0x01) << 7));
309 err = s5k4aa_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); 260 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
310 if (err < 0) 261 if (err < 0)
311 goto out; 262 goto out;
312 263
313 if (val) { 264 if (val) {
314 err = s5k4aa_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 265 err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
315 if (err < 0) 266 if (err < 0)
316 goto out; 267 goto out;
317 268
318 data++; 269 data++;
319 err = s5k4aa_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 270 err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
320 } else { 271 } else {
321 err = s5k4aa_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 272 err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
322 if (err < 0) 273 if (err < 0)
323 goto out; 274 goto out;
324 275
325 data--; 276 data--;
326 err = s5k4aa_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 277 err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
327 } 278 }
328out: 279out:
329 return (err < 0) ? err : 0; 280 return err;
330} 281}
331 282
332int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 283int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
@@ -335,15 +286,15 @@ int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
335 u8 data = S5K4AA_PAGE_MAP_2; 286 u8 data = S5K4AA_PAGE_MAP_2;
336 int err; 287 int err;
337 288
338 err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 289 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
339 if (err < 0) 290 if (err < 0)
340 goto out; 291 goto out;
341 292
342 err = s5k4aa_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 293 err = m5602_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
343 *val = (data & S5K4AA_RM_H_FLIP) >> 6; 294 *val = (data & S5K4AA_RM_H_FLIP) >> 6;
344 PDEBUG(D_V4L2, "Read horizontal flip %d", *val); 295 PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
345out: 296out:
346 return (err < 0) ? err : 0; 297 return err;
347} 298}
348 299
349int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 300int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -354,35 +305,35 @@ int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
354 305
355 PDEBUG(D_V4L2, "Set horizontal flip to %d", 306 PDEBUG(D_V4L2, "Set horizontal flip to %d",
356 val); 307 val);
357 err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 308 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
358 if (err < 0) 309 if (err < 0)
359 goto out; 310 goto out;
360 err = s5k4aa_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); 311 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
361 if (err < 0) 312 if (err < 0)
362 goto out; 313 goto out;
363 314
364 data = ((data & ~S5K4AA_RM_H_FLIP) | ((val & 0x01) << 6)); 315 data = ((data & ~S5K4AA_RM_H_FLIP) | ((val & 0x01) << 6));
365 err = s5k4aa_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); 316 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
366 if (err < 0) 317 if (err < 0)
367 goto out; 318 goto out;
368 319
369 if (val) { 320 if (val) {
370 err = s5k4aa_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 321 err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
371 if (err < 0) 322 if (err < 0)
372 goto out; 323 goto out;
373 data++; 324 data++;
374 err = s5k4aa_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 325 err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
375 if (err < 0) 326 if (err < 0)
376 goto out; 327 goto out;
377 } else { 328 } else {
378 err = s5k4aa_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 329 err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
379 if (err < 0) 330 if (err < 0)
380 goto out; 331 goto out;
381 data--; 332 data--;
382 err = s5k4aa_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 333 err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
383 } 334 }
384out: 335out:
385 return (err < 0) ? err : 0; 336 return err;
386} 337}
387 338
388int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 339int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
@@ -391,16 +342,16 @@ int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
391 u8 data = S5K4AA_PAGE_MAP_2; 342 u8 data = S5K4AA_PAGE_MAP_2;
392 int err; 343 int err;
393 344
394 err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 345 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
395 if (err < 0) 346 if (err < 0)
396 goto out; 347 goto out;
397 348
398 err = s5k4aa_read_sensor(sd, S5K4AA_GAIN_2, &data, 1); 349 err = m5602_read_sensor(sd, S5K4AA_GAIN_2, &data, 1);
399 *val = data; 350 *val = data;
400 PDEBUG(D_V4L2, "Read gain %d", *val); 351 PDEBUG(D_V4L2, "Read gain %d", *val);
401 352
402out: 353out:
403 return (err < 0) ? err : 0; 354 return err;
404} 355}
405 356
406int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val) 357int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val)
@@ -410,28 +361,28 @@ int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val)
410 int err; 361 int err;
411 362
412 PDEBUG(D_V4L2, "Set gain to %d", val); 363 PDEBUG(D_V4L2, "Set gain to %d", val);
413 err = s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 364 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
414 if (err < 0) 365 if (err < 0)
415 goto out; 366 goto out;
416 367
417 data = val & 0xff; 368 data = val & 0xff;
418 err = s5k4aa_write_sensor(sd, S5K4AA_GAIN_2, &data, 1); 369 err = m5602_write_sensor(sd, S5K4AA_GAIN_2, &data, 1);
419 370
420out: 371out:
421 return (err < 0) ? err : 0; 372 return err;
422} 373}
423 374
424void s5k4aa_dump_registers(struct sd *sd) 375static void s5k4aa_dump_registers(struct sd *sd)
425{ 376{
426 int address; 377 int address;
427 u8 page, old_page; 378 u8 page, old_page;
428 s5k4aa_read_sensor(sd, S5K4AA_PAGE_MAP, &old_page, 1); 379 m5602_read_sensor(sd, S5K4AA_PAGE_MAP, &old_page, 1);
429 for (page = 0; page < 16; page++) { 380 for (page = 0; page < 16; page++) {
430 s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &page, 1); 381 m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &page, 1);
431 info("Dumping the s5k4aa register state for page 0x%x", page); 382 info("Dumping the s5k4aa register state for page 0x%x", page);
432 for (address = 0; address <= 0xff; address++) { 383 for (address = 0; address <= 0xff; address++) {
433 u8 value = 0; 384 u8 value = 0;
434 s5k4aa_read_sensor(sd, address, &value, 1); 385 m5602_read_sensor(sd, address, &value, 1);
435 info("register 0x%x contains 0x%x", 386 info("register 0x%x contains 0x%x",
436 address, value); 387 address, value);
437 } 388 }
@@ -439,15 +390,15 @@ void s5k4aa_dump_registers(struct sd *sd)
439 info("s5k4aa register state dump complete"); 390 info("s5k4aa register state dump complete");
440 391
441 for (page = 0; page < 16; page++) { 392 for (page = 0; page < 16; page++) {
442 s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &page, 1); 393 m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &page, 1);
443 info("Probing for which registers that are " 394 info("Probing for which registers that are "
444 "read/write for page 0x%x", page); 395 "read/write for page 0x%x", page);
445 for (address = 0; address <= 0xff; address++) { 396 for (address = 0; address <= 0xff; address++) {
446 u8 old_value, ctrl_value, test_value = 0xff; 397 u8 old_value, ctrl_value, test_value = 0xff;
447 398
448 s5k4aa_read_sensor(sd, address, &old_value, 1); 399 m5602_read_sensor(sd, address, &old_value, 1);
449 s5k4aa_write_sensor(sd, address, &test_value, 1); 400 m5602_write_sensor(sd, address, &test_value, 1);
450 s5k4aa_read_sensor(sd, address, &ctrl_value, 1); 401 m5602_read_sensor(sd, address, &ctrl_value, 1);
451 402
452 if (ctrl_value == test_value) 403 if (ctrl_value == test_value)
453 info("register 0x%x is writeable", address); 404 info("register 0x%x is writeable", address);
@@ -455,9 +406,9 @@ void s5k4aa_dump_registers(struct sd *sd)
455 info("register 0x%x is read only", address); 406 info("register 0x%x is read only", address);
456 407
457 /* Restore original value */ 408 /* Restore original value */
458 s5k4aa_write_sensor(sd, address, &old_value, 1); 409 m5602_write_sensor(sd, address, &old_value, 1);
459 } 410 }
460 } 411 }
461 info("Read/write register probing complete"); 412 info("Read/write register probing complete");
462 s5k4aa_write_sensor(sd, S5K4AA_PAGE_MAP, &old_page, 1); 413 m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &old_page, 1);
463} 414}
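
s5k4aa_dump_registers(), now static, probes writability by saving each register, writing 0xff, reading it back and restoring the original value; a register is reported writeable only if the test pattern sticks. A user-space model of that probe over one fake 256-byte page (real registers may of course have side effects the model ignores):

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t page[256];
    static const uint8_t read_only_mask[256] = { [0x00] = 1, [0x01] = 1 };

    static void reg_write(uint8_t addr, uint8_t val)
    {
            if (!read_only_mask[addr])
                    page[addr] = val;
    }

    static uint8_t reg_read(uint8_t addr)
    {
            return page[addr];
    }

    int main(void)
    {
            int addr;

            for (addr = 0; addr <= 0xff; addr++) {
                    uint8_t old = reg_read(addr), back;

                    reg_write(addr, 0xff);  /* test pattern */
                    back = reg_read(addr);
                    printf("register 0x%02x is %s\n", addr,
                           back == 0xff ? "writeable" : "read only");
                    reg_write(addr, old);   /* restore original value */
            }
            return 0;
    }
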
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
index eaef67655afa..1f88b0d040c4 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
@@ -41,11 +41,10 @@
41#define S5K4AA_WINDOW_HEIGHT_LO 0x09 41#define S5K4AA_WINDOW_HEIGHT_LO 0x09
42#define S5K4AA_WINDOW_WIDTH_HI 0x0a 42#define S5K4AA_WINDOW_WIDTH_HI 0x0a
43#define S5K4AA_WINDOW_WIDTH_LO 0x0b 43#define S5K4AA_WINDOW_WIDTH_LO 0x0b
44#define S5K4AA_GLOBAL_GAIN__ 0x0f /* Only a guess ATM !!! */ 44#define S5K4AA_GLOBAL_GAIN__ 0x0f
45#define S5K4AA_H_BLANK_HI__ 0x1d /* Only a guess ATM !!! sync lost 45/* sync lost, if too low, reduces frame rate if too high */
46 if too low, reduces frame rate 46#define S5K4AA_H_BLANK_HI__ 0x1d
47 if too high */ 47#define S5K4AA_H_BLANK_LO__ 0x1e
48#define S5K4AA_H_BLANK_LO__ 0x1e /* Only a guess ATM !!! */
49#define S5K4AA_EXPOSURE_HI 0x17 48#define S5K4AA_EXPOSURE_HI 0x17
50#define S5K4AA_EXPOSURE_LO 0x18 49#define S5K4AA_EXPOSURE_LO 0x18
51#define S5K4AA_GAIN_1 0x1f /* (digital?) gain : 5 bits */ 50#define S5K4AA_GAIN_1 0x1f /* (digital?) gain : 5 bits */
@@ -68,13 +67,6 @@ int s5k4aa_probe(struct sd *sd);
68int s5k4aa_init(struct sd *sd); 67int s5k4aa_init(struct sd *sd);
69int s5k4aa_power_down(struct sd *sd); 68int s5k4aa_power_down(struct sd *sd);
70 69
71void s5k4aa_dump_registers(struct sd *sd);
72
73int s5k4aa_read_sensor(struct sd *sd, const u8 address,
74 u8 *i2c_data, const u8 len);
75int s5k4aa_write_sensor(struct sd *sd, const u8 address,
76 u8 *i2c_data, const u8 len);
77
78int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val); 70int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
79int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val); 71int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
80int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val); 72int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
@@ -89,9 +81,8 @@ static struct m5602_sensor s5k4aa = {
89 .probe = s5k4aa_probe, 81 .probe = s5k4aa_probe,
90 .init = s5k4aa_init, 82 .init = s5k4aa_init,
91 .power_down = s5k4aa_power_down, 83 .power_down = s5k4aa_power_down,
92 .read_sensor = s5k4aa_read_sensor,
93 .write_sensor = s5k4aa_write_sensor,
94 .i2c_slave_id = 0x5a, 84 .i2c_slave_id = 0x5a,
85 .i2c_regW = 2,
95 .nctrls = 4, 86 .nctrls = 4,
96 .ctrls = { 87 .ctrls = {
97 { 88 {
@@ -338,32 +329,4 @@ static const unsigned char init_s5k4aa[][4] =
338 {SENSOR, S5K4AA_GAIN_2, 0xa0, 0x00} 329 {SENSOR, S5K4AA_GAIN_2, 0xa0, 0x00}
339}; 330};
340 331
341static
342 const
343 struct dmi_system_id s5k4aa_vflip_dmi_table[] = {
344 {
345 .ident = "Fujitsu-Siemens Amilo Xa 2528",
346 .matches = {
347 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
348 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xa 2528")
349 }
350 },
351 {
352 .ident = "Fujitsu-Siemens Amilo Xi 2550",
353 .matches = {
354 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
355 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 2550")
356 }
357 },
358 {
359 .ident = "MSI GX700",
360 .matches = {
361 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
362 DMI_MATCH(DMI_PRODUCT_NAME, "GX700"),
363 DMI_MATCH(DMI_BIOS_DATE, "07/26/2007")
364 }
365 },
366 { }
367};
368
369#endif 332#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.c b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
index 8988a728e0b4..af3f2dc2c702 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
@@ -18,6 +18,8 @@
18 18
19#include "m5602_s5k83a.h" 19#include "m5602_s5k83a.h"
20 20
21static void s5k83a_dump_registers(struct sd *sd);
22
21int s5k83a_probe(struct sd *sd) 23int s5k83a_probe(struct sd *sd)
22{ 24{
23 u8 prod_id = 0, ver_id = 0; 25 u8 prod_id = 0, ver_id = 0;
@@ -39,7 +41,7 @@ int s5k83a_probe(struct sd *sd)
39 for (i = 0; i < ARRAY_SIZE(preinit_s5k83a) && !err; i++) { 41 for (i = 0; i < ARRAY_SIZE(preinit_s5k83a) && !err; i++) {
40 u8 data[2] = {preinit_s5k83a[i][2], preinit_s5k83a[i][3]}; 42 u8 data[2] = {preinit_s5k83a[i][2], preinit_s5k83a[i][3]};
41 if (preinit_s5k83a[i][0] == SENSOR) 43 if (preinit_s5k83a[i][0] == SENSOR)
42 err = s5k83a_write_sensor(sd, preinit_s5k83a[i][1], 44 err = m5602_write_sensor(sd, preinit_s5k83a[i][1],
43 data, 2); 45 data, 2);
44 else 46 else
45 err = m5602_write_bridge(sd, preinit_s5k83a[i][1], 47 err = m5602_write_bridge(sd, preinit_s5k83a[i][1],
@@ -49,10 +51,10 @@ int s5k83a_probe(struct sd *sd)
49 /* We don't know what register (if any) that contain the product id 51 /* We don't know what register (if any) that contain the product id
50 * Just pick the first addresses that seem to produce the same results 52 * Just pick the first addresses that seem to produce the same results
51 * on multiple machines */ 53 * on multiple machines */
52 if (s5k83a_read_sensor(sd, 0x00, &prod_id, 1)) 54 if (m5602_read_sensor(sd, 0x00, &prod_id, 1))
53 return -ENODEV; 55 return -ENODEV;
54 56
55 if (s5k83a_read_sensor(sd, 0x01, &ver_id, 1)) 57 if (m5602_read_sensor(sd, 0x01, &ver_id, 1))
56 return -ENODEV; 58 return -ENODEV;
57 59
58 if ((prod_id == 0xff) || (ver_id == 0xff)) 60 if ((prod_id == 0xff) || (ver_id == 0xff))
@@ -68,91 +70,6 @@ sensor_found:
68 return 0; 70 return 0;
69} 71}
70 72
71int s5k83a_read_sensor(struct sd *sd, const u8 address,
72 u8 *i2c_data, const u8 len)
73{
74 int err, i;
75
76 do {
77 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
78 } while ((*i2c_data & I2C_BUSY) && !err);
79 if (err < 0)
80 goto out;
81
82 err = m5602_write_bridge(sd, M5602_XB_I2C_DEV_ADDR,
83 sd->sensor->i2c_slave_id);
84 if (err < 0)
85 goto out;
86
87 err = m5602_write_bridge(sd, M5602_XB_I2C_REG_ADDR, address);
88 if (err < 0)
89 goto out;
90
91 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x18 + len);
92 if (err < 0)
93 goto out;
94
95 do {
96 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
97 } while ((*i2c_data & I2C_BUSY) && !err);
98
99 if (err < 0)
100 goto out;
101 for (i = 0; i < len && !len; i++) {
102 err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
103
104 PDEBUG(D_CONF, "Reading sensor register "
105 "0x%x containing 0x%x ", address, *i2c_data);
106 }
107
108out:
109 return (err < 0) ? err : 0;
110}
111
112int s5k83a_write_sensor(struct sd *sd, const u8 address,
113 u8 *i2c_data, const u8 len)
114{
115 int err, i;
116 u8 *p;
117 struct usb_device *udev = sd->gspca_dev.dev;
118 __u8 *buf = sd->gspca_dev.usb_buf;
119
120 /* No sensor with a data width larger than 16 bits has yet been seen */
121 if (len > 2 || !len)
122 return -EINVAL;
123
124 memcpy(buf, sensor_urb_skeleton,
125 sizeof(sensor_urb_skeleton));
126
127 buf[11] = sd->sensor->i2c_slave_id;
128 buf[15] = address;
129
130 /* Special case larger sensor writes */
131 p = buf + 16;
132
133 /* Copy a four byte write sequence for each byte to be written to */
134 for (i = 0; i < len; i++) {
135 memcpy(p, sensor_urb_skeleton + 16, 4);
136 p[3] = i2c_data[i];
137 p += 4;
138 PDEBUG(D_CONF, "Writing sensor register 0x%x with 0x%x",
139 address, i2c_data[i]);
140 }
141
142 /* Copy the tailer */
143 memcpy(p, sensor_urb_skeleton + 20, 4);
144
145 /* Set the total length */
146 p[3] = 0x10 + len;
147
148 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
149 0x04, 0x40, 0x19,
150 0x0000, buf,
151 20 + len * 4, M5602_URB_MSG_TIMEOUT);
152
153 return (err < 0) ? err : 0;
154}
155
156int s5k83a_init(struct sd *sd) 73int s5k83a_init(struct sd *sd)
157{ 74{
158 int i, err = 0; 75 int i, err = 0;
@@ -169,14 +86,14 @@ int s5k83a_init(struct sd *sd)
169 86
170 case SENSOR: 87 case SENSOR:
171 data[0] = init_s5k83a[i][2]; 88 data[0] = init_s5k83a[i][2];
172 err = s5k83a_write_sensor(sd, 89 err = m5602_write_sensor(sd,
173 init_s5k83a[i][1], data, 1); 90 init_s5k83a[i][1], data, 1);
174 break; 91 break;
175 92
176 case SENSOR_LONG: 93 case SENSOR_LONG:
177 data[0] = init_s5k83a[i][2]; 94 data[0] = init_s5k83a[i][2];
178 data[1] = init_s5k83a[i][3]; 95 data[1] = init_s5k83a[i][3];
179 err = s5k83a_write_sensor(sd, 96 err = m5602_write_sensor(sd,
180 init_s5k83a[i][1], data, 2); 97 init_s5k83a[i][1], data, 2);
181 break; 98 break;
182 default: 99 default:
@@ -200,14 +117,14 @@ void s5k83a_dump_registers(struct sd *sd)
200{ 117{
201 int address; 118 int address;
202 u8 page, old_page; 119 u8 page, old_page;
203 s5k83a_read_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1); 120 m5602_read_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
204 121
205 for (page = 0; page < 16; page++) { 122 for (page = 0; page < 16; page++) {
206 s5k83a_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1); 123 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
207 info("Dumping the s5k83a register state for page 0x%x", page); 124 info("Dumping the s5k83a register state for page 0x%x", page);
208 for (address = 0; address <= 0xff; address++) { 125 for (address = 0; address <= 0xff; address++) {
209 u8 val = 0; 126 u8 val = 0;
210 s5k83a_read_sensor(sd, address, &val, 1); 127 m5602_read_sensor(sd, address, &val, 1);
211 info("register 0x%x contains 0x%x", 128 info("register 0x%x contains 0x%x",
212 address, val); 129 address, val);
213 } 130 }
@@ -215,15 +132,15 @@ void s5k83a_dump_registers(struct sd *sd)
215 info("s5k83a register state dump complete"); 132 info("s5k83a register state dump complete");
216 133
217 for (page = 0; page < 16; page++) { 134 for (page = 0; page < 16; page++) {
218 s5k83a_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1); 135 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
219 info("Probing for which registers that are read/write " 136 info("Probing for which registers that are read/write "
220 "for page 0x%x", page); 137 "for page 0x%x", page);
221 for (address = 0; address <= 0xff; address++) { 138 for (address = 0; address <= 0xff; address++) {
222 u8 old_val, ctrl_val, test_val = 0xff; 139 u8 old_val, ctrl_val, test_val = 0xff;
223 140
224 s5k83a_read_sensor(sd, address, &old_val, 1); 141 m5602_read_sensor(sd, address, &old_val, 1);
225 s5k83a_write_sensor(sd, address, &test_val, 1); 142 m5602_write_sensor(sd, address, &test_val, 1);
226 s5k83a_read_sensor(sd, address, &ctrl_val, 1); 143 m5602_read_sensor(sd, address, &ctrl_val, 1);
227 144
228 if (ctrl_val == test_val) 145 if (ctrl_val == test_val)
229 info("register 0x%x is writeable", address); 146 info("register 0x%x is writeable", address);
@@ -231,11 +148,11 @@ void s5k83a_dump_registers(struct sd *sd)
231 info("register 0x%x is read only", address); 148 info("register 0x%x is read only", address);
232 149
233 /* Restore original val */ 150 /* Restore original val */
234 s5k83a_write_sensor(sd, address, &old_val, 1); 151 m5602_write_sensor(sd, address, &old_val, 1);
235 } 152 }
236 } 153 }
237 info("Read/write register probing complete"); 154 info("Read/write register probing complete");
238 s5k83a_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1); 155 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
239} 156}
240 157
241int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val) 158int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -244,11 +161,15 @@ int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
244 u8 data[2]; 161 u8 data[2];
245 struct sd *sd = (struct sd *) gspca_dev; 162 struct sd *sd = (struct sd *) gspca_dev;
246 163
247 err = s5k83a_read_sensor(sd, S5K83A_BRIGHTNESS, data, 2); 164 err = m5602_read_sensor(sd, S5K83A_BRIGHTNESS, data, 2);
165 if (err < 0)
166 goto out;
167
248 data[1] = data[1] << 1; 168 data[1] = data[1] << 1;
249 *val = data[1]; 169 *val = data[1];
250 170
251 return (err < 0) ? err : 0; 171out:
172 return err;
252} 173}
253 174
254int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val) 175int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
@@ -259,23 +180,24 @@ int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
259 180
260 data[0] = 0x00; 181 data[0] = 0x00;
261 data[1] = 0x20; 182 data[1] = 0x20;
262 err = s5k83a_write_sensor(sd, 0x14, data, 2); 183 err = m5602_write_sensor(sd, 0x14, data, 2);
263 if (err < 0) 184 if (err < 0)
264 return err; 185 goto out;
265 186
266 data[0] = 0x01; 187 data[0] = 0x01;
267 data[1] = 0x00; 188 data[1] = 0x00;
268 err = s5k83a_write_sensor(sd, 0x0d, data, 2); 189 err = m5602_write_sensor(sd, 0x0d, data, 2);
269 if (err < 0) 190 if (err < 0)
270 return err; 191 goto out;
271 192
272 /* FIXME: This is not sane, we need to figure out the composition 193 /* FIXME: This is not sane, we need to figure out the composition
273 of these registers */ 194 of these registers */
274 data[0] = val >> 3; /* brightness, high 5 bits */ 195 data[0] = val >> 3; /* brightness, high 5 bits */
275 data[1] = val >> 1; /* brightness, high 7 bits */ 196 data[1] = val >> 1; /* brightness, high 7 bits */
276 err = s5k83a_write_sensor(sd, S5K83A_BRIGHTNESS, data, 2); 197 err = m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 2);
277 198
278 return (err < 0) ? err : 0; 199out:
200 return err;
279} 201}
280 202
281int s5k83a_get_whiteness(struct gspca_dev *gspca_dev, __s32 *val) 203int s5k83a_get_whiteness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -284,10 +206,14 @@ int s5k83a_get_whiteness(struct gspca_dev *gspca_dev, __s32 *val)
284 u8 data; 206 u8 data;
285 struct sd *sd = (struct sd *) gspca_dev; 207 struct sd *sd = (struct sd *) gspca_dev;
286 208
287 err = s5k83a_read_sensor(sd, S5K83A_WHITENESS, &data, 1); 209 err = m5602_read_sensor(sd, S5K83A_WHITENESS, &data, 1);
210 if (err < 0)
211 goto out;
288 212
289 *val = data; 213 *val = data;
290 return (err < 0) ? err : 0; 214
215out:
216 return err;
291} 217}
292 218
293int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val) 219int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val)
@@ -297,9 +223,9 @@ int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val)
297 struct sd *sd = (struct sd *) gspca_dev; 223 struct sd *sd = (struct sd *) gspca_dev;
298 224
299 data[0] = val; 225 data[0] = val;
300 err = s5k83a_write_sensor(sd, S5K83A_WHITENESS, data, 1); 226 err = m5602_write_sensor(sd, S5K83A_WHITENESS, data, 1);
301 227
302 return (err < 0) ? err : 0; 228 return err;
303} 229}
304 230
305int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 231int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
@@ -308,7 +234,9 @@ int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
308 u8 data[2]; 234 u8 data[2];
309 struct sd *sd = (struct sd *) gspca_dev; 235 struct sd *sd = (struct sd *) gspca_dev;
310 236
311 err = s5k83a_read_sensor(sd, S5K83A_GAIN, data, 2); 237 err = m5602_read_sensor(sd, S5K83A_GAIN, data, 2);
238 if (err < 0)
239 goto out;
312 240
313 data[1] = data[1] & 0x3f; 241 data[1] = data[1] & 0x3f;
314 if (data[1] > S5K83A_MAXIMUM_GAIN) 242 if (data[1] > S5K83A_MAXIMUM_GAIN)
@@ -316,7 +244,8 @@ int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
316 244
317 *val = data[1]; 245 *val = data[1];
318 246
319 return (err < 0) ? err : 0; 247out:
248 return err;
320} 249}
321 250
322int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val) 251int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val)
@@ -327,9 +256,8 @@ int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val)
327 256
328 data[0] = 0; 257 data[0] = 0;
329 data[1] = val; 258 data[1] = val;
330 err = s5k83a_write_sensor(sd, S5K83A_GAIN, data, 2); 259 err = m5602_write_sensor(sd, S5K83A_GAIN, data, 2);
331 260 return err;
332 return (err < 0) ? err : 0;
333} 261}
334 262
335int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 263int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
@@ -339,14 +267,15 @@ int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
339 struct sd *sd = (struct sd *) gspca_dev; 267 struct sd *sd = (struct sd *) gspca_dev;
340 268
341 data[0] = 0x05; 269 data[0] = 0x05;
342 err = s5k83a_write_sensor(sd, S5K83A_PAGE_MAP, data, 1); 270 err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1);
343 if (err < 0) 271 if (err < 0)
344 return err; 272 goto out;
345 273
346 err = s5k83a_read_sensor(sd, S5K83A_FLIP, data, 1); 274 err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
347 *val = (data[0] | 0x40) ? 1 : 0; 275 *val = (data[0] | 0x40) ? 1 : 0;
348 276
349 return (err < 0) ? err : 0; 277out:
278 return err;
350} 279}
351 280
352int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 281int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -356,25 +285,26 @@ int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
356 struct sd *sd = (struct sd *) gspca_dev; 285 struct sd *sd = (struct sd *) gspca_dev;
357 286
358 data[0] = 0x05; 287 data[0] = 0x05;
359 err = s5k83a_write_sensor(sd, S5K83A_PAGE_MAP, data, 1); 288 err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1);
360 if (err < 0) 289 if (err < 0)
361 return err; 290 goto out;
362 291
363 err = s5k83a_read_sensor(sd, S5K83A_FLIP, data, 1); 292 err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
364 if (err < 0) 293 if (err < 0)
365 return err; 294 goto out;
366 295
367 /* set or zero six bit, seven is hflip */ 296 /* set or zero six bit, seven is hflip */
368 data[0] = (val) ? (data[0] & 0x80) | 0x40 | S5K83A_FLIP_MASK 297 data[0] = (val) ? (data[0] & 0x80) | 0x40 | S5K83A_FLIP_MASK
369 : (data[0] & 0x80) | S5K83A_FLIP_MASK; 298 : (data[0] & 0x80) | S5K83A_FLIP_MASK;
370 err = s5k83a_write_sensor(sd, S5K83A_FLIP, data, 1); 299 err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1);
371 if (err < 0) 300 if (err < 0)
372 return err; 301 goto out;
373 302
374 data[0] = (val) ? 0x0b : 0x0a; 303 data[0] = (val) ? 0x0b : 0x0a;
375 err = s5k83a_write_sensor(sd, S5K83A_VFLIP_TUNE, data, 1); 304 err = m5602_write_sensor(sd, S5K83A_VFLIP_TUNE, data, 1);
376 305
377 return (err < 0) ? err : 0; 306out:
307 return err;
378} 308}
379 309
380int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 310int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
@@ -384,14 +314,15 @@ int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
384 struct sd *sd = (struct sd *) gspca_dev; 314 struct sd *sd = (struct sd *) gspca_dev;
385 315
386 data[0] = 0x05; 316 data[0] = 0x05;
387 err = s5k83a_write_sensor(sd, S5K83A_PAGE_MAP, data, 1); 317 err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1);
388 if (err < 0) 318 if (err < 0)
389 return err; 319 goto out;
390 320
391 err = s5k83a_read_sensor(sd, S5K83A_FLIP, data, 1); 321 err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
392 *val = (data[0] | 0x80) ? 1 : 0; 322 *val = (data[0] | 0x80) ? 1 : 0;
393 323
394 return (err < 0) ? err : 0; 324out:
325 return err;
395} 326}
396 327
397int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 328int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
@@ -401,23 +332,23 @@ int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
401 struct sd *sd = (struct sd *) gspca_dev; 332 struct sd *sd = (struct sd *) gspca_dev;
402 333
403 data[0] = 0x05; 334 data[0] = 0x05;
404 err = s5k83a_write_sensor(sd, S5K83A_PAGE_MAP, data, 1); 335 err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1);
405 if (err < 0) 336 if (err < 0)
406 return err; 337 goto out;
407 338
408 err = s5k83a_read_sensor(sd, S5K83A_FLIP, data, 1); 339 err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
409 if (err < 0) 340 if (err < 0)
410 return err; 341 goto out;
411 342
412 /* set or zero seven bit, six is vflip */ 343 /* set or zero seven bit, six is vflip */
413 data[0] = (val) ? (data[0] & 0x40) | 0x80 | S5K83A_FLIP_MASK 344 data[0] = (val) ? (data[0] & 0x40) | 0x80 | S5K83A_FLIP_MASK
414 : (data[0] & 0x40) | S5K83A_FLIP_MASK; 345 : (data[0] & 0x40) | S5K83A_FLIP_MASK;
415 err = s5k83a_write_sensor(sd, S5K83A_FLIP, data, 1); 346 err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1);
416 if (err < 0) 347 if (err < 0)
417 return err; 348 goto out;
418 349
419 data[0] = (val) ? 0x0a : 0x0b; 350 data[0] = (val) ? 0x0a : 0x0b;
420 err = s5k83a_write_sensor(sd, S5K83A_HFLIP_TUNE, data, 1); 351 err = m5602_write_sensor(sd, S5K83A_HFLIP_TUNE, data, 1);
421 352out:
422 return (err < 0) ? err : 0; 353 return err;
423} 354}
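
Throughout this file the accessors are converted from scattered early returns to the kernel's usual single-exit pattern: every bus access checks err and jumps to one `out` label, so the caller always sees the first failure. The shape of that idiom, with a stub standing in for the sensor accesses:

    #include <stdio.h>

    static int step(int n)
    {
            return n == 2 ? -5 /* pretend -EIO on the second step */ : 0;
    }

    static int do_sequence(void)
    {
            int err;

            err = step(1);
            if (err < 0)
                    goto out;

            err = step(2);
            if (err < 0)
                    goto out;

            err = step(3);  /* never reached in this example */
    out:
            return err;
    }

    int main(void)
    {
            printf("do_sequence() = %d\n", do_sequence());
            return 0;
    }
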
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.h b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
index ee3ee9cfca1d..05ccb5b57a88 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
@@ -22,15 +22,15 @@
22#include "m5602_sensor.h" 22#include "m5602_sensor.h"
23 23
24#define S5K83A_FLIP 0x01 24#define S5K83A_FLIP 0x01
25#define S5K83A_HFLIP_TUNE 0x03 25#define S5K83A_HFLIP_TUNE 0x03
26#define S5K83A_VFLIP_TUNE 0x05 26#define S5K83A_VFLIP_TUNE 0x05
27#define S5K83A_WHITENESS 0x0a 27#define S5K83A_WHITENESS 0x0a
28#define S5K83A_GAIN 0x18 28#define S5K83A_GAIN 0x18
29#define S5K83A_BRIGHTNESS 0x1b 29#define S5K83A_BRIGHTNESS 0x1b
30#define S5K83A_PAGE_MAP 0xec 30#define S5K83A_PAGE_MAP 0xec
31 31
32#define S5K83A_DEFAULT_BRIGHTNESS 0x71 32#define S5K83A_DEFAULT_BRIGHTNESS 0x71
33#define S5K83A_DEFAULT_WHITENESS 0x7e 33#define S5K83A_DEFAULT_WHITENESS 0x7e
34#define S5K83A_DEFAULT_GAIN 0x00 34#define S5K83A_DEFAULT_GAIN 0x00
35#define S5K83A_MAXIMUM_GAIN 0x3c 35#define S5K83A_MAXIMUM_GAIN 0x3c
36#define S5K83A_FLIP_MASK 0x10 36#define S5K83A_FLIP_MASK 0x10
@@ -46,13 +46,6 @@ int s5k83a_probe(struct sd *sd);
46int s5k83a_init(struct sd *sd); 46int s5k83a_init(struct sd *sd);
47int s5k83a_power_down(struct sd *sd); 47int s5k83a_power_down(struct sd *sd);
48 48
49void s5k83a_dump_registers(struct sd *sd);
50
51int s5k83a_read_sensor(struct sd *sd, const u8 address,
52 u8 *i2c_data, const u8 len);
53int s5k83a_write_sensor(struct sd *sd, const u8 address,
54 u8 *i2c_data, const u8 len);
55
56int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val); 49int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
57int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val); 50int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
58int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val); 51int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val);
@@ -64,15 +57,13 @@ int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
64int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val); 57int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
65int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val); 58int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
66 59
67
68static struct m5602_sensor s5k83a = { 60static struct m5602_sensor s5k83a = {
69 .name = "S5K83A", 61 .name = "S5K83A",
70 .probe = s5k83a_probe, 62 .probe = s5k83a_probe,
71 .init = s5k83a_init, 63 .init = s5k83a_init,
72 .power_down = s5k83a_power_down, 64 .power_down = s5k83a_power_down,
73 .read_sensor = s5k83a_read_sensor,
74 .write_sensor = s5k83a_write_sensor,
75 .i2c_slave_id = 0x5a, 65 .i2c_slave_id = 0x5a,
66 .i2c_regW = 2,
76 .nctrls = 5, 67 .nctrls = 5,
77 .ctrls = { 68 .ctrls = {
78 { 69 {
diff --git a/drivers/media/video/gspca/m5602/m5602_sensor.h b/drivers/media/video/gspca/m5602/m5602_sensor.h
index 60c9a48e0c02..261623f0da48 100644
--- a/drivers/media/video/gspca/m5602/m5602_sensor.h
+++ b/drivers/media/video/gspca/m5602/m5602_sensor.h
@@ -49,23 +49,21 @@ struct m5602_sensor {
49 /* What i2c address the sensor is connected to */ 49 /* What i2c address the sensor is connected to */
50 u8 i2c_slave_id; 50 u8 i2c_slave_id;
51 51
52 /* Width of each i2c register (in bytes) */
53 u8 i2c_regW;
54
52 /* Probes if the sensor is connected */ 55 /* Probes if the sensor is connected */
53 int (*probe)(struct sd *sd); 56 int (*probe)(struct sd *sd);
54 57
55 /* Performs a initialization sequence */ 58 /* Performs a initialization sequence */
56 int (*init)(struct sd *sd); 59 int (*init)(struct sd *sd);
57 60
61 /* Executed when the camera starts to send data */
62 int (*start)(struct sd *sd);
63
58 /* Performs a power down sequence */ 64 /* Performs a power down sequence */
59 int (*power_down)(struct sd *sd); 65 int (*power_down)(struct sd *sd);
60 66
61 /* Reads a sensor register */
62 int (*read_sensor)(struct sd *sd, const u8 address,
63 u8 *i2c_data, const u8 len);
64
65 /* Writes to a sensor register */
66 int (*write_sensor)(struct sd *sd, const u8 address,
67 u8 *i2c_data, const u8 len);
68
69 int nctrls; 67 int nctrls;
70 struct ctrl ctrls[M5602_MAX_CTRLS]; 68 struct ctrl ctrls[M5602_MAX_CTRLS];
71 69
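
struct m5602_sensor is effectively an ops table: each sensor fills in its callbacks plus a couple of constants (slave id, register width, now also an optional start hook), and the bridge only ever calls through these pointers. A simplified stand-in for that interface — the struct below is illustrative, not the real definition:

    #include <stdio.h>
    #include <stdint.h>

    struct sensor_ops {
            const char *name;
            uint8_t i2c_slave_id;
            uint8_t i2c_regW;
            int (*probe)(void);
            int (*init)(void);
    };

    static int fake_probe(void) { printf("probe ok\n"); return 0; }
    static int fake_init(void)  { printf("init ok\n");  return 0; }

    static const struct sensor_ops fake_sensor = {
            .name = "FAKE",
            .i2c_slave_id = 0x5a,
            .i2c_regW = 2,
            .probe = fake_probe,
            .init = fake_init,
    };

    int main(void)
    {
            const struct sensor_ops *s = &fake_sensor;

            if (!s->probe() && !s->init())
                    printf("%s ready (regW=%u)\n", s->name, (unsigned)s->i2c_regW);
            return 0;
    }
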
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index 277ca34a8817..3d2090e67a63 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -39,7 +39,7 @@ struct sd {
39static struct ctrl sd_ctrls[] = { 39static struct ctrl sd_ctrls[] = {
40}; 40};
41 41
42static struct v4l2_pix_format vga_mode[] = { 42static const struct v4l2_pix_format vga_mode[] = {
43 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 43 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
44 .bytesperline = 320, 44 .bytesperline = 320,
45 .sizeimage = 320 * 240 * 3 / 8 + 589, 45 .sizeimage = 320 * 240 * 3 / 8 + 589,
@@ -123,7 +123,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
123 cam = &gspca_dev->cam; 123 cam = &gspca_dev->cam;
124 cam->epaddr = 0x01; 124 cam->epaddr = 0x01;
125 cam->cam_mode = vga_mode; 125 cam->cam_mode = vga_mode;
126 cam->nmodes = sizeof vga_mode / sizeof vga_mode[0]; 126 cam->nmodes = ARRAY_SIZE(vga_mode);
127 sd->qindex = 1; /* set the quantization table */ 127 sd->qindex = 1; /* set the quantization table */
128 return 0; 128 return 0;
129} 129}
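
The sizeof/sizeof division is replaced by ARRAY_SIZE(), which computes the same element count but, in the kernel's definition, also fails to build when handed a pointer instead of an array. The underlying arithmetic:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            int modes[3] = { 320, 352, 640 };

            printf("nmodes = %zu\n", ARRAY_SIZE(modes));
            return 0;
    }
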
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index ca671194679e..ee232956c812 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -3,7 +3,18 @@
3 * 3 *
4 * Copyright (C) 2008 Jean-Francois Moine (http://moinejf.free.fr) 4 * Copyright (C) 2008 Jean-Francois Moine (http://moinejf.free.fr)
5 * 5 *
6 * (This module is adapted from the ov51x-jpeg package) 6 * This module is adapted from the ov51x-jpeg package, which itself
7 * was adapted from the ov511 driver.
8 *
9 * Original copyright for the ov511 driver is:
10 *
11 * Copyright (c) 1999-2004 Mark W. McClelland
12 * Support for OV519, OV8610 Copyright (c) 2003 Joerg Heckenbach
13 *
14 * ov51x-jpeg original copyright is:
15 *
16 * Copyright (c) 2004-2007 Romain Beauxis <toots@rastageeks.org>
17 * Support for OV7670 sensors was contributed by Sam Skipsey <aoanla@yahoo.com>
7 * 18 *
8 * This program is free software; you can redistribute it and/or modify 19 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 20 * it under the terms of the GNU General Public License as published by
@@ -40,22 +51,18 @@ struct sd {
40 struct gspca_dev gspca_dev; /* !! must be the first item */ 51 struct gspca_dev gspca_dev; /* !! must be the first item */
41 52
42 /* Determined by sensor type */ 53 /* Determined by sensor type */
43 char sif; 54 __u8 sif;
44
45 unsigned char primary_i2c_slave; /* I2C write id of sensor */
46 55
47 unsigned char brightness; 56 __u8 brightness;
48 unsigned char contrast; 57 __u8 contrast;
49 unsigned char colors; 58 __u8 colors;
50 __u8 hflip; 59 __u8 hflip;
51 __u8 vflip; 60 __u8 vflip;
52 61
53 char compress; /* Should the next frame be compressed? */ 62 __u8 stopped; /* Streaming is temporarily paused */
54 char compress_inited; /* Are compression params uploaded? */
55 char stopped; /* Streaming is temporarily paused */
56 63
57 char frame_rate; /* current Framerate (OV519 only) */ 64 __u8 frame_rate; /* current Framerate (OV519 only) */
58 char clockdiv; /* clockdiv override for OV519 only */ 65 __u8 clockdiv; /* clockdiv override for OV519 only */
59 66
60 char sensor; /* Type of image sensor chip (SEN_*) */ 67 char sensor; /* Type of image sensor chip (SEN_*) */
61#define SEN_UNKNOWN 0 68#define SEN_UNKNOWN 0
@@ -67,7 +74,6 @@ struct sd {
67#define SEN_OV7670 6 74#define SEN_OV7670 6
68#define SEN_OV76BE 7 75#define SEN_OV76BE 7
69#define SEN_OV8610 8 76#define SEN_OV8610 8
70
71}; 77};
72 78
73/* V4L2 controls supported by the driver */ 79/* V4L2 controls supported by the driver */
@@ -158,7 +164,7 @@ static struct ctrl sd_ctrls[] = {
158 }, 164 },
159}; 165};
160 166
161static struct v4l2_pix_format vga_mode[] = { 167static const struct v4l2_pix_format vga_mode[] = {
162 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 168 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
163 .bytesperline = 320, 169 .bytesperline = 320,
164 .sizeimage = 320 * 240 * 3 / 8 + 590, 170 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -170,7 +176,7 @@ static struct v4l2_pix_format vga_mode[] = {
170 .colorspace = V4L2_COLORSPACE_JPEG, 176 .colorspace = V4L2_COLORSPACE_JPEG,
171 .priv = 0}, 177 .priv = 0},
172}; 178};
173static struct v4l2_pix_format sif_mode[] = { 179static const struct v4l2_pix_format sif_mode[] = {
174 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 180 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
175 .bytesperline = 176, 181 .bytesperline = 176,
176 .sizeimage = 176 * 144 * 3 / 8 + 590, 182 .sizeimage = 176 * 144 * 3 / 8 + 590,
@@ -184,15 +190,15 @@ static struct v4l2_pix_format sif_mode[] = {
184}; 190};
185 191
186/* OV519 Camera interface register numbers */ 192/* OV519 Camera interface register numbers */
187#define OV519_CAM_H_SIZE 0x10 193#define OV519_R10_H_SIZE 0x10
188#define OV519_CAM_V_SIZE 0x11 194#define OV519_R11_V_SIZE 0x11
189#define OV519_CAM_X_OFFSETL 0x12 195#define OV519_R12_X_OFFSETL 0x12
190#define OV519_CAM_X_OFFSETH 0x13 196#define OV519_R13_X_OFFSETH 0x13
191#define OV519_CAM_Y_OFFSETL 0x14 197#define OV519_R14_Y_OFFSETL 0x14
192#define OV519_CAM_Y_OFFSETH 0x15 198#define OV519_R15_Y_OFFSETH 0x15
193#define OV519_CAM_DIVIDER 0x16 199#define OV519_R16_DIVIDER 0x16
194#define OV519_CAM_DFR 0x20 200#define OV519_R20_DFR 0x20
195#define OV519_CAM_FORMAT 0x25 201#define OV519_R25_FORMAT 0x25
196 202
197/* OV519 System Controller register numbers */ 203/* OV519 System Controller register numbers */
198#define OV519_SYS_RESET1 0x51 204#define OV519_SYS_RESET1 0x51
@@ -562,8 +568,8 @@ static const struct ov_i2c_regvals norm_7670[] = {
562 { OV7670_REG_VSTOP, 0x7a }, 568 { OV7670_REG_VSTOP, 0x7a },
563 { OV7670_REG_VREF, 0x0a }, 569 { OV7670_REG_VREF, 0x0a },
564 570
565 { OV7670_REG_COM3, 0 }, 571 { OV7670_REG_COM3, 0x00 },
566 { OV7670_REG_COM14, 0 }, 572 { OV7670_REG_COM14, 0x00 },
567/* Mystery scaling numbers */ 573/* Mystery scaling numbers */
568 { 0x70, 0x3a }, 574 { 0x70, 0x3a },
569 { 0x71, 0x35 }, 575 { 0x71, 0x35 },
@@ -595,8 +601,8 @@ static const struct ov_i2c_regvals norm_7670[] = {
595 { OV7670_REG_COM8, OV7670_COM8_FASTAEC 601 { OV7670_REG_COM8, OV7670_COM8_FASTAEC
596 | OV7670_COM8_AECSTEP 602 | OV7670_COM8_AECSTEP
597 | OV7670_COM8_BFILT }, 603 | OV7670_COM8_BFILT },
598 { OV7670_REG_GAIN, 0 }, 604 { OV7670_REG_GAIN, 0x00 },
599 { OV7670_REG_AECH, 0 }, 605 { OV7670_REG_AECH, 0x00 },
600 { OV7670_REG_COM4, 0x40 }, /* magic reserved bit */ 606 { OV7670_REG_COM4, 0x40 }, /* magic reserved bit */
601 { OV7670_REG_COM9, 0x18 }, /* 4x gain + magic rsvd bit */ 607 { OV7670_REG_COM9, 0x18 }, /* 4x gain + magic rsvd bit */
602 { OV7670_REG_BD50MAX, 0x05 }, 608 { OV7670_REG_BD50MAX, 0x05 },
@@ -634,16 +640,16 @@ static const struct ov_i2c_regvals norm_7670[] = {
634 { OV7670_REG_COM12, 0x78 }, 640 { OV7670_REG_COM12, 0x78 },
635 { 0x4d, 0x40 }, 641 { 0x4d, 0x40 },
636 { 0x4e, 0x20 }, 642 { 0x4e, 0x20 },
637 { OV7670_REG_GFIX, 0 }, 643 { OV7670_REG_GFIX, 0x00 },
638 { 0x6b, 0x4a }, 644 { 0x6b, 0x4a },
639 { 0x74, 0x10 }, 645 { 0x74, 0x10 },
640 { 0x8d, 0x4f }, 646 { 0x8d, 0x4f },
641 { 0x8e, 0 }, 647 { 0x8e, 0x00 },
642 { 0x8f, 0 }, 648 { 0x8f, 0x00 },
643 { 0x90, 0 }, 649 { 0x90, 0x00 },
644 { 0x91, 0 }, 650 { 0x91, 0x00 },
645 { 0x96, 0 }, 651 { 0x96, 0x00 },
646 { 0x9a, 0 }, 652 { 0x9a, 0x00 },
647 { 0xb0, 0x84 }, 653 { 0xb0, 0x84 },
648 { 0xb1, 0x0c }, 654 { 0xb1, 0x0c },
649 { 0xb2, 0x0e }, 655 { 0xb2, 0x0e },
@@ -681,17 +687,17 @@ static const struct ov_i2c_regvals norm_7670[] = {
681/* Matrix coefficients */ 687/* Matrix coefficients */
682 { 0x4f, 0x80 }, 688 { 0x4f, 0x80 },
683 { 0x50, 0x80 }, 689 { 0x50, 0x80 },
684 { 0x51, 0 }, 690 { 0x51, 0x00 },
685 { 0x52, 0x22 }, 691 { 0x52, 0x22 },
686 { 0x53, 0x5e }, 692 { 0x53, 0x5e },
687 { 0x54, 0x80 }, 693 { 0x54, 0x80 },
688 { 0x58, 0x9e }, 694 { 0x58, 0x9e },
689 695
690 { OV7670_REG_COM16, OV7670_COM16_AWBGAIN }, 696 { OV7670_REG_COM16, OV7670_COM16_AWBGAIN },
691 { OV7670_REG_EDGE, 0 }, 697 { OV7670_REG_EDGE, 0x00 },
692 { 0x75, 0x05 }, 698 { 0x75, 0x05 },
693 { 0x76, 0xe1 }, 699 { 0x76, 0xe1 },
694 { 0x4c, 0 }, 700 { 0x4c, 0x00 },
695 { 0x77, 0x01 }, 701 { 0x77, 0x01 },
696 { OV7670_REG_COM13, OV7670_COM13_GAMMA 702 { OV7670_REG_COM13, OV7670_COM13_GAMMA
697 | OV7670_COM13_UVSAT 703 | OV7670_COM13_UVSAT
@@ -704,7 +710,7 @@ static const struct ov_i2c_regvals norm_7670[] = {
704 { 0x34, 0x11 }, 710 { 0x34, 0x11 },
705 { OV7670_REG_COM11, OV7670_COM11_EXP|OV7670_COM11_HZAUTO }, 711 { OV7670_REG_COM11, OV7670_COM11_EXP|OV7670_COM11_HZAUTO },
706 { 0xa4, 0x88 }, 712 { 0xa4, 0x88 },
707 { 0x96, 0 }, 713 { 0x96, 0x00 },
708 { 0x97, 0x30 }, 714 { 0x97, 0x30 },
709 { 0x98, 0x20 }, 715 { 0x98, 0x20 },
710 { 0x99, 0x30 }, 716 { 0x99, 0x30 },
@@ -942,11 +948,11 @@ static int i2c_w(struct sd *sd,
942 948
943 /* Initiate 3-byte write cycle */ 949 /* Initiate 3-byte write cycle */
944 rc = reg_w(sd, R518_I2C_CTL, 0x01); 950 rc = reg_w(sd, R518_I2C_CTL, 0x01);
951 if (rc < 0)
952 return rc;
945 953
946 /* wait for write complete */ 954 /* wait for write complete */
947 msleep(4); 955 msleep(4);
948 if (rc < 0)
949 return rc;
950 return reg_r8(sd, R518_I2C_CTL); 956 return reg_r8(sd, R518_I2C_CTL);
951} 957}
952 958
@@ -1029,7 +1035,7 @@ static inline int ov51x_restart(struct sd *sd)
1029 */ 1035 */
1030static int init_ov_sensor(struct sd *sd) 1036static int init_ov_sensor(struct sd *sd)
1031{ 1037{
1032 int i, success; 1038 int i;
1033 1039
1034 /* Reset the sensor */ 1040 /* Reset the sensor */
1035 if (i2c_w(sd, 0x12, 0x80) < 0) 1041 if (i2c_w(sd, 0x12, 0x80) < 0)
@@ -1038,11 +1044,11 @@ static int init_ov_sensor(struct sd *sd)
1038 /* Wait for it to initialize */ 1044 /* Wait for it to initialize */
1039 msleep(150); 1045 msleep(150);
1040 1046
1041 for (i = 0, success = 0; i < i2c_detect_tries && !success; i++) { 1047 for (i = 0; i < i2c_detect_tries; i++) {
1042 if (i2c_r(sd, OV7610_REG_ID_HIGH) == 0x7f && 1048 if (i2c_r(sd, OV7610_REG_ID_HIGH) == 0x7f &&
1043 i2c_r(sd, OV7610_REG_ID_LOW) == 0xa2) { 1049 i2c_r(sd, OV7610_REG_ID_LOW) == 0xa2) {
1044 success = 1; 1050 PDEBUG(D_PROBE, "I2C synced in %d attempt(s)", i);
1045 continue; 1051 return 0;
1046 } 1052 }
1047 1053
1048 /* Reset the sensor */ 1054 /* Reset the sensor */
@@ -1054,10 +1060,7 @@ static int init_ov_sensor(struct sd *sd)
1054 if (i2c_r(sd, 0x00) < 0) 1060 if (i2c_r(sd, 0x00) < 0)
1055 return -EIO; 1061 return -EIO;
1056 } 1062 }
1057 if (!success) 1063 return -EIO;
1058 return -EIO;
1059 PDEBUG(D_PROBE, "I2C synced in %d attempt(s)", i);
1060 return 0;
1061} 1064}
1062 1065
1063/* Set the read and write slave IDs. The "slave" argument is the write slave, 1066/* Set the read and write slave IDs. The "slave" argument is the write slave,
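
init_ov_sensor() above is restructured so the bounded detection loop returns 0 as soon as the two ID registers read back 0x7f/0xa2, and simply falls through to -EIO once i2c_detect_tries attempts are exhausted, instead of carrying a separate `success` flag. The shape of that loop, with a stub in place of the ID reads:

    #include <stdio.h>

    #define EIO 5
    #define DETECT_TRIES 10

    static int try_sync(int attempt)
    {
            return attempt == 3;    /* pretend the 4th attempt succeeds */
    }

    static int init_sensor(void)
    {
            int i;

            for (i = 0; i < DETECT_TRIES; i++) {
                    if (try_sync(i)) {
                            printf("I2C synced in %d attempt(s)\n", i);
                            return 0;
                    }
                    /* a real driver would reset the sensor here and retry */
            }
            return -EIO;
    }

    int main(void)
    {
            return init_sensor() ? 1 : 0;
    }
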
@@ -1073,7 +1076,6 @@ static int ov51x_set_slave_ids(struct sd *sd,
1073 rc = reg_w(sd, R51x_I2C_W_SID, slave); 1076 rc = reg_w(sd, R51x_I2C_W_SID, slave);
1074 if (rc < 0) 1077 if (rc < 0)
1075 return rc; 1078 return rc;
1076 sd->primary_i2c_slave = slave;
1077 return reg_w(sd, R51x_I2C_R_SID, slave + 1); 1079 return reg_w(sd, R51x_I2C_R_SID, slave + 1);
1078} 1080}
1079 1081
@@ -1285,7 +1287,6 @@ static int ov6xx0_configure(struct sd *sd)
1285/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */ 1287/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */
1286static void ov51x_led_control(struct sd *sd, int on) 1288static void ov51x_led_control(struct sd *sd, int on)
1287{ 1289{
1288/* PDEBUG(D_STREAM, "LED (%s)", on ? "on" : "off"); */
1289 reg_w_mask(sd, OV519_GPIO_DATA_OUT0, !on, 1); /* 0 / 1 */ 1290 reg_w_mask(sd, OV519_GPIO_DATA_OUT0, !on, 1); /* 0 / 1 */
1290} 1291}
1291 1292
@@ -1352,7 +1353,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
1352 } 1353 }
1353 if (ov8xx0_configure(sd) < 0) { 1354 if (ov8xx0_configure(sd) < 0) {
1354 PDEBUG(D_ERR, 1355 PDEBUG(D_ERR,
1355 "Failed to configure OV8xx0 sensor"); 1356 "Failed to configure OV8xx0 sensor");
1356 goto error; 1357 goto error;
1357 } 1358 }
1358 } 1359 }
@@ -1482,7 +1483,7 @@ static int ov519_mode_init_regs(struct sd *sd)
1482 return -EIO; 1483 return -EIO;
1483 if (sd->sensor == SEN_OV7640) { 1484 if (sd->sensor == SEN_OV7640) {
1484 /* Select 8-bit input mode */ 1485 /* Select 8-bit input mode */
1485 reg_w_mask(sd, OV519_CAM_DFR, 0x10, 0x10); 1486 reg_w_mask(sd, OV519_R20_DFR, 0x10, 0x10);
1486 } 1487 }
1487 } else { 1488 } else {
1488 if (write_regvals(sd, mode_init_519_ov7670, 1489 if (write_regvals(sd, mode_init_519_ov7670,
@@ -1490,14 +1491,14 @@ static int ov519_mode_init_regs(struct sd *sd)
1490 return -EIO; 1491 return -EIO;
1491 } 1492 }
1492 1493
1493 reg_w(sd, OV519_CAM_H_SIZE, sd->gspca_dev.width >> 4); 1494 reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4);
1494 reg_w(sd, OV519_CAM_V_SIZE, sd->gspca_dev.height >> 3); 1495 reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3);
1495 reg_w(sd, OV519_CAM_X_OFFSETL, 0x00); 1496 reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
1496 reg_w(sd, OV519_CAM_X_OFFSETH, 0x00); 1497 reg_w(sd, OV519_R13_X_OFFSETH, 0x00);
1497 reg_w(sd, OV519_CAM_Y_OFFSETL, 0x00); 1498 reg_w(sd, OV519_R14_Y_OFFSETL, 0x00);
1498 reg_w(sd, OV519_CAM_Y_OFFSETH, 0x00); 1499 reg_w(sd, OV519_R15_Y_OFFSETH, 0x00);
1499 reg_w(sd, OV519_CAM_DIVIDER, 0x00); 1500 reg_w(sd, OV519_R16_DIVIDER, 0x00);
1500 reg_w(sd, OV519_CAM_FORMAT, 0x03); /* YUV422 */ 1501 reg_w(sd, OV519_R25_FORMAT, 0x03); /* YUV422 */
1501 reg_w(sd, 0x26, 0x00); /* Undocumented */ 1502 reg_w(sd, 0x26, 0x00); /* Undocumented */
1502 1503
1503 /******** Set the framerate ********/ 1504 /******** Set the framerate ********/
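
The renamed OV519_R10_H_SIZE/OV519_R11_V_SIZE registers are programmed in coarse units, as the shifts above show: width in blocks of 16 pixels (width >> 4) and height in blocks of 8 lines (height >> 3). For the 320x240 mode:

    #include <stdio.h>

    int main(void)
    {
            int width = 320, height = 240;

            printf("OV519_R10_H_SIZE = 0x%02x\n", width >> 4);  /* 20 blocks */
            printf("OV519_R11_V_SIZE = 0x%02x\n", height >> 3); /* 30 blocks */
            return 0;
    }
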
@@ -1509,8 +1510,8 @@ static int ov519_mode_init_regs(struct sd *sd)
1509 switch (sd->sensor) { 1510 switch (sd->sensor) {
1510 case SEN_OV7640: 1511 case SEN_OV7640:
1511 switch (sd->frame_rate) { 1512 switch (sd->frame_rate) {
1512/*fixme: default was 30 fps */ 1513 default:
1513 case 30: 1514/* case 30: */
1514 reg_w(sd, 0xa4, 0x0c); 1515 reg_w(sd, 0xa4, 0x0c);
1515 reg_w(sd, 0x23, 0xff); 1516 reg_w(sd, 0x23, 0xff);
1516 break; 1517 break;
@@ -1522,8 +1523,7 @@ static int ov519_mode_init_regs(struct sd *sd)
1522 reg_w(sd, 0xa4, 0x0c); 1523 reg_w(sd, 0xa4, 0x0c);
1523 reg_w(sd, 0x23, 0x1b); 1524 reg_w(sd, 0x23, 0x1b);
1524 break; 1525 break;
1525 default: 1526 case 15:
1526/* case 15: */
1527 reg_w(sd, 0xa4, 0x04); 1527 reg_w(sd, 0xa4, 0x04);
1528 reg_w(sd, 0x23, 0xff); 1528 reg_w(sd, 0x23, 0xff);
1529 sd->clockdiv = 1; 1529 sd->clockdiv = 1;
@@ -1576,7 +1576,6 @@ static int ov519_mode_init_regs(struct sd *sd)
1576 } 1576 }
1577 break; 1577 break;
1578 } 1578 }
1579
1580 return 0; 1579 return 0;
1581} 1580}
1582 1581
@@ -1667,7 +1666,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
1667 * the gain or the contrast. The "reserved" bits seem 1666 * the gain or the contrast. The "reserved" bits seem
1668 * to have some effect in this case. */ 1667 * to have some effect in this case. */
1669 i2c_w(sd, 0x2d, 0x85); 1668 i2c_w(sd, 0x2d, 0x85);
1670 } else if (sd->clockdiv >= 0) { 1669 } else {
1671 i2c_w(sd, 0x11, sd->clockdiv); 1670 i2c_w(sd, 0x11, sd->clockdiv);
1672 } 1671 }
1673 1672
@@ -1869,7 +1868,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
1869 ret = ov51x_restart(sd); 1868 ret = ov51x_restart(sd);
1870 if (ret < 0) 1869 if (ret < 0)
1871 goto out; 1870 goto out;
1872 PDEBUG(D_STREAM, "camera started alt: 0x%02x", gspca_dev->alt);
1873 ov51x_led_control(sd, 1); 1871 ov51x_led_control(sd, 1);
1874 return 0; 1872 return 0;
1875out: 1873out:
@@ -1879,8 +1877,10 @@ out:
1879 1877
1880static void sd_stopN(struct gspca_dev *gspca_dev) 1878static void sd_stopN(struct gspca_dev *gspca_dev)
1881{ 1879{
1882 ov51x_stop((struct sd *) gspca_dev); 1880 struct sd *sd = (struct sd *) gspca_dev;
1883 ov51x_led_control((struct sd *) gspca_dev, 0); 1881
1882 ov51x_stop(sd);
1883 ov51x_led_control(sd, 0);
1884} 1884}
1885 1885
1886static void sd_pkt_scan(struct gspca_dev *gspca_dev, 1886static void sd_pkt_scan(struct gspca_dev *gspca_dev,
@@ -1935,9 +1935,6 @@ static void setbrightness(struct gspca_dev *gspca_dev)
1935 int val; 1935 int val;
1936 1936
1937 val = sd->brightness; 1937 val = sd->brightness;
1938 PDEBUG(D_CONF, "brightness:%d", val);
1939/* if (gspca_dev->streaming)
1940 * ov51x_stop(sd); */
1941 switch (sd->sensor) { 1938 switch (sd->sensor) {
1942 case SEN_OV8610: 1939 case SEN_OV8610:
1943 case SEN_OV7610: 1940 case SEN_OV7610:
@@ -1959,8 +1956,6 @@ static void setbrightness(struct gspca_dev *gspca_dev)
1959 i2c_w(sd, OV7670_REG_BRIGHT, ov7670_abs_to_sm(val)); 1956 i2c_w(sd, OV7670_REG_BRIGHT, ov7670_abs_to_sm(val));
1960 break; 1957 break;
1961 } 1958 }
1962/* if (gspca_dev->streaming)
1963 * ov51x_restart(sd); */
1964} 1959}
1965 1960
1966static void setcontrast(struct gspca_dev *gspca_dev) 1961static void setcontrast(struct gspca_dev *gspca_dev)
@@ -1969,9 +1964,6 @@ static void setcontrast(struct gspca_dev *gspca_dev)
1969 int val; 1964 int val;
1970 1965
1971 val = sd->contrast; 1966 val = sd->contrast;
1972 PDEBUG(D_CONF, "contrast:%d", val);
1973/* if (gspca_dev->streaming)
1974 ov51x_stop(sd); */
1975 switch (sd->sensor) { 1967 switch (sd->sensor) {
1976 case SEN_OV7610: 1968 case SEN_OV7610:
1977 case SEN_OV6620: 1969 case SEN_OV6620:
@@ -2007,8 +1999,6 @@ static void setcontrast(struct gspca_dev *gspca_dev)
2007 i2c_w(sd, OV7670_REG_CONTRAS, val >> 1); 1999 i2c_w(sd, OV7670_REG_CONTRAS, val >> 1);
2008 break; 2000 break;
2009 } 2001 }
2010/* if (gspca_dev->streaming)
2011 ov51x_restart(sd); */
2012} 2002}
2013 2003
2014static void setcolors(struct gspca_dev *gspca_dev) 2004static void setcolors(struct gspca_dev *gspca_dev)
@@ -2017,9 +2007,6 @@ static void setcolors(struct gspca_dev *gspca_dev)
2017 int val; 2007 int val;
2018 2008
2019 val = sd->colors; 2009 val = sd->colors;
2020 PDEBUG(D_CONF, "saturation:%d", val);
2021/* if (gspca_dev->streaming)
2022 ov51x_stop(sd); */
2023 switch (sd->sensor) { 2010 switch (sd->sensor) {
2024 case SEN_OV8610: 2011 case SEN_OV8610:
2025 case SEN_OV7610: 2012 case SEN_OV7610:
@@ -2044,8 +2031,6 @@ static void setcolors(struct gspca_dev *gspca_dev)
2044 /* set REG_COM13 values for UV sat auto mode */ 2031 /* set REG_COM13 values for UV sat auto mode */
2045 break; 2032 break;
2046 } 2033 }
2047/* if (gspca_dev->streaming)
2048 ov51x_restart(sd); */
2049} 2034}
2050 2035
2051static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) 2036static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
@@ -2053,7 +2038,8 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
2053 struct sd *sd = (struct sd *) gspca_dev; 2038 struct sd *sd = (struct sd *) gspca_dev;
2054 2039
2055 sd->brightness = val; 2040 sd->brightness = val;
2056 setbrightness(gspca_dev); 2041 if (gspca_dev->streaming)
2042 setbrightness(gspca_dev);
2057 return 0; 2043 return 0;
2058} 2044}
2059 2045
@@ -2070,7 +2056,8 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
2070 struct sd *sd = (struct sd *) gspca_dev; 2056 struct sd *sd = (struct sd *) gspca_dev;
2071 2057
2072 sd->contrast = val; 2058 sd->contrast = val;
2073 setcontrast(gspca_dev); 2059 if (gspca_dev->streaming)
2060 setcontrast(gspca_dev);
2074 return 0; 2061 return 0;
2075} 2062}
2076 2063
@@ -2087,7 +2074,8 @@ static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
2087 struct sd *sd = (struct sd *) gspca_dev; 2074 struct sd *sd = (struct sd *) gspca_dev;
2088 2075
2089 sd->colors = val; 2076 sd->colors = val;
2090 setcolors(gspca_dev); 2077 if (gspca_dev->streaming)
2078 setcolors(gspca_dev);
2091 return 0; 2079 return 0;
2092} 2080}
2093 2081
@@ -2104,7 +2092,8 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
2104 struct sd *sd = (struct sd *) gspca_dev; 2092 struct sd *sd = (struct sd *) gspca_dev;
2105 2093
2106 sd->hflip = val; 2094 sd->hflip = val;
2107 sethvflip(sd); 2095 if (gspca_dev->streaming)
2096 sethvflip(sd);
2108 return 0; 2097 return 0;
2109} 2098}
2110 2099
@@ -2121,7 +2110,8 @@ static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
2121 struct sd *sd = (struct sd *) gspca_dev; 2110 struct sd *sd = (struct sd *) gspca_dev;
2122 2111
2123 sd->vflip = val; 2112 sd->vflip = val;
2124 sethvflip(sd); 2113 if (gspca_dev->streaming)
2114 sethvflip(sd);
2125 return 0; 2115 return 0;
2126} 2116}
2127 2117
@@ -2162,7 +2152,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
2162 {USB_DEVICE(0x05a9, 0x8519)}, 2152 {USB_DEVICE(0x05a9, 0x8519)},
2163 {} 2153 {}
2164}; 2154};
2165#undef DVNAME 2155
2166MODULE_DEVICE_TABLE(usb, device_table); 2156MODULE_DEVICE_TABLE(usb, device_table);
2167 2157
2168/* -- device connect -- */ 2158/* -- device connect -- */
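Note on the control hunks above: they all follow one pattern — the new control value is always cached in the driver state, but the hardware write is now performed only while the device is streaming (the cached values are applied again at stream start). A minimal sketch of that pattern; cam_state, hw_write_brightness() and the streaming flag are illustrative stand-ins, not this driver's own names:

struct cam_state {
	int streaming;		/* non-zero while the video stream runs */
	int brightness;		/* cached control value */
};

static void hw_write_brightness(struct cam_state *cam)
{
	/* stand-in for the real register / i2c write */
}

static int set_brightness_ctrl(struct cam_state *cam, int val)
{
	cam->brightness = val;			/* always cache the request */
	if (cam->streaming)
		hw_write_brightness(cam);	/* apply only if live */
	return 0;				/* stream start replays the cache */
}

This avoids touching the sensor while it is powered down or not yet configured.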
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
new file mode 100644
index 000000000000..3bf15e401693
--- /dev/null
+++ b/drivers/media/video/gspca/ov534.c
@@ -0,0 +1,601 @@
1/*
2 * ov534/ov772x gspca driver
3 * Copyright (C) 2008 Antonio Ospite <ospite@studenti.unina.it>
4 * Copyright (C) 2008 Jim Paris <jim@jtan.com>
5 *
6 * Based on a prototype written by Mark Ferrell <majortrips@gmail.com>
7 * USB protocol reverse engineered by Jim Paris <jim@jtan.com>
8 * https://jim.sh/svn/jim/devl/playstation/ps3/eye/test/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#define MODULE_NAME "ov534"
26
27#include "gspca.h"
28
29#define OV534_REG_ADDRESS 0xf1 /* ? */
30#define OV534_REG_SUBADDR 0xf2
31#define OV534_REG_WRITE 0xf3
32#define OV534_REG_READ 0xf4
33#define OV534_REG_OPERATION 0xf5
34#define OV534_REG_STATUS 0xf6
35
36#define OV534_OP_WRITE_3 0x37
37#define OV534_OP_WRITE_2 0x33
38#define OV534_OP_READ_2 0xf9
39
40#define CTRL_TIMEOUT 500
41
42MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
43MODULE_DESCRIPTION("GSPCA/OV534 USB Camera Driver");
44MODULE_LICENSE("GPL");
45
46/* specific webcam descriptor */
47struct sd {
48 struct gspca_dev gspca_dev; /* !! must be the first item */
49 __u32 last_fid;
50 __u32 last_pts;
51 int frame_rate;
52};
53
54/* V4L2 controls supported by the driver */
55static struct ctrl sd_ctrls[] = {
56};
57
58static const struct v4l2_pix_format vga_mode[] = {
59 {640, 480, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
60 .bytesperline = 640 * 2,
61 .sizeimage = 640 * 480 * 2,
62 .colorspace = V4L2_COLORSPACE_JPEG,
63 .priv = 0},
64};
65
66static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
67{
68 struct usb_device *udev = gspca_dev->dev;
69 int ret;
70
 71	PDEBUG(D_USBO, "reg=0x%04x, val=0x%02x", reg, val);
72 gspca_dev->usb_buf[0] = val;
73 ret = usb_control_msg(udev,
74 usb_sndctrlpipe(udev, 0),
75 0x1,
76 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
77 0x0, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
78 if (ret < 0)
79 PDEBUG(D_ERR, "write failed");
80}
81
82static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
83{
84 struct usb_device *udev = gspca_dev->dev;
85 int ret;
86
87 ret = usb_control_msg(udev,
88 usb_rcvctrlpipe(udev, 0),
89 0x1,
90 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
91 0x0, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
92 PDEBUG(D_USBI, "reg=0x%04x, data=0x%02x", reg, gspca_dev->usb_buf[0]);
93 if (ret < 0)
94 PDEBUG(D_ERR, "read failed");
95 return gspca_dev->usb_buf[0];
96}
97
98/* Two bits control LED: 0x21 bit 7 and 0x23 bit 7.
99 * (direction and output)? */
100static void ov534_set_led(struct gspca_dev *gspca_dev, int status)
101{
102 u8 data;
103
104 PDEBUG(D_CONF, "led status: %d", status);
105
106 data = ov534_reg_read(gspca_dev, 0x21);
107 data |= 0x80;
108 ov534_reg_write(gspca_dev, 0x21, data);
109
110 data = ov534_reg_read(gspca_dev, 0x23);
111 if (status)
112 data |= 0x80;
113 else
114 data &= ~(0x80);
115
116 ov534_reg_write(gspca_dev, 0x23, data);
117}
118
119static int sccb_check_status(struct gspca_dev *gspca_dev)
120{
121 u8 data;
122 int i;
123
124 for (i = 0; i < 5; i++) {
125 data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
126
127 switch (data) {
128 case 0x00:
129 return 1;
130 case 0x04:
131 return 0;
132 case 0x03:
133 break;
134 default:
135 PDEBUG(D_ERR, "sccb status 0x%02x, attempt %d/5",
136 data, i + 1);
137 }
138 }
139 return 0;
140}
141
142static void sccb_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
143{
144 PDEBUG(D_USBO, "reg: 0x%04x, val: 0x%02x", reg, val);
145 ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
146 ov534_reg_write(gspca_dev, OV534_REG_WRITE, val);
147 ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
148
149 if (!sccb_check_status(gspca_dev))
150 PDEBUG(D_ERR, "sccb_reg_write failed");
151}
152
153#ifdef GSPCA_DEBUG
154static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
155{
156 ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
157 ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2);
158 if (!sccb_check_status(gspca_dev))
159 PDEBUG(D_ERR, "sccb_reg_read failed 1");
160
161 ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2);
162 if (!sccb_check_status(gspca_dev))
163 PDEBUG(D_ERR, "sccb_reg_read failed 2");
164
165 return ov534_reg_read(gspca_dev, OV534_REG_READ);
166}
167#endif
168
169static const __u8 ov534_reg_initdata[][2] = {
170 { 0xe7, 0x3a },
171
172 { OV534_REG_ADDRESS, 0x42 }, /* select OV772x sensor */
173
174 { 0xc2, 0x0c },
175 { 0x88, 0xf8 },
176 { 0xc3, 0x69 },
177 { 0x89, 0xff },
178 { 0x76, 0x03 },
179 { 0x92, 0x01 },
180 { 0x93, 0x18 },
181 { 0x94, 0x10 },
182 { 0x95, 0x10 },
183 { 0xe2, 0x00 },
184 { 0xe7, 0x3e },
185
186 { 0x96, 0x00 },
187
188 { 0x97, 0x20 },
189 { 0x97, 0x20 },
190 { 0x97, 0x20 },
191 { 0x97, 0x0a },
192 { 0x97, 0x3f },
193 { 0x97, 0x4a },
194 { 0x97, 0x20 },
195 { 0x97, 0x15 },
196 { 0x97, 0x0b },
197
198 { 0x8e, 0x40 },
199 { 0x1f, 0x81 },
200 { 0x34, 0x05 },
201 { 0xe3, 0x04 },
202 { 0x88, 0x00 },
203 { 0x89, 0x00 },
204 { 0x76, 0x00 },
205 { 0xe7, 0x2e },
206 { 0x31, 0xf9 },
207 { 0x25, 0x42 },
208 { 0x21, 0xf0 },
209
210 { 0x1c, 0x00 },
211 { 0x1d, 0x40 },
212 { 0x1d, 0x02 }, /* payload size 0x0200 * 4 = 2048 bytes */
213 { 0x1d, 0x00 }, /* payload size */
214 { 0x1d, 0x02 }, /* frame size 0x025800 * 4 = 614400 */
215 { 0x1d, 0x58 }, /* frame size */
216 { 0x1d, 0x00 }, /* frame size */
217
218 { 0x1c, 0x0a },
219 { 0x1d, 0x08 }, /* turn on UVC header */
220 { 0x1d, 0x0e }, /* .. */
221
222 { 0x8d, 0x1c },
223 { 0x8e, 0x80 },
224 { 0xe5, 0x04 },
225
226 { 0xc0, 0x50 },
227 { 0xc1, 0x3c },
228 { 0xc2, 0x0c },
229};
230
231static const __u8 ov772x_reg_initdata[][2] = {
232 { 0x12, 0x80 },
233 { 0x11, 0x01 },
234
235 { 0x3d, 0x03 },
236 { 0x17, 0x26 },
237 { 0x18, 0xa0 },
238 { 0x19, 0x07 },
239 { 0x1a, 0xf0 },
240 { 0x32, 0x00 },
241 { 0x29, 0xa0 },
242 { 0x2c, 0xf0 },
243 { 0x65, 0x20 },
244 { 0x11, 0x01 },
245 { 0x42, 0x7f },
246 { 0x63, 0xe0 },
247 { 0x64, 0xff },
248 { 0x66, 0x00 },
249 { 0x13, 0xf0 },
250 { 0x0d, 0x41 },
251 { 0x0f, 0xc5 },
252 { 0x14, 0x11 },
253
254 { 0x22, 0x7f },
255 { 0x23, 0x03 },
256 { 0x24, 0x40 },
257 { 0x25, 0x30 },
258 { 0x26, 0xa1 },
259 { 0x2a, 0x00 },
260 { 0x2b, 0x00 },
261 { 0x6b, 0xaa },
262 { 0x13, 0xff },
263
264 { 0x90, 0x05 },
265 { 0x91, 0x01 },
266 { 0x92, 0x03 },
267 { 0x93, 0x00 },
268 { 0x94, 0x60 },
269 { 0x95, 0x3c },
270 { 0x96, 0x24 },
271 { 0x97, 0x1e },
272 { 0x98, 0x62 },
273 { 0x99, 0x80 },
274 { 0x9a, 0x1e },
275 { 0x9b, 0x08 },
276 { 0x9c, 0x20 },
277 { 0x9e, 0x81 },
278
279 { 0xa6, 0x04 },
280 { 0x7e, 0x0c },
281 { 0x7f, 0x16 },
282 { 0x80, 0x2a },
283 { 0x81, 0x4e },
284 { 0x82, 0x61 },
285 { 0x83, 0x6f },
286 { 0x84, 0x7b },
287 { 0x85, 0x86 },
288 { 0x86, 0x8e },
289 { 0x87, 0x97 },
290 { 0x88, 0xa4 },
291 { 0x89, 0xaf },
292 { 0x8a, 0xc5 },
293 { 0x8b, 0xd7 },
294 { 0x8c, 0xe8 },
295 { 0x8d, 0x20 },
296
297 { 0x0c, 0x90 },
298
299 { 0x2b, 0x00 },
300 { 0x22, 0x7f },
301 { 0x23, 0x03 },
302 { 0x11, 0x01 },
303 { 0x0c, 0xd0 },
304 { 0x64, 0xff },
305 { 0x0d, 0x41 },
306
307 { 0x14, 0x41 },
308 { 0x0e, 0xcd },
309 { 0xac, 0xbf },
310 { 0x8e, 0x00 },
311 { 0x0c, 0xd0 }
312};
313
314/* set framerate */
315static void ov534_set_frame_rate(struct gspca_dev *gspca_dev)
316{
317 struct sd *sd = (struct sd *) gspca_dev;
318 int fr = sd->frame_rate;
319
320 switch (fr) {
321 case 50:
322 sccb_reg_write(gspca_dev, 0x11, 0x01);
323 sccb_reg_write(gspca_dev, 0x0d, 0x41);
324 ov534_reg_write(gspca_dev, 0xe5, 0x02);
325 break;
326 case 40:
327 sccb_reg_write(gspca_dev, 0x11, 0x02);
328 sccb_reg_write(gspca_dev, 0x0d, 0xc1);
329 ov534_reg_write(gspca_dev, 0xe5, 0x04);
330 break;
331/* case 30: */
332 default:
333 fr = 30;
334 sccb_reg_write(gspca_dev, 0x11, 0x04);
335 sccb_reg_write(gspca_dev, 0x0d, 0x81);
336 ov534_reg_write(gspca_dev, 0xe5, 0x02);
337 break;
338 case 15:
339 sccb_reg_write(gspca_dev, 0x11, 0x03);
340 sccb_reg_write(gspca_dev, 0x0d, 0x41);
341 ov534_reg_write(gspca_dev, 0xe5, 0x04);
342 break;
343 }
344
345 sd->frame_rate = fr;
346 PDEBUG(D_PROBE, "frame_rate: %d", fr);
347}
348
349/* setup method */
350static void ov534_setup(struct gspca_dev *gspca_dev)
351{
352 int i;
353
354 /* Initialize bridge chip */
355 for (i = 0; i < ARRAY_SIZE(ov534_reg_initdata); i++)
356 ov534_reg_write(gspca_dev, ov534_reg_initdata[i][0],
357 ov534_reg_initdata[i][1]);
358
359 PDEBUG(D_PROBE, "sensor is ov%02x%02x",
360 sccb_reg_read(gspca_dev, 0x0a),
361 sccb_reg_read(gspca_dev, 0x0b));
362
363 ov534_set_led(gspca_dev, 1);
364
365 /* Initialize sensor */
366 for (i = 0; i < ARRAY_SIZE(ov772x_reg_initdata); i++)
367 sccb_reg_write(gspca_dev, ov772x_reg_initdata[i][0],
368 ov772x_reg_initdata[i][1]);
369
370 ov534_reg_write(gspca_dev, 0xe0, 0x09);
371 ov534_set_led(gspca_dev, 0);
372}
373
374/* this function is called at probe time */
375static int sd_config(struct gspca_dev *gspca_dev,
376 const struct usb_device_id *id)
377{
378 struct cam *cam;
379
380 cam = &gspca_dev->cam;
381
382 cam->epaddr = 0x01;
383 cam->cam_mode = vga_mode;
384 cam->nmodes = ARRAY_SIZE(vga_mode);
385
386 cam->bulk_size = 16384;
387 cam->bulk_nurbs = 2;
388
389 return 0;
390}
391
392/* this function is called at probe and resume time */
393static int sd_init(struct gspca_dev *gspca_dev)
394{
395 ov534_setup(gspca_dev);
396 ov534_set_frame_rate(gspca_dev);
397
398 return 0;
399}
400
401static int sd_start(struct gspca_dev *gspca_dev)
402{
403 /* start streaming data */
404 ov534_set_led(gspca_dev, 1);
405 ov534_reg_write(gspca_dev, 0xe0, 0x00);
406
407 return 0;
408}
409
410static void sd_stopN(struct gspca_dev *gspca_dev)
411{
412 /* stop streaming data */
413 ov534_reg_write(gspca_dev, 0xe0, 0x09);
414 ov534_set_led(gspca_dev, 0);
415}
416
417/* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */
418#define UVC_STREAM_EOH (1 << 7)
419#define UVC_STREAM_ERR (1 << 6)
420#define UVC_STREAM_STI (1 << 5)
421#define UVC_STREAM_RES (1 << 4)
422#define UVC_STREAM_SCR (1 << 3)
423#define UVC_STREAM_PTS (1 << 2)
424#define UVC_STREAM_EOF (1 << 1)
425#define UVC_STREAM_FID (1 << 0)
426
427static void sd_pkt_scan(struct gspca_dev *gspca_dev, struct gspca_frame *frame,
428 __u8 *data, int len)
429{
430 struct sd *sd = (struct sd *) gspca_dev;
431 __u32 this_pts;
432 int this_fid;
433 int remaining_len = len;
434 __u8 *next_data = data;
435
436scan_next:
437 if (remaining_len <= 0)
438 return;
439
440 data = next_data;
441 len = min(remaining_len, 2048);
442 remaining_len -= len;
443 next_data += len;
444
445 /* Payloads are prefixed with a UVC-style header. We
446 consider a frame to start when the FID toggles, or the PTS
447 changes. A frame ends when EOF is set, and we've received
448 the correct number of bytes. */
449
450 /* Verify UVC header. Header length is always 12 */
451 if (data[0] != 12 || len < 12) {
452 PDEBUG(D_PACK, "bad header");
453 goto discard;
454 }
455
456 /* Check errors */
457 if (data[1] & UVC_STREAM_ERR) {
458 PDEBUG(D_PACK, "payload error");
459 goto discard;
460 }
461
462 /* Extract PTS and FID */
463 if (!(data[1] & UVC_STREAM_PTS)) {
464 PDEBUG(D_PACK, "PTS not present");
465 goto discard;
466 }
467 this_pts = (data[5] << 24) | (data[4] << 16) | (data[3] << 8) | data[2];
468 this_fid = (data[1] & UVC_STREAM_FID) ? 1 : 0;
469
470 /* If PTS or FID has changed, start a new frame. */
471 if (this_pts != sd->last_pts || this_fid != sd->last_fid) {
472 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, NULL, 0);
473 sd->last_pts = this_pts;
474 sd->last_fid = this_fid;
475 }
476
477 /* Add the data from this payload */
478 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
479 data + 12, len - 12);
480
481 /* If this packet is marked as EOF, end the frame */
482 if (data[1] & UVC_STREAM_EOF) {
483 sd->last_pts = 0;
484
485 if ((frame->data_end - frame->data) !=
486 (gspca_dev->width * gspca_dev->height * 2)) {
487 PDEBUG(D_PACK, "short frame");
488 goto discard;
489 }
490
491 gspca_frame_add(gspca_dev, LAST_PACKET, frame, NULL, 0);
492 }
493
494 /* Done this payload */
495 goto scan_next;
496
497discard:
498 /* Discard data until a new frame starts. */
499 gspca_frame_add(gspca_dev, DISCARD_PACKET, frame, NULL, 0);
500 goto scan_next;
501}
502
503/* get stream parameters (framerate) */
504static int sd_get_streamparm(struct gspca_dev *gspca_dev,
505 struct v4l2_streamparm *parm)
506{
507 struct v4l2_captureparm *cp = &parm->parm.capture;
508 struct v4l2_fract *tpf = &cp->timeperframe;
509 struct sd *sd = (struct sd *) gspca_dev;
510
511 if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
512 return -EINVAL;
513
514 cp->capability |= V4L2_CAP_TIMEPERFRAME;
515 tpf->numerator = 1;
516 tpf->denominator = sd->frame_rate;
517
518 return 0;
519}
520
521/* set stream parameters (framerate) */
522static int sd_set_streamparm(struct gspca_dev *gspca_dev,
523 struct v4l2_streamparm *parm)
524{
525 struct v4l2_captureparm *cp = &parm->parm.capture;
526 struct v4l2_fract *tpf = &cp->timeperframe;
527 struct sd *sd = (struct sd *) gspca_dev;
528
529 if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
530 return -EINVAL;
531
532 /* Set requested framerate */
533 sd->frame_rate = tpf->denominator / tpf->numerator;
534 ov534_set_frame_rate(gspca_dev);
535
536 /* Return the actual framerate */
537 tpf->numerator = 1;
538 tpf->denominator = sd->frame_rate;
539
540 return 0;
541}
542
543/* sub-driver description */
544static const struct sd_desc sd_desc = {
545 .name = MODULE_NAME,
546 .ctrls = sd_ctrls,
547 .nctrls = ARRAY_SIZE(sd_ctrls),
548 .config = sd_config,
549 .init = sd_init,
550 .start = sd_start,
551 .stopN = sd_stopN,
552 .pkt_scan = sd_pkt_scan,
553 .get_streamparm = sd_get_streamparm,
554 .set_streamparm = sd_set_streamparm,
555};
556
557/* -- module initialisation -- */
558static const __devinitdata struct usb_device_id device_table[] = {
559 {USB_DEVICE(0x06f8, 0x3002)}, /* Hercules Blog Webcam */
560 {USB_DEVICE(0x06f8, 0x3003)}, /* Hercules Dualpix HD Weblog */
561 {USB_DEVICE(0x1415, 0x2000)}, /* Sony HD Eye for PS3 (SLEH 00201) */
562 {}
563};
564
565MODULE_DEVICE_TABLE(usb, device_table);
566
567/* -- device connect -- */
568static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id)
569{
570 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
571 THIS_MODULE);
572}
573
574static struct usb_driver sd_driver = {
575 .name = MODULE_NAME,
576 .id_table = device_table,
577 .probe = sd_probe,
578 .disconnect = gspca_disconnect,
579#ifdef CONFIG_PM
580 .suspend = gspca_suspend,
581 .resume = gspca_resume,
582#endif
583};
584
585/* -- module insert / remove -- */
586static int __init sd_mod_init(void)
587{
588 if (usb_register(&sd_driver) < 0)
589 return -1;
590 PDEBUG(D_PROBE, "registered");
591 return 0;
592}
593
594static void __exit sd_mod_exit(void)
595{
596 usb_deregister(&sd_driver);
597 PDEBUG(D_PROBE, "deregistered");
598}
599
600module_init(sd_mod_init);
601module_exit(sd_mod_exit);
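The sd_pkt_scan() routine in the new ov534.c above splits the bulk data into 2048-byte payloads, each prefixed with a 12-byte UVC-style header; a frame starts when the FID bit toggles or the PTS changes, and ends on EOF once the expected byte count has arrived. A small standalone sketch of that header parsing, assuming stdint types; the helper name and return convention are illustrative, not the driver's API:

#include <stdint.h>

#define UVC_HDR_LEN	12
#define UVC_BIT_FID	(1 << 0)	/* frame identifier */
#define UVC_BIT_EOF	(1 << 1)	/* end of frame */
#define UVC_BIT_PTS	(1 << 2)	/* presentation time stamp present */
#define UVC_BIT_ERR	(1 << 6)	/* payload error */

struct uvc_hdr_info {
	uint32_t pts;
	int fid;
	int eof;
};

/* Returns 0 on success, -1 when the payload should be discarded. */
static int parse_uvc_header(const uint8_t *data, int len,
			    struct uvc_hdr_info *out)
{
	if (len < UVC_HDR_LEN || data[0] != UVC_HDR_LEN)
		return -1;			/* bad header length */
	if (data[1] & UVC_BIT_ERR)
		return -1;			/* error flag set */
	if (!(data[1] & UVC_BIT_PTS))
		return -1;			/* this driver expects a PTS */
	out->pts = data[2] | (data[3] << 8) | (data[4] << 16) |
		   ((uint32_t)data[5] << 24);	/* little-endian, bytes 2..5 */
	out->fid = (data[1] & UVC_BIT_FID) ? 1 : 0;
	out->eof = (data[1] & UVC_BIT_EOF) ? 1 : 0;
	return 0;
}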
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 0b0c573d06da..c90ac852bac0 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Pixart PAC207BCA library 2 * Pixart PAC207BCA library
3 * 3 *
4 * Copyright (C) 2008 Hans de Goede <j.w.r.degoede@hhs.nl> 4 * Copyright (C) 2008 Hans de Goede <hdgoede@redhat.com>
5 * Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li 5 * Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li
6 * Copyleft (C) 2005 Michel Xhaard mxhaard@magic.fr 6 * Copyleft (C) 2005 Michel Xhaard mxhaard@magic.fr
7 * 7 *
@@ -27,7 +27,7 @@
27 27
28#include "gspca.h" 28#include "gspca.h"
29 29
30MODULE_AUTHOR("Hans de Goede <j.w.r.degoede@hhs.nl>"); 30MODULE_AUTHOR("Hans de Goede <hdgoede@redhat.com>");
31MODULE_DESCRIPTION("Pixart PAC207"); 31MODULE_DESCRIPTION("Pixart PAC207");
32MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
33 33
@@ -149,7 +149,7 @@ static struct ctrl sd_ctrls[] = {
149 }, 149 },
150}; 150};
151 151
152static struct v4l2_pix_format sif_mode[] = { 152static const struct v4l2_pix_format sif_mode[] = {
153 {176, 144, V4L2_PIX_FMT_PAC207, V4L2_FIELD_NONE, 153 {176, 144, V4L2_PIX_FMT_PAC207, V4L2_FIELD_NONE,
154 .bytesperline = 176, 154 .bytesperline = 176,
155 .sizeimage = (176 + 2) * 144, 155 .sizeimage = (176 + 2) * 144,
@@ -529,6 +529,7 @@ static const struct sd_desc sd_desc = {
529static const __devinitdata struct usb_device_id device_table[] = { 529static const __devinitdata struct usb_device_id device_table[] = {
530 {USB_DEVICE(0x041e, 0x4028)}, 530 {USB_DEVICE(0x041e, 0x4028)},
531 {USB_DEVICE(0x093a, 0x2460)}, 531 {USB_DEVICE(0x093a, 0x2460)},
532 {USB_DEVICE(0x093a, 0x2461)},
532 {USB_DEVICE(0x093a, 0x2463)}, 533 {USB_DEVICE(0x093a, 0x2463)},
533 {USB_DEVICE(0x093a, 0x2464)}, 534 {USB_DEVICE(0x093a, 0x2464)},
534 {USB_DEVICE(0x093a, 0x2468)}, 535 {USB_DEVICE(0x093a, 0x2468)},
@@ -536,6 +537,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
536 {USB_DEVICE(0x093a, 0x2471)}, 537 {USB_DEVICE(0x093a, 0x2471)},
537 {USB_DEVICE(0x093a, 0x2472)}, 538 {USB_DEVICE(0x093a, 0x2472)},
538 {USB_DEVICE(0x093a, 0x2476)}, 539 {USB_DEVICE(0x093a, 0x2476)},
540 {USB_DEVICE(0x145f, 0x013a)},
539 {USB_DEVICE(0x2001, 0xf115)}, 541 {USB_DEVICE(0x2001, 0xf115)},
540 {} 542 {}
541}; 543};
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index fbd45e235d97..a9c95cba710e 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -226,7 +226,7 @@ static struct ctrl sd_ctrls[] = {
226 }, 226 },
227}; 227};
228 228
229static struct v4l2_pix_format vga_mode[] = { 229static const struct v4l2_pix_format vga_mode[] = {
230 {160, 120, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, 230 {160, 120, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE,
231 .bytesperline = 160, 231 .bytesperline = 160,
232 .sizeimage = 160 * 120 * 3 / 8 + 590, 232 .sizeimage = 160 * 120 * 3 / 8 + 590,
@@ -1064,10 +1064,13 @@ static __devinitdata struct usb_device_id device_table[] = {
1064 {USB_DEVICE(0x093a, 0x2608), .driver_info = SENSOR_PAC7311}, 1064 {USB_DEVICE(0x093a, 0x2608), .driver_info = SENSOR_PAC7311},
1065 {USB_DEVICE(0x093a, 0x260e), .driver_info = SENSOR_PAC7311}, 1065 {USB_DEVICE(0x093a, 0x260e), .driver_info = SENSOR_PAC7311},
1066 {USB_DEVICE(0x093a, 0x260f), .driver_info = SENSOR_PAC7311}, 1066 {USB_DEVICE(0x093a, 0x260f), .driver_info = SENSOR_PAC7311},
1067 {USB_DEVICE(0x093a, 0x2620), .driver_info = SENSOR_PAC7302},
1067 {USB_DEVICE(0x093a, 0x2621), .driver_info = SENSOR_PAC7302}, 1068 {USB_DEVICE(0x093a, 0x2621), .driver_info = SENSOR_PAC7302},
1069 {USB_DEVICE(0x093a, 0x2622), .driver_info = SENSOR_PAC7302},
1068 {USB_DEVICE(0x093a, 0x2624), .driver_info = SENSOR_PAC7302}, 1070 {USB_DEVICE(0x093a, 0x2624), .driver_info = SENSOR_PAC7302},
1069 {USB_DEVICE(0x093a, 0x2626), .driver_info = SENSOR_PAC7302}, 1071 {USB_DEVICE(0x093a, 0x2626), .driver_info = SENSOR_PAC7302},
1070 {USB_DEVICE(0x093a, 0x262a), .driver_info = SENSOR_PAC7302}, 1072 {USB_DEVICE(0x093a, 0x262a), .driver_info = SENSOR_PAC7302},
1073 {USB_DEVICE(0x093a, 0x262c), .driver_info = SENSOR_PAC7302},
1071 {} 1074 {}
1072}; 1075};
1073MODULE_DEVICE_TABLE(usb, device_table); 1076MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 6c69bc7778fc..b3e4e0677b68 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -132,8 +132,6 @@ struct sensor_data {
132 ignore at least the next 2 frames for the new settings to come into effect 132 ignore at least the next 2 frames for the new settings to come into effect
133 before doing any other adjustments */ 133 before doing any other adjustments */
134#define AUTOGAIN_IGNORE_FRAMES 3 134#define AUTOGAIN_IGNORE_FRAMES 3
135#define AUTOGAIN_DEADZONE 1000
136#define DESIRED_AVG_LUM 7000
137 135
138/* V4L2 controls supported by the driver */ 136/* V4L2 controls supported by the driver */
139static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); 137static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
@@ -229,7 +227,7 @@ static struct ctrl sd_ctrls[] = {
229 }, 227 },
230}; 228};
231 229
232static struct v4l2_pix_format vga_mode[] = { 230static const struct v4l2_pix_format vga_mode[] = {
233 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, 231 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
234 .bytesperline = 160, 232 .bytesperline = 160,
235 .sizeimage = 160 * 120, 233 .sizeimage = 160 * 120,
@@ -251,7 +249,7 @@ static struct v4l2_pix_format vga_mode[] = {
251 .colorspace = V4L2_COLORSPACE_SRGB, 249 .colorspace = V4L2_COLORSPACE_SRGB,
252 .priv = 0}, 250 .priv = 0},
253}; 251};
254static struct v4l2_pix_format sif_mode[] = { 252static const struct v4l2_pix_format sif_mode[] = {
255 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, 253 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
256 .bytesperline = 160, 254 .bytesperline = 160,
257 .sizeimage = 160 * 120, 255 .sizeimage = 160 * 120,
@@ -827,18 +825,29 @@ static void setfreq(struct gspca_dev *gspca_dev)
827 825
828static void do_autogain(struct gspca_dev *gspca_dev) 826static void do_autogain(struct gspca_dev *gspca_dev)
829{ 827{
828 int deadzone, desired_avg_lum;
830 struct sd *sd = (struct sd *) gspca_dev; 829 struct sd *sd = (struct sd *) gspca_dev;
831 int avg_lum = atomic_read(&sd->avg_lum); 830 int avg_lum = atomic_read(&sd->avg_lum);
832 831
833 if (avg_lum == -1) 832 if (avg_lum == -1)
834 return; 833 return;
835 834
835 /* SIF / VGA sensors have a different autoexposure area and thus
836 different avg_lum values for the same picture brightness */
837 if (sensor_data[sd->sensor].flags & F_SIF) {
838 deadzone = 1000;
839 desired_avg_lum = 7000;
840 } else {
841 deadzone = 3000;
842 desired_avg_lum = 23000;
843 }
844
836 if (sd->autogain_ignore_frames > 0) 845 if (sd->autogain_ignore_frames > 0)
837 sd->autogain_ignore_frames--; 846 sd->autogain_ignore_frames--;
838 else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum, 847 else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum,
839 sd->brightness * DESIRED_AVG_LUM / 127, 848 sd->brightness * desired_avg_lum / 127,
840 AUTOGAIN_DEADZONE, GAIN_KNEE, EXPOSURE_KNEE)) { 849 deadzone, GAIN_KNEE, EXPOSURE_KNEE)) {
841 PDEBUG(D_FRAM, "autogain: gain changed: gain: %d expo: %d\n", 850 PDEBUG(D_FRAM, "autogain: gain changed: gain: %d expo: %d",
842 (int)sd->gain, (int)sd->exposure); 851 (int)sd->gain, (int)sd->exposure);
843 sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES; 852 sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
844 } 853 }
@@ -1226,8 +1235,8 @@ static __devinitdata struct usb_device_id device_table[] = {
1226 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, 1235 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
1227 {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, 1236 {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
1228 {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)}, 1237 {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
1229 {USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
1230#endif 1238#endif
1239 {USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
1231 {USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)}, 1240 {USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)},
1232#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE 1241#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1233 {USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)}, 1242 {USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)},
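The sonixb do_autogain() change above replaces the fixed AUTOGAIN_DEADZONE / DESIRED_AVG_LUM constants with values chosen per sensor class, since SIF and VGA sensors measure average luminance over different autoexposure areas. A short sketch of that selection plus the brightness scaling used in the hunk; function and parameter names are illustrative:

/* Pick the autogain deadzone and luminance target for a sensor class,
 * then scale the target by the 0..127 brightness control as the hunk
 * above does. */
static void autogain_targets(int is_sif, int brightness,
			     int *deadzone, int *target_lum)
{
	if (is_sif) {			/* SIF: smaller autoexposure area */
		*deadzone = 1000;
		*target_lum = 7000;
	} else {			/* VGA */
		*deadzone = 3000;
		*target_lum = 23000;
	}
	*target_lum = brightness * *target_lum / 127;
}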
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 53cb82d9e7c6..3373b8d9d2a8 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -24,6 +24,8 @@
24#include "gspca.h" 24#include "gspca.h"
25#include "jpeg.h" 25#include "jpeg.h"
26 26
27#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
28
27MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); 29MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
28MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver"); 30MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
29MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
@@ -35,23 +37,26 @@ struct sd {
35 atomic_t avg_lum; 37 atomic_t avg_lum;
36 unsigned int exposure; 38 unsigned int exposure;
37 39
38 unsigned short brightness; 40 __u16 brightness;
39 unsigned char contrast; 41 __u8 contrast;
40 unsigned char colors; 42 __u8 colors;
41 unsigned char autogain; 43 __u8 autogain;
44 __u8 blue;
45 __u8 red;
42 __u8 vflip; /* ov7630 only */ 46 __u8 vflip; /* ov7630 only */
47 __u8 infrared; /* mi0360 only */
43 48
44 signed char ag_cnt; 49 __s8 ag_cnt;
45#define AG_CNT_START 13 50#define AG_CNT_START 13
46 51
47 char qindex; 52 __u8 qindex;
48 unsigned char bridge; 53 __u8 bridge;
49#define BRIDGE_SN9C102P 0 54#define BRIDGE_SN9C102P 0
50#define BRIDGE_SN9C105 1 55#define BRIDGE_SN9C105 1
51#define BRIDGE_SN9C110 2 56#define BRIDGE_SN9C110 2
52#define BRIDGE_SN9C120 3 57#define BRIDGE_SN9C120 3
53#define BRIDGE_SN9C325 4 58#define BRIDGE_SN9C325 4
54 char sensor; /* Type of image sensor chip */ 59 __u8 sensor; /* Type of image sensor chip */
55#define SENSOR_HV7131R 0 60#define SENSOR_HV7131R 0
56#define SENSOR_MI0360 1 61#define SENSOR_MI0360 1
57#define SENSOR_MO4000 2 62#define SENSOR_MO4000 2
@@ -59,7 +64,7 @@ struct sd {
59#define SENSOR_OV7630 4 64#define SENSOR_OV7630 4
60#define SENSOR_OV7648 5 65#define SENSOR_OV7648 5
61#define SENSOR_OV7660 6 66#define SENSOR_OV7660 6
62 unsigned char i2c_base; 67 __u8 i2c_base;
63}; 68};
64 69
65/* V4L2 controls supported by the driver */ 70/* V4L2 controls supported by the driver */
@@ -69,10 +74,16 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
69static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); 74static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
70static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); 75static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
71static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); 76static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
77static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
78static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
79static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
80static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);
72static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); 81static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
73static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); 82static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
74static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); 83static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
75static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); 84static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
85static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val);
86static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val);
76 87
77static struct ctrl sd_ctrls[] = { 88static struct ctrl sd_ctrls[] = {
78 { 89 {
@@ -84,7 +95,7 @@ static struct ctrl sd_ctrls[] = {
84#define BRIGHTNESS_MAX 0xffff 95#define BRIGHTNESS_MAX 0xffff
85 .maximum = BRIGHTNESS_MAX, 96 .maximum = BRIGHTNESS_MAX,
86 .step = 1, 97 .step = 1,
87#define BRIGHTNESS_DEF 0x7fff 98#define BRIGHTNESS_DEF 0x8000
88 .default_value = BRIGHTNESS_DEF, 99 .default_value = BRIGHTNESS_DEF,
89 }, 100 },
90 .set = sd_setbrightness, 101 .set = sd_setbrightness,
@@ -111,7 +122,7 @@ static struct ctrl sd_ctrls[] = {
111 .type = V4L2_CTRL_TYPE_INTEGER, 122 .type = V4L2_CTRL_TYPE_INTEGER,
112 .name = "Color", 123 .name = "Color",
113 .minimum = 0, 124 .minimum = 0,
114 .maximum = 64, 125 .maximum = 40,
115 .step = 1, 126 .step = 1,
116#define COLOR_DEF 32 127#define COLOR_DEF 32
117 .default_value = COLOR_DEF, 128 .default_value = COLOR_DEF,
@@ -119,7 +130,35 @@ static struct ctrl sd_ctrls[] = {
119 .set = sd_setcolors, 130 .set = sd_setcolors,
120 .get = sd_getcolors, 131 .get = sd_getcolors,
121 }, 132 },
122#define AUTOGAIN_IDX 3 133 {
134 {
135 .id = V4L2_CID_BLUE_BALANCE,
136 .type = V4L2_CTRL_TYPE_INTEGER,
137 .name = "Blue Balance",
138 .minimum = 24,
139 .maximum = 40,
140 .step = 1,
141#define BLUE_BALANCE_DEF 32
142 .default_value = BLUE_BALANCE_DEF,
143 },
144 .set = sd_setblue_balance,
145 .get = sd_getblue_balance,
146 },
147 {
148 {
149 .id = V4L2_CID_RED_BALANCE,
150 .type = V4L2_CTRL_TYPE_INTEGER,
151 .name = "Red Balance",
152 .minimum = 24,
153 .maximum = 40,
154 .step = 1,
155#define RED_BALANCE_DEF 32
156 .default_value = RED_BALANCE_DEF,
157 },
158 .set = sd_setred_balance,
159 .get = sd_getred_balance,
160 },
161#define AUTOGAIN_IDX 5
123 { 162 {
124 { 163 {
125 .id = V4L2_CID_AUTOGAIN, 164 .id = V4L2_CID_AUTOGAIN,
@@ -135,7 +174,7 @@ static struct ctrl sd_ctrls[] = {
135 .get = sd_getautogain, 174 .get = sd_getautogain,
136 }, 175 },
137/* ov7630 only */ 176/* ov7630 only */
138#define VFLIP_IDX 4 177#define VFLIP_IDX 6
139 { 178 {
140 { 179 {
141 .id = V4L2_CID_VFLIP, 180 .id = V4L2_CID_VFLIP,
@@ -150,9 +189,43 @@ static struct ctrl sd_ctrls[] = {
150 .set = sd_setvflip, 189 .set = sd_setvflip,
151 .get = sd_getvflip, 190 .get = sd_getvflip,
152 }, 191 },
192/* mi0360 only */
193#define INFRARED_IDX 7
194 {
195 {
196 .id = V4L2_CID_INFRARED,
197 .type = V4L2_CTRL_TYPE_BOOLEAN,
198 .name = "Infrared",
199 .minimum = 0,
200 .maximum = 1,
201 .step = 1,
202#define INFRARED_DEF 0
203 .default_value = INFRARED_DEF,
204 },
205 .set = sd_setinfrared,
206 .get = sd_getinfrared,
207 },
208};
209
210/* table of the disabled controls */
211static __u32 ctrl_dis[] = {
212 (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
213 /* SENSOR_HV7131R 0 */
214 (1 << VFLIP_IDX),
215 /* SENSOR_MI0360 1 */
216 (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
217 /* SENSOR_MO4000 2 */
218 (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
219 /* SENSOR_OM6802 3 */
220 (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX),
221 /* SENSOR_OV7630 4 */
222 (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
223 /* SENSOR_OV7648 5 */
224 (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
225 /* SENSOR_OV7660 6 */
153}; 226};
154 227
155static struct v4l2_pix_format vga_mode[] = { 228static const struct v4l2_pix_format vga_mode[] = {
156 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 229 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
157 .bytesperline = 160, 230 .bytesperline = 160,
158 .sizeimage = 160 * 120 * 4 / 8 + 590, 231 .sizeimage = 160 * 120 * 4 / 8 + 590,
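The new ctrl_dis[] table above encodes, per sensor, which V4L2 controls should be hidden: each entry is a bitmask indexed by the control's position in sd_ctrls[]. A compact sketch of the scheme, with illustrative sensor and control indices rather than the driver's own:

enum { CTRL_AUTOGAIN = 5, CTRL_VFLIP = 6, CTRL_INFRARED = 7 };
enum { SENSOR_A, SENSOR_B };		/* illustrative sensor indices */

static const unsigned int ctrl_disabled[] = {
	[SENSOR_A] = (1u << CTRL_INFRARED) | (1u << CTRL_VFLIP),
	[SENSOR_B] = (1u << CTRL_VFLIP),
};

/* Non-zero when control ctrl_idx is not available on this sensor. */
static int ctrl_is_disabled(int sensor, int ctrl_idx)
{
	return (ctrl_disabled[sensor] >> ctrl_idx) & 1;
}

A single table lookup at config time then replaces the per-sensor switch that the hunk removes.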
@@ -231,13 +304,13 @@ static const __u8 sn_ov7630[] = {
231 304
232static const __u8 sn_ov7648[] = { 305static const __u8 sn_ov7648[] = {
233/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 306/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
234 0x00, 0x21, 0x62, 0x00, 0x1a, 0x20, 0x20, 0x20, 307 0x00, 0x63, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20,
235/* reg8 reg9 rega regb regc regd rege regf */ 308/* reg8 reg9 rega regb regc regd rege regf */
236 0xa1, 0x6e, 0x18, 0x65, 0x00, 0x00, 0x00, 0x10, 309 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
237/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 310/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
238 0x03, 0x00, 0x00, 0x06, 0x06, 0x28, 0x1e, 0x82, 311 0x03, 0x00, 0x00, 0x01, 0x00, 0x28, 0x1e, 0x00,
239/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */ 312/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */
240 0x07, 0x00, 0x00, 0x00, 0x00, 0x00 313 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00
241}; 314};
242 315
243static const __u8 sn_ov7660[] = { 316static const __u8 sn_ov7660[] = {
@@ -469,6 +542,53 @@ static const __u8 ov7630_sensor_init[][8] = {
469/* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */ 542/* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */
470 {} 543 {}
471}; 544};
545
546static const __u8 ov7648_sensor_init[][8] = {
547 {0xa1, 0x21, 0x76, 0x00, 0x00, 0x00, 0x00, 0x10},
548 {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */
549 {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
550 {0xd1, 0x21, 0x03, 0xa4, 0x30, 0x88, 0x00, 0x10},
551 {0xb1, 0x21, 0x11, 0x80, 0x08, 0x00, 0x00, 0x10},
552 {0xc1, 0x21, 0x13, 0xa0, 0x04, 0x84, 0x00, 0x10},
553 {0xd1, 0x21, 0x17, 0x1a, 0x02, 0xba, 0xf4, 0x10},
554 {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
555 {0xd1, 0x21, 0x1f, 0x41, 0xc0, 0x80, 0x80, 0x10},
556 {0xd1, 0x21, 0x23, 0xde, 0xa0, 0x80, 0x32, 0x10},
557 {0xd1, 0x21, 0x27, 0xfe, 0xa0, 0x00, 0x91, 0x10},
558 {0xd1, 0x21, 0x2b, 0x00, 0x88, 0x85, 0x80, 0x10},
559 {0xc1, 0x21, 0x2f, 0x9c, 0x00, 0xc4, 0x00, 0x10},
560 {0xd1, 0x21, 0x60, 0xa6, 0x60, 0x88, 0x12, 0x10},
561 {0xd1, 0x21, 0x64, 0x88, 0x00, 0x00, 0x94, 0x10},
562 {0xd1, 0x21, 0x68, 0x7a, 0x0c, 0x00, 0x00, 0x10},
563 {0xd1, 0x21, 0x6c, 0x11, 0x33, 0x22, 0x00, 0x10},
564 {0xd1, 0x21, 0x70, 0x11, 0x00, 0x10, 0x50, 0x10},
565 {0xd1, 0x21, 0x74, 0x20, 0x06, 0x00, 0xb5, 0x10},
566 {0xd1, 0x21, 0x78, 0x8a, 0x00, 0x00, 0x00, 0x10},
567 {0xb1, 0x21, 0x7c, 0x00, 0x43, 0x00, 0x00, 0x10},
568
569 {0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10},
570/* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */
571/* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */
572 {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10},
573/*...*/
574/* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */
575/* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, jfm done */
576 {0xa1, 0x21, 0x19, 0x02, 0x00, 0x00, 0x00, 0x10},
577 {0xa1, 0x21, 0x10, 0x32, 0x00, 0x00, 0x00, 0x10},
578/* {0xa1, 0x21, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */
579/* {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, * GAIN - def */
580/* {0xb1, 0x21, 0x01, 0x6c, 0x6c, 0x00, 0x00, 0x10}, * B R - def: 80 */
581/*...*/
582 {0xa1, 0x21, 0x11, 0x81, 0x00, 0x00, 0x00, 0x10}, /* CLKRC */
583/* {0xa1, 0x21, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */
584/* {0xa1, 0x21, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */
585/* {0xa1, 0x21, 0x2a, 0x91, 0x00, 0x00, 0x00, 0x10}, jfm done */
586/* {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */
587/* {0xb1, 0x21, 0x01, 0x64, 0x84, 0x00, 0x00, 0x10}, * B R - def: 80 */
588
589 {}
590};
591
472static const __u8 ov7660_sensor_init[][8] = { 592static const __u8 ov7660_sensor_init[][8] = {
473 {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */ 593 {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */
474/* (delay 20ms) */ 594/* (delay 20ms) */
@@ -557,64 +677,6 @@ static const __u8 ov7660_sensor_init[][8] = {
557 {0xa1, 0x21, 0x2b, 0xc3, 0x00, 0x00, 0x00, 0x10}, 677 {0xa1, 0x21, 0x2b, 0xc3, 0x00, 0x00, 0x00, 0x10},
558 {} 678 {}
559}; 679};
560/* reg 0x04 reg 0x07 reg 0x10 */
561/* expo = (COM1 & 0x02) | ((AECHH & 0x2f) << 10) | (AECh << 2) */
562
563static const __u8 ov7648_sensor_init[][8] = {
564 {0xC1, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00},
565 {0xC1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00},
566 {0xC1, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00},
567 {0xA1, 0x6E, 0x3F, 0x20, 0x00, 0x00, 0x00, 0x10},
568 {0xA1, 0x6E, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x10},
569 {0xA1, 0x6E, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x10},
570 {0xD1, 0x6E, 0x04, 0x02, 0xB1, 0x02, 0x39, 0x10},
571 {0xD1, 0x6E, 0x08, 0x00, 0x01, 0x00, 0x00, 0x10},
572 {0xD1, 0x6E, 0x0C, 0x02, 0x7F, 0x01, 0xE0, 0x10},
573 {0xD1, 0x6E, 0x12, 0x03, 0x02, 0x00, 0x03, 0x10},
574 {0xD1, 0x6E, 0x16, 0x85, 0x40, 0x4A, 0x40, 0x10},
575 {0xC1, 0x6E, 0x1A, 0x00, 0x80, 0x00, 0x00, 0x10},
576 {0xD1, 0x6E, 0x1D, 0x08, 0x03, 0x00, 0x00, 0x10},
577 {0xD1, 0x6E, 0x23, 0x00, 0xB0, 0x00, 0x94, 0x10},
578 {0xD1, 0x6E, 0x27, 0x58, 0x00, 0x00, 0x00, 0x10},
579 {0xD1, 0x6E, 0x2D, 0x14, 0x35, 0x61, 0x84, 0x10},
580 {0xD1, 0x6E, 0x31, 0xA2, 0xBD, 0xD8, 0xFF, 0x10},
581 {0xD1, 0x6E, 0x35, 0x06, 0x1E, 0x12, 0x02, 0x10},
582 {0xD1, 0x6E, 0x39, 0xAA, 0x53, 0x37, 0xD5, 0x10},
583 {0xA1, 0x6E, 0x3D, 0xF2, 0x00, 0x00, 0x00, 0x10},
584 {0xD1, 0x6E, 0x3E, 0x00, 0x00, 0x80, 0x03, 0x10},
585 {0xD1, 0x6E, 0x42, 0x03, 0x00, 0x00, 0x00, 0x10},
586 {0xC1, 0x6E, 0x46, 0x00, 0x80, 0x80, 0x00, 0x10},
587 {0xD1, 0x6E, 0x4B, 0x02, 0xEF, 0x08, 0xCD, 0x10},
588 {0xD1, 0x6E, 0x4F, 0x00, 0xD0, 0x00, 0xA0, 0x10},
589 {0xD1, 0x6E, 0x53, 0x01, 0xAA, 0x01, 0x40, 0x10},
590 {0xD1, 0x6E, 0x5A, 0x50, 0x04, 0x30, 0x03, 0x10},
591 {0xA1, 0x6E, 0x5E, 0x00, 0x00, 0x00, 0x00, 0x10},
592 {0xD1, 0x6E, 0x5F, 0x10, 0x40, 0xFF, 0x00, 0x10},
593 /* {0xD1, 0x6E, 0x63, 0x40, 0x40, 0x00, 0x00, 0x10},
594 {0xD1, 0x6E, 0x67, 0x00, 0x00, 0x00, 0x00, 0x10},
595 * This is currently setting a
596 * blue tint, and some other things; I leave it here for future testing if
597 * someone is having problems with color on this sensor
598 {0xD1, 0x6E, 0x6B, 0x00, 0x00, 0x00, 0x00, 0x10},
599 {0xD1, 0x6E, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x10},
600 {0xC1, 0x6E, 0x73, 0x10, 0x80, 0xEB, 0x00, 0x10},
601 {0xA1, 0x6E, 0x1E, 0x03, 0x00, 0x00, 0x00, 0x10},
602 {0xA1, 0x6E, 0x15, 0x01, 0x00, 0x00, 0x00, 0x10},
603 {0xC1, 0x6E, 0x16, 0x40, 0x40, 0x40, 0x00, 0x10},
604 {0xA1, 0x6E, 0x1D, 0x08, 0x00, 0x00, 0x00, 0x10},
605 {0xA1, 0x6E, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10},
606 {0xA1, 0x6E, 0x07, 0xB5, 0x00, 0x00, 0x00, 0x10},
607 {0xA1, 0x6E, 0x18, 0x6B, 0x00, 0x00, 0x00, 0x10},
608 {0xA1, 0x6E, 0x1D, 0x08, 0x00, 0x00, 0x00, 0x10},
609 {0xA1, 0x6E, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10},
610 {0xA1, 0x6E, 0x07, 0xB8, 0x00, 0x00, 0x00, 0x10}, */
611 {0xC1, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00},
612 {0xA1, 0x6E, 0x06, 0x03, 0x00, 0x00, 0x00, 0x10}, /* Bright... */
613 {0xA1, 0x6E, 0x07, 0x66, 0x00, 0x00, 0x00, 0x10}, /* B.. */
614 {0xC1, 0x6E, 0x1A, 0x03, 0x65, 0x90, 0x00, 0x10}, /* Bright/Witen....*/
615/* {0xC1, 0x6E, 0x16, 0x45, 0x40, 0x60, 0x00, 0x10}, * Bright/Witene */
616 {}
617};
618 680
619static const __u8 qtable4[] = { 681static const __u8 qtable4[] = {
620 0x06, 0x04, 0x04, 0x06, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x08, 0x06, 682 0x06, 0x04, 0x04, 0x06, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x08, 0x06,
@@ -757,8 +819,6 @@ static void i2c_r5(struct gspca_dev *gspca_dev, __u8 reg)
757 819
758static int probesensor(struct gspca_dev *gspca_dev) 820static int probesensor(struct gspca_dev *gspca_dev)
759{ 821{
760 struct sd *sd = (struct sd *) gspca_dev;
761
762 i2c_w1(gspca_dev, 0x02, 0); /* sensor wakeup */ 822 i2c_w1(gspca_dev, 0x02, 0); /* sensor wakeup */
763 msleep(10); 823 msleep(10);
764 reg_w1(gspca_dev, 0x02, 0x66); /* Gpio on */ 824 reg_w1(gspca_dev, 0x02, 0x66); /* Gpio on */
@@ -770,8 +830,7 @@ static int probesensor(struct gspca_dev *gspca_dev)
770 && gspca_dev->usb_buf[3] == 0x00 830 && gspca_dev->usb_buf[3] == 0x00
771 && gspca_dev->usb_buf[4] == 0x00) { 831 && gspca_dev->usb_buf[4] == 0x00) {
772 PDEBUG(D_PROBE, "Find Sensor sn9c102P HV7131R"); 832 PDEBUG(D_PROBE, "Find Sensor sn9c102P HV7131R");
773 sd->sensor = SENSOR_HV7131R; 833 return 0;
774 return SENSOR_HV7131R;
775 } 834 }
776 PDEBUG(D_PROBE, "Find Sensor 0x%02x 0x%02x 0x%02x", 835 PDEBUG(D_PROBE, "Find Sensor 0x%02x 0x%02x 0x%02x",
777 gspca_dev->usb_buf[0], gspca_dev->usb_buf[1], 836 gspca_dev->usb_buf[0], gspca_dev->usb_buf[1],
@@ -827,17 +886,20 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
827 reg_w1(gspca_dev, 0x01, 0x40); 886 reg_w1(gspca_dev, 0x01, 0x40);
828 break; 887 break;
829 case SENSOR_OV7648: 888 case SENSOR_OV7648:
830 reg_w1(gspca_dev, 0x01, 0x43); 889 reg_w1(gspca_dev, 0x01, 0x63);
831 reg_w1(gspca_dev, 0x17, 0xae); 890 reg_w1(gspca_dev, 0x17, 0x20);
832 reg_w1(gspca_dev, 0x01, 0x42); 891 reg_w1(gspca_dev, 0x01, 0x42);
833 break; 892 break;
834/*jfm: from win trace */ 893/*jfm: from win trace */
835 case SENSOR_OV7660: 894 case SENSOR_OV7660:
836 reg_w1(gspca_dev, 0x01, 0x61); 895 if (sd->bridge == BRIDGE_SN9C120) {
837 reg_w1(gspca_dev, 0x17, 0x20); 896 reg_w1(gspca_dev, 0x01, 0x61);
838 reg_w1(gspca_dev, 0x01, 0x60); 897 reg_w1(gspca_dev, 0x17, 0x20);
839 reg_w1(gspca_dev, 0x01, 0x40); 898 reg_w1(gspca_dev, 0x01, 0x60);
840 break; 899 reg_w1(gspca_dev, 0x01, 0x40);
900 break;
901 }
902 /* fall thru */
841 default: 903 default:
842 reg_w1(gspca_dev, 0x01, 0x43); 904 reg_w1(gspca_dev, 0x01, 0x43);
843 reg_w1(gspca_dev, 0x17, 0x61); 905 reg_w1(gspca_dev, 0x17, 0x61);
@@ -922,6 +984,13 @@ static void ov7648_InitSensor(struct gspca_dev *gspca_dev)
922{ 984{
923 int i = 0; 985 int i = 0;
924 986
987 i2c_w8(gspca_dev, ov7648_sensor_init[i]);
988 i++;
989/* win: dble reset */
990 i2c_w8(gspca_dev, ov7648_sensor_init[i]); /* reset */
991 i++;
992 msleep(20);
993/* win: i2c reg read 00..7f */
925 while (ov7648_sensor_init[i][0]) { 994 while (ov7648_sensor_init[i][0]) {
926 i2c_w8(gspca_dev, ov7648_sensor_init[i]); 995 i2c_w8(gspca_dev, ov7648_sensor_init[i]);
927 i++; 996 i++;
@@ -961,19 +1030,14 @@ static int sd_config(struct gspca_dev *gspca_dev,
961 sd->brightness = BRIGHTNESS_DEF; 1030 sd->brightness = BRIGHTNESS_DEF;
962 sd->contrast = CONTRAST_DEF; 1031 sd->contrast = CONTRAST_DEF;
963 sd->colors = COLOR_DEF; 1032 sd->colors = COLOR_DEF;
1033 sd->blue = BLUE_BALANCE_DEF;
1034 sd->red = RED_BALANCE_DEF;
964 sd->autogain = AUTOGAIN_DEF; 1035 sd->autogain = AUTOGAIN_DEF;
965 sd->ag_cnt = -1; 1036 sd->ag_cnt = -1;
1037 sd->vflip = VFLIP_DEF;
1038 sd->infrared = INFRARED_DEF;
966 1039
967 switch (sd->sensor) { 1040 gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
968 case SENSOR_OV7630:
969 case SENSOR_OV7648:
970 case SENSOR_OV7660:
971 gspca_dev->ctrl_dis = (1 << AUTOGAIN_IDX);
972 break;
973 }
974 if (sd->sensor != SENSOR_OV7630)
975 gspca_dev->ctrl_dis |= (1 << VFLIP_IDX);
976
977 return 0; 1041 return 0;
978} 1042}
979 1043
@@ -981,7 +1045,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
981static int sd_init(struct gspca_dev *gspca_dev) 1045static int sd_init(struct gspca_dev *gspca_dev)
982{ 1046{
983 struct sd *sd = (struct sd *) gspca_dev; 1047 struct sd *sd = (struct sd *) gspca_dev;
984/* const __u8 *sn9c1xx; */
985 __u8 regGpio[] = { 0x29, 0x74 }; 1048 __u8 regGpio[] = { 0x29, 0x74 };
986 __u8 regF1; 1049 __u8 regF1;
987 1050
@@ -1100,32 +1163,13 @@ static unsigned int setexposure(struct gspca_dev *gspca_dev,
1100 return expo; 1163 return expo;
1101} 1164}
1102 1165
1103/* this function is used for sensors o76xx only */
1104static void setbrightcont(struct gspca_dev *gspca_dev)
1105{
1106 struct sd *sd = (struct sd *) gspca_dev;
1107 int val;
1108 __u8 reg84_full[0x15];
1109
1110 memcpy(reg84_full, reg84, sizeof reg84_full);
1111 val = sd->contrast * 0x30 / CONTRAST_MAX + 0x10; /* 10..40 */
1112 reg84_full[0] = (val + 1) / 2; /* red */
1113 reg84_full[2] = val; /* green */
1114 reg84_full[4] = (val + 1) / 5; /* blue */
1115 val = (sd->brightness - BRIGHTNESS_DEF) * 0x10
1116 / BRIGHTNESS_MAX;
1117 reg84_full[0x12] = val & 0x1f; /* 5:0 signed value */
1118 reg_w(gspca_dev, 0x84, reg84_full, sizeof reg84_full);
1119}
1120
1121/* sensor != ov76xx */
1122static void setbrightness(struct gspca_dev *gspca_dev) 1166static void setbrightness(struct gspca_dev *gspca_dev)
1123{ 1167{
1124 struct sd *sd = (struct sd *) gspca_dev; 1168 struct sd *sd = (struct sd *) gspca_dev;
1125 unsigned int expo; 1169 unsigned int expo;
1126 __u8 k2; 1170 __u8 k2;
1127 1171
1128 k2 = sd->brightness >> 10; 1172 k2 = ((int) sd->brightness - 0x8000) >> 10;
1129 switch (sd->sensor) { 1173 switch (sd->sensor) {
1130 case SENSOR_HV7131R: 1174 case SENSOR_HV7131R:
1131 expo = sd->brightness << 4; 1175 expo = sd->brightness << 4;
@@ -1147,38 +1191,49 @@ static void setbrightness(struct gspca_dev *gspca_dev)
1147 break; 1191 break;
1148 } 1192 }
1149 1193
1150 reg_w1(gspca_dev, 0x96, k2); 1194 reg_w1(gspca_dev, 0x96, k2); /* color matrix Y offset */
1151} 1195}
1152 1196
1153/* sensor != ov76xx */
1154static void setcontrast(struct gspca_dev *gspca_dev) 1197static void setcontrast(struct gspca_dev *gspca_dev)
1155{ 1198{
1156 struct sd *sd = (struct sd *) gspca_dev; 1199 struct sd *sd = (struct sd *) gspca_dev;
1157 __u8 k2; 1200 __u8 k2;
1158 __u8 contrast[] = { 0x00, 0x00, 0x28, 0x00, 0x07, 0x00 }; 1201 __u8 contrast[6];
1159 1202
1160 k2 = sd->contrast; 1203 k2 = sd->contrast * 0x30 / (CONTRAST_MAX + 1) + 0x10; /* 10..40 */
1161 contrast[2] = k2; 1204 contrast[0] = (k2 + 1) / 2; /* red */
1162 contrast[0] = (k2 + 1) >> 1; 1205 contrast[1] = 0;
1163 contrast[4] = (k2 + 1) / 5; 1206 contrast[2] = k2; /* green */
1164 reg_w(gspca_dev, 0x84, contrast, 6); 1207 contrast[3] = 0;
1208 contrast[4] = (k2 + 1) / 5; /* blue */
1209 contrast[5] = 0;
1210 reg_w(gspca_dev, 0x84, contrast, sizeof contrast);
1165} 1211}
1166 1212
1167static void setcolors(struct gspca_dev *gspca_dev) 1213static void setcolors(struct gspca_dev *gspca_dev)
1168{ 1214{
1169 struct sd *sd = (struct sd *) gspca_dev; 1215 struct sd *sd = (struct sd *) gspca_dev;
1170 __u8 blue, red; 1216 int i, v;
1171 1217 __u8 reg8a[12]; /* U & V gains */
1172 if (sd->colors >= 32) { 1218 static __s16 uv[6] = { /* same as reg84 in signed decimal */
1173 red = 32 + (sd->colors - 32) / 2; 1219 -24, -38, 64, /* UR UG UB */
1174 blue = 64 - sd->colors; 1220 62, -51, -9 /* VR VG VB */
1175 } else { 1221 };
1176 red = sd->colors; 1222 for (i = 0; i < 6; i++) {
1177 blue = 32 + (32 - sd->colors) / 2; 1223 v = uv[i] * sd->colors / COLOR_DEF;
1224 reg8a[i * 2] = v;
1225 reg8a[i * 2 + 1] = (v >> 8) & 0x0f;
1178 } 1226 }
1179 reg_w1(gspca_dev, 0x05, red); 1227 reg_w(gspca_dev, 0x8a, reg8a, sizeof reg8a);
1228}
1229
1230static void setredblue(struct gspca_dev *gspca_dev)
1231{
1232 struct sd *sd = (struct sd *) gspca_dev;
1233
1234 reg_w1(gspca_dev, 0x05, sd->red);
1180/* reg_w1(gspca_dev, 0x07, 32); */ 1235/* reg_w1(gspca_dev, 0x07, 32); */
1181 reg_w1(gspca_dev, 0x06, blue); 1236 reg_w1(gspca_dev, 0x06, sd->blue);
1182} 1237}
1183 1238
1184static void setautogain(struct gspca_dev *gspca_dev) 1239static void setautogain(struct gspca_dev *gspca_dev)
@@ -1195,12 +1250,18 @@ static void setautogain(struct gspca_dev *gspca_dev)
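The rewritten setcolors() above scales a fixed signed U/V colour matrix by the saturation control and packs each coefficient into a low byte plus a 4-bit high part for the 0x8a register block. A standalone sketch of that arithmetic, assuming stdint types; COLOR_DEF matches the diff, the helper name is illustrative:

#include <stdint.h>

#define COLOR_DEF 32			/* default saturation, as in the diff */

static void scale_uv_gains(int colors, uint8_t out[12])
{
	static const int16_t uv[6] = {	/* UR UG UB  VR VG VB */
		-24, -38, 64, 62, -51, -9
	};
	int i, v;

	for (i = 0; i < 6; i++) {
		v = uv[i] * colors / COLOR_DEF;	  /* colors == COLOR_DEF keeps defaults */
		out[i * 2] = v;			  /* low 8 bits */
		out[i * 2 + 1] = (v >> 8) & 0x0f; /* high nibble (sign bits) */
	}
}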
1195 1250
1196static void setvflip(struct sd *sd) 1251static void setvflip(struct sd *sd)
1197{ 1252{
1198 if (sd->sensor != SENSOR_OV7630)
1199 return;
1200 i2c_w1(&sd->gspca_dev, 0x75, /* COMN */ 1253 i2c_w1(&sd->gspca_dev, 0x75, /* COMN */
1201 sd->vflip ? 0x82 : 0x02); 1254 sd->vflip ? 0x82 : 0x02);
1202} 1255}
1203 1256
1257static void setinfrared(struct sd *sd)
1258{
1259/*fixme: different sequence for StarCam Clip and StarCam 370i */
1260/* Clip */
1261 i2c_w1(&sd->gspca_dev, 0x02, /* gpio */
1262 sd->infrared ? 0x66 : 0x64);
1263}
1264
1204/* -- start the camera -- */ 1265/* -- start the camera -- */
1205static int sd_start(struct gspca_dev *gspca_dev) 1266static int sd_start(struct gspca_dev *gspca_dev)
1206{ 1267{
@@ -1235,28 +1296,39 @@ static int sd_start(struct gspca_dev *gspca_dev)
1235 reg17 = 0xe2; 1296 reg17 = 0xe2;
1236 break; 1297 break;
1237 case SENSOR_OV7648: 1298 case SENSOR_OV7648:
1238 reg17 = 0xae; 1299 reg17 = 0x20;
1239 break; 1300 break;
1240/*jfm: from win trace */ 1301/*jfm: from win trace */
1241 case SENSOR_OV7660: 1302 case SENSOR_OV7660:
1242 reg17 = 0xa0; 1303 if (sd->bridge == BRIDGE_SN9C120) {
1243 break; 1304 reg17 = 0xa0;
1305 break;
1306 }
1307 /* fall thru */
1244 default: 1308 default:
1245 reg17 = 0x60; 1309 reg17 = 0x60;
1246 break; 1310 break;
1247 } 1311 }
1248 reg_w1(gspca_dev, 0x17, reg17); 1312 reg_w1(gspca_dev, 0x17, reg17);
1249 reg_w1(gspca_dev, 0x05, sn9c1xx[5]); 1313/* set reg1 was here */
1250 reg_w1(gspca_dev, 0x07, sn9c1xx[7]); 1314 reg_w1(gspca_dev, 0x05, sn9c1xx[5]); /* red */
1251 reg_w1(gspca_dev, 0x06, sn9c1xx[6]); 1315 reg_w1(gspca_dev, 0x07, sn9c1xx[7]); /* green */
1316 reg_w1(gspca_dev, 0x06, sn9c1xx[6]); /* blue */
1252 reg_w1(gspca_dev, 0x14, sn9c1xx[0x14]); 1317 reg_w1(gspca_dev, 0x14, sn9c1xx[0x14]);
1253 reg_w(gspca_dev, 0x20, gamma_def, sizeof gamma_def); 1318 reg_w(gspca_dev, 0x20, gamma_def, sizeof gamma_def);
1254 for (i = 0; i < 8; i++) 1319 for (i = 0; i < 8; i++)
1255 reg_w(gspca_dev, 0x84, reg84, sizeof reg84); 1320 reg_w(gspca_dev, 0x84, reg84, sizeof reg84);
1256 switch (sd->sensor) { 1321 switch (sd->sensor) {
1257 case SENSOR_OV7660: 1322 case SENSOR_OV7648:
1258 reg_w1(gspca_dev, 0x9a, 0x05); 1323 reg_w1(gspca_dev, 0x9a, 0x0a);
1324 reg_w1(gspca_dev, 0x99, 0x60);
1259 break; 1325 break;
1326 case SENSOR_OV7660:
1327 if (sd->bridge == BRIDGE_SN9C120) {
1328 reg_w1(gspca_dev, 0x9a, 0x05);
1329 break;
1330 }
1331 /* fall thru */
1260 default: 1332 default:
1261 reg_w1(gspca_dev, 0x9a, 0x08); 1333 reg_w1(gspca_dev, 0x9a, 0x08);
1262 reg_w1(gspca_dev, 0x99, 0x59); 1334 reg_w1(gspca_dev, 0x99, 0x59);
@@ -1265,10 +1337,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
1265 1337
1266 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 1338 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
1267 if (mode) 1339 if (mode)
1268 reg1 = 0x46; /* 320 clk 48Mhz */ 1340 reg1 = 0x46; /* 320x240: clk 48 MHz, video trf enable */
1269 else 1341 else
1270 reg1 = 0x06; /* 640 clk 24Mz */ 1342 reg1 = 0x06; /* 640x480: clk 24 MHz, video trf enable */
1271 reg17 = 0x61; 1343 reg17 = 0x61; /* 0x20: enable sensor clock */
1272 switch (sd->sensor) { 1344 switch (sd->sensor) {
1273 case SENSOR_HV7131R: 1345 case SENSOR_HV7131R:
1274 hv7131R_InitSensor(gspca_dev); 1346 hv7131R_InitSensor(gspca_dev);
@@ -1298,23 +1370,21 @@ static int sd_start(struct gspca_dev *gspca_dev)
1298 break; 1370 break;
1299 case SENSOR_OV7648: 1371 case SENSOR_OV7648:
1300 ov7648_InitSensor(gspca_dev); 1372 ov7648_InitSensor(gspca_dev);
1301 reg17 = 0xa2; 1373 reg17 = 0x21;
1302 reg1 = 0x44; 1374/* reg1 = 0x42; * 42 - 46? */
1303/* if (mode)
1304 ; * 320x2...
1305 else
1306 ; * 640x... */
1307 break; 1375 break;
1308 default: 1376 default:
1309/* case SENSOR_OV7660: */ 1377/* case SENSOR_OV7660: */
1310 ov7660_InitSensor(gspca_dev); 1378 ov7660_InitSensor(gspca_dev);
1311 if (mode) { 1379 if (sd->bridge == BRIDGE_SN9C120) {
1312/* reg17 = 0x21; * 320 */ 1380 if (mode) { /* 320x240 - 160x120 */
1313/* reg1 = 0x44; */ 1381 reg17 = 0xa2;
1314/* reg1 = 0x46; (done) */ 1382 reg1 = 0x44; /* 48 MHz, video trf enable */
1383 }
1315 } else { 1384 } else {
1316 reg17 = 0xa2; /* 640 */ 1385 reg17 = 0x22;
1317 reg1 = 0x44; 1386 reg1 = 0x06; /* 24 MHz, video trf enable
1387 * inverse power down */
1318 } 1388 }
1319 break; 1389 break;
1320 } 1390 }
@@ -1342,23 +1412,18 @@ static int sd_start(struct gspca_dev *gspca_dev)
1342 reg_w1(gspca_dev, 0x18, reg18); 1412 reg_w1(gspca_dev, 0x18, reg18);
1343 1413
1344 reg_w1(gspca_dev, 0x17, reg17); 1414 reg_w1(gspca_dev, 0x17, reg17);
1415 reg_w1(gspca_dev, 0x01, reg1);
1345 switch (sd->sensor) { 1416 switch (sd->sensor) {
1346 case SENSOR_HV7131R:
1347 case SENSOR_MI0360: 1417 case SENSOR_MI0360:
1348 case SENSOR_MO4000: 1418 setinfrared(sd);
1349 case SENSOR_OM6802:
1350 setbrightness(gspca_dev);
1351 setcontrast(gspca_dev);
1352 break; 1419 break;
1353 case SENSOR_OV7630: 1420 case SENSOR_OV7630:
1354 setvflip(sd); 1421 setvflip(sd);
1355 /* fall thru */
1356 default: /* OV76xx */
1357 setbrightcont(gspca_dev);
1358 break; 1422 break;
1359 } 1423 }
1424 setbrightness(gspca_dev);
1425 setcontrast(gspca_dev);
1360 setautogain(gspca_dev); 1426 setautogain(gspca_dev);
1361 reg_w1(gspca_dev, 0x01, reg1);
1362 return 0; 1427 return 0;
1363} 1428}
1364 1429
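A quick standalone sketch (not part of the driver) of the default reg1/reg17 values that sd_start() above derives from the requested mode before the per-sensor overrides further down are applied; mode != 0 selects the reduced-resolution path:

#include <stdio.h>

int main(void)
{
	int mode;

	for (mode = 0; mode <= 1; mode++) {
		/* values and comments taken from the hunk above */
		unsigned char reg1 = mode ? 0x46  /* 320x240: 48 MHz clock, video trf enable */
					  : 0x06; /* 640x480: 24 MHz clock, video trf enable */
		unsigned char reg17 = 0x61;	  /* bit 0x20: enable sensor clock */

		printf("mode %d: reg1 = 0x%02x, reg17 = 0x%02x\n",
		       mode, reg1, reg17);
	}
	return 0;
}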
@@ -1369,6 +1434,8 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1369 { 0xa1, 0x11, 0x02, 0x09, 0x00, 0x00, 0x00, 0x10 }; 1434 { 0xa1, 0x11, 0x02, 0x09, 0x00, 0x00, 0x00, 0x10 };
1370 static const __u8 stopmi0360[] = 1435 static const __u8 stopmi0360[] =
1371 { 0xb1, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10 }; 1436 { 0xb1, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10 };
1437 static const __u8 stopov7648[] =
1438 { 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 };
1372 __u8 data; 1439 __u8 data;
1373 const __u8 *sn9c1xx; 1440 const __u8 *sn9c1xx;
1374 1441
@@ -1382,8 +1449,10 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1382 i2c_w8(gspca_dev, stopmi0360); 1449 i2c_w8(gspca_dev, stopmi0360);
1383 data = 0x29; 1450 data = 0x29;
1384 break; 1451 break;
1385 case SENSOR_OV7630:
1386 case SENSOR_OV7648: 1452 case SENSOR_OV7648:
1453 i2c_w8(gspca_dev, stopov7648);
1454 /* fall thru */
1455 case SENSOR_OV7630:
1387 data = 0x29; 1456 data = 0x29;
1388 break; 1457 break;
1389 default: 1458 default:
@@ -1437,7 +1506,7 @@ static void do_autogain(struct gspca_dev *gspca_dev)
1437 expotimes = 0; 1506 expotimes = 0;
1438 sd->exposure = setexposure(gspca_dev, 1507 sd->exposure = setexposure(gspca_dev,
1439 (unsigned int) expotimes); 1508 (unsigned int) expotimes);
1440 setcolors(gspca_dev); 1509 setredblue(gspca_dev);
1441 break; 1510 break;
1442 } 1511 }
1443 } 1512 }
@@ -1491,19 +1560,8 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
1491 struct sd *sd = (struct sd *) gspca_dev; 1560 struct sd *sd = (struct sd *) gspca_dev;
1492 1561
1493 sd->brightness = val; 1562 sd->brightness = val;
1494 if (gspca_dev->streaming) { 1563 if (gspca_dev->streaming)
1495 switch (sd->sensor) { 1564 setbrightness(gspca_dev);
1496 case SENSOR_HV7131R:
1497 case SENSOR_MI0360:
1498 case SENSOR_MO4000:
1499 case SENSOR_OM6802:
1500 setbrightness(gspca_dev);
1501 break;
1502 default: /* OV76xx */
1503 setbrightcont(gspca_dev);
1504 break;
1505 }
1506 }
1507 return 0; 1565 return 0;
1508} 1566}
1509 1567
@@ -1520,19 +1578,8 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
1520 struct sd *sd = (struct sd *) gspca_dev; 1578 struct sd *sd = (struct sd *) gspca_dev;
1521 1579
1522 sd->contrast = val; 1580 sd->contrast = val;
1523 if (gspca_dev->streaming) { 1581 if (gspca_dev->streaming)
1524 switch (sd->sensor) { 1582 setcontrast(gspca_dev);
1525 case SENSOR_HV7131R:
1526 case SENSOR_MI0360:
1527 case SENSOR_MO4000:
1528 case SENSOR_OM6802:
1529 setcontrast(gspca_dev);
1530 break;
1531 default: /* OV76xx */
1532 setbrightcont(gspca_dev);
1533 break;
1534 }
1535 }
1536 return 0; 1583 return 0;
1537} 1584}
1538 1585
@@ -1562,6 +1609,42 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
1562 return 0; 1609 return 0;
1563} 1610}
1564 1611
1612static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
1613{
1614 struct sd *sd = (struct sd *) gspca_dev;
1615
1616 sd->blue = val;
1617 if (gspca_dev->streaming)
1618 setredblue(gspca_dev);
1619 return 0;
1620}
1621
1622static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
1623{
1624 struct sd *sd = (struct sd *) gspca_dev;
1625
1626 *val = sd->blue;
1627 return 0;
1628}
1629
1630static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
1631{
1632 struct sd *sd = (struct sd *) gspca_dev;
1633
1634 sd->red = val;
1635 if (gspca_dev->streaming)
1636 setredblue(gspca_dev);
1637 return 0;
1638}
1639
1640static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
1641{
1642 struct sd *sd = (struct sd *) gspca_dev;
1643
1644 *val = sd->red;
1645 return 0;
1646}
1647
1565static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) 1648static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
1566{ 1649{
1567 struct sd *sd = (struct sd *) gspca_dev; 1650 struct sd *sd = (struct sd *) gspca_dev;
@@ -1598,6 +1681,24 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
1598 return 0; 1681 return 0;
1599} 1682}
1600 1683
1684static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val)
1685{
1686 struct sd *sd = (struct sd *) gspca_dev;
1687
1688 sd->infrared = val;
1689 if (gspca_dev->streaming)
1690 setinfrared(sd);
1691 return 0;
1692}
1693
1694static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val)
1695{
1696 struct sd *sd = (struct sd *) gspca_dev;
1697
1698 *val = sd->infrared;
1699 return 0;
1700}
1701
1601/* sub-driver description */ 1702/* sub-driver description */
1602static const struct sd_desc sd_desc = { 1703static const struct sd_desc sd_desc = {
1603 .name = MODULE_NAME, 1704 .name = MODULE_NAME,
@@ -1620,12 +1721,15 @@ static const __devinitdata struct usb_device_id device_table[] = {
1620#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE 1721#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1621 {USB_DEVICE(0x0458, 0x7025), BSI(SN9C120, MI0360, 0x5d)}, 1722 {USB_DEVICE(0x0458, 0x7025), BSI(SN9C120, MI0360, 0x5d)},
1622 {USB_DEVICE(0x0458, 0x702e), BSI(SN9C120, OV7660, 0x21)}, 1723 {USB_DEVICE(0x0458, 0x702e), BSI(SN9C120, OV7660, 0x21)},
1724#endif
1623 {USB_DEVICE(0x045e, 0x00f5), BSI(SN9C105, OV7660, 0x21)}, 1725 {USB_DEVICE(0x045e, 0x00f5), BSI(SN9C105, OV7660, 0x21)},
1624 {USB_DEVICE(0x045e, 0x00f7), BSI(SN9C105, OV7660, 0x21)}, 1726 {USB_DEVICE(0x045e, 0x00f7), BSI(SN9C105, OV7660, 0x21)},
1727#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1625 {USB_DEVICE(0x0471, 0x0327), BSI(SN9C105, MI0360, 0x5d)}, 1728 {USB_DEVICE(0x0471, 0x0327), BSI(SN9C105, MI0360, 0x5d)},
1626 {USB_DEVICE(0x0471, 0x0328), BSI(SN9C105, MI0360, 0x5d)},
1627#endif 1729#endif
1730 {USB_DEVICE(0x0471, 0x0328), BSI(SN9C105, MI0360, 0x5d)},
1628 {USB_DEVICE(0x0471, 0x0330), BSI(SN9C105, MI0360, 0x5d)}, 1731 {USB_DEVICE(0x0471, 0x0330), BSI(SN9C105, MI0360, 0x5d)},
1732 {USB_DEVICE(0x06f8, 0x3004), BSI(SN9C105, OV7660, 0x21)},
1629 {USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, HV7131R, 0x11)}, 1733 {USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, HV7131R, 0x11)},
1630/* bw600.inf: 1734/* bw600.inf:
1631 {USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, MI0360, 0x5d)}, */ 1735 {USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, MI0360, 0x5d)}, */
@@ -1649,7 +1753,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
1649/* {USB_DEVICE(0x0c45, 0x6123), BSI(SN9C110, SanyoCCD, 0x??)}, */ 1753/* {USB_DEVICE(0x0c45, 0x6123), BSI(SN9C110, SanyoCCD, 0x??)}, */
1650 {USB_DEVICE(0x0c45, 0x6128), BSI(SN9C110, OM6802, 0x21)}, /*sn9c325?*/ 1754 {USB_DEVICE(0x0c45, 0x6128), BSI(SN9C110, OM6802, 0x21)}, /*sn9c325?*/
1651/*bw600.inf:*/ 1755/*bw600.inf:*/
1652 {USB_DEVICE(0x0c45, 0x612a), BSI(SN9C110, OV7648, 0x21)}, /*sn9c325?*/ 1756 {USB_DEVICE(0x0c45, 0x612a), BSI(SN9C120, OV7648, 0x21)}, /*sn9c110?*/
1653 {USB_DEVICE(0x0c45, 0x612c), BSI(SN9C110, MO4000, 0x21)}, 1757 {USB_DEVICE(0x0c45, 0x612c), BSI(SN9C110, MO4000, 0x21)},
1654 {USB_DEVICE(0x0c45, 0x612e), BSI(SN9C110, OV7630, 0x21)}, 1758 {USB_DEVICE(0x0c45, 0x612e), BSI(SN9C110, OV7630, 0x21)},
1655/* {USB_DEVICE(0x0c45, 0x612f), BSI(SN9C110, ICM105C, 0x??)}, */ 1759/* {USB_DEVICE(0x0c45, 0x612f), BSI(SN9C110, ICM105C, 0x??)}, */
@@ -1657,8 +1761,8 @@ static const __devinitdata struct usb_device_id device_table[] = {
1657 {USB_DEVICE(0x0c45, 0x6130), BSI(SN9C120, MI0360, 0x5d)}, 1761 {USB_DEVICE(0x0c45, 0x6130), BSI(SN9C120, MI0360, 0x5d)},
1658#endif 1762#endif
1659 {USB_DEVICE(0x0c45, 0x6138), BSI(SN9C120, MO4000, 0x21)}, 1763 {USB_DEVICE(0x0c45, 0x6138), BSI(SN9C120, MO4000, 0x21)},
1764 {USB_DEVICE(0x0c45, 0x613a), BSI(SN9C120, OV7648, 0x21)},
1660#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE 1765#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1661/* {USB_DEVICE(0x0c45, 0x613a), BSI(SN9C120, OV7648, 0x??)}, */
1662 {USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)}, 1766 {USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)},
1663 {USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)}, 1767 {USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)},
1664/* {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x??)}, */ 1768/* {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x??)}, */
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index bca106c153fa..942f04cd44dd 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -111,7 +111,7 @@ static struct ctrl sd_ctrls[] = {
111 }, 111 },
112}; 112};
113 113
114static struct v4l2_pix_format vga_mode[] = { 114static const struct v4l2_pix_format vga_mode[] = {
115 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 115 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
116 .bytesperline = 320, 116 .bytesperline = 320,
117 .sizeimage = 320 * 240 * 3 / 8 + 590, 117 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -124,7 +124,7 @@ static struct v4l2_pix_format vga_mode[] = {
124 .priv = 0}, 124 .priv = 0},
125}; 125};
126 126
127static struct v4l2_pix_format sif_mode[] = { 127static const struct v4l2_pix_format sif_mode[] = {
128 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 128 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
129 .bytesperline = 176, 129 .bytesperline = 176,
130 .sizeimage = 176 * 144 * 3 / 8 + 590, 130 .sizeimage = 176 * 144 * 3 / 8 + 590,
@@ -633,10 +633,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
633 sd->subtype = id->driver_info; 633 sd->subtype = id->driver_info;
634 if (sd->subtype != LogitechClickSmart310) { 634 if (sd->subtype != LogitechClickSmart310) {
635 cam->cam_mode = vga_mode; 635 cam->cam_mode = vga_mode;
636 cam->nmodes = sizeof vga_mode / sizeof vga_mode[0]; 636 cam->nmodes = ARRAY_SIZE(vga_mode);
637 } else { 637 } else {
638 cam->cam_mode = sif_mode; 638 cam->cam_mode = sif_mode;
639 cam->nmodes = sizeof sif_mode / sizeof sif_mode[0]; 639 cam->nmodes = ARRAY_SIZE(sif_mode);
640 } 640 }
641 sd->qindex = 5; 641 sd->qindex = 5;
642 sd->brightness = BRIGHTNESS_DEF; 642 sd->brightness = BRIGHTNESS_DEF;
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index e29954c1c38c..82e3e3e2ada1 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -34,6 +34,8 @@ struct sd {
34 unsigned short contrast; 34 unsigned short contrast;
35 __u8 brightness; 35 __u8 brightness;
36 __u8 colors; 36 __u8 colors;
37 __u8 blue_balance;
38 __u8 red_balance;
37 39
38 char subtype; 40 char subtype;
39#define Arowana300KCMOSCamera 0 41#define Arowana300KCMOSCamera 0
@@ -52,6 +54,10 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
52static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); 54static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
53static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); 55static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
54static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); 56static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
57static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
58static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
59static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
60static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);
55 61
56static struct ctrl sd_ctrls[] = { 62static struct ctrl sd_ctrls[] = {
57#define MY_BRIGHTNESS 0 63#define MY_BRIGHTNESS 0
@@ -63,7 +69,7 @@ static struct ctrl sd_ctrls[] = {
63 .minimum = 0, 69 .minimum = 0,
64 .maximum = 127, 70 .maximum = 127,
65 .step = 1, 71 .step = 1,
66 .default_value = 63, 72 .default_value = 0,
67 }, 73 },
68 .set = sd_setbrightness, 74 .set = sd_setbrightness,
69 .get = sd_getbrightness, 75 .get = sd_getbrightness,
@@ -75,9 +81,9 @@ static struct ctrl sd_ctrls[] = {
75 .type = V4L2_CTRL_TYPE_INTEGER, 81 .type = V4L2_CTRL_TYPE_INTEGER,
76 .name = "Contrast", 82 .name = "Contrast",
77 .minimum = 0, 83 .minimum = 0,
78 .maximum = 0xffff, 84 .maximum = 64725,
79 .step = 1, 85 .step = 1,
80 .default_value = 0xaa00, 86 .default_value = 64725,
81 }, 87 },
82 .set = sd_setcontrast, 88 .set = sd_setcontrast,
83 .get = sd_getcontrast, 89 .get = sd_getcontrast,
@@ -91,14 +97,42 @@ static struct ctrl sd_ctrls[] = {
91 .minimum = 0, 97 .minimum = 0,
92 .maximum = 63, 98 .maximum = 63,
93 .step = 1, 99 .step = 1,
94 .default_value = 31, 100 .default_value = 20,
95 }, 101 },
96 .set = sd_setcolors, 102 .set = sd_setcolors,
97 .get = sd_getcolors, 103 .get = sd_getcolors,
98 }, 104 },
105#define MY_BLUE_BALANCE 3
106 {
107 {
108 .id = V4L2_CID_BLUE_BALANCE,
109 .type = V4L2_CTRL_TYPE_INTEGER,
110 .name = "Blue Balance",
111 .minimum = 0,
112 .maximum = 127,
113 .step = 1,
114 .default_value = 0,
115 },
116 .set = sd_setblue_balance,
117 .get = sd_getblue_balance,
118 },
119#define MY_RED_BALANCE 4
120 {
121 {
122 .id = V4L2_CID_RED_BALANCE,
123 .type = V4L2_CTRL_TYPE_INTEGER,
124 .name = "Red Balance",
125 .minimum = 0,
126 .maximum = 127,
127 .step = 1,
128 .default_value = 0,
129 },
130 .set = sd_setred_balance,
131 .get = sd_getred_balance,
132 },
99}; 133};
100 134
101static struct v4l2_pix_format vga_mode[] = { 135static const struct v4l2_pix_format vga_mode[] = {
102 {160, 120, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE, 136 {160, 120, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE,
103 .bytesperline = 160, 137 .bytesperline = 160,
104 .sizeimage = 160 * 120 * 3 / 2, 138 .sizeimage = 160 * 120 * 3 / 2,
@@ -1822,29 +1856,6 @@ static int reg_write(struct usb_device *dev,
1822 return ret; 1856 return ret;
1823} 1857}
1824 1858
1825/* returns: negative is error, pos or zero is data */
1826static int reg_read(struct gspca_dev *gspca_dev,
1827 __u16 req, /* bRequest */
1828 __u16 index, /* wIndex */
1829 __u16 length) /* wLength (1 or 2 only) */
1830{
1831 int ret;
1832
1833 gspca_dev->usb_buf[1] = 0;
1834 ret = usb_control_msg(gspca_dev->dev,
1835 usb_rcvctrlpipe(gspca_dev->dev, 0),
1836 req,
1837 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1838 0, /* value */
1839 index,
1840 gspca_dev->usb_buf, length,
1841 500); /* timeout */
1842 if (ret < 0) {
1843 PDEBUG(D_ERR, "reg_read err %d", ret);
1844 return -1;
1845 }
1846 return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0];
1847}
1848 1859
1849static int write_vector(struct gspca_dev *gspca_dev, 1860static int write_vector(struct gspca_dev *gspca_dev,
1850 const __u16 data[][3]) 1861 const __u16 data[][3])
@@ -1869,18 +1880,11 @@ static void setbrightness(struct gspca_dev *gspca_dev)
1869{ 1880{
1870 struct sd *sd = (struct sd *) gspca_dev; 1881 struct sd *sd = (struct sd *) gspca_dev;
1871 1882
1872 reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x11, sd->brightness);
1873 reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x12, sd->brightness); 1883 reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x12, sd->brightness);
1874 reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x13, sd->brightness);
1875} 1884}
1876 1885
1877static void getbrightness(struct gspca_dev *gspca_dev) 1886static void getbrightness(struct gspca_dev *gspca_dev)
1878{ 1887{
1879 struct sd *sd = (struct sd *) gspca_dev;
1880 __u16 brightness;
1881
1882 brightness = reg_read(gspca_dev, SPCA501_REG_CCDSP, 0x11, 2);
1883 sd->brightness = brightness << 1;
1884} 1888}
1885 1889
1886static void setcontrast(struct gspca_dev *gspca_dev) 1890static void setcontrast(struct gspca_dev *gspca_dev)
@@ -1895,7 +1899,6 @@ static void setcontrast(struct gspca_dev *gspca_dev)
1895 1899
1896static void getcontrast(struct gspca_dev *gspca_dev) 1900static void getcontrast(struct gspca_dev *gspca_dev)
1897{ 1901{
1898/* spca50x->contrast = 0xaa01; */
1899} 1902}
1900 1903
1901static void setcolors(struct gspca_dev *gspca_dev) 1904static void setcolors(struct gspca_dev *gspca_dev)
@@ -1907,11 +1910,20 @@ static void setcolors(struct gspca_dev *gspca_dev)
1907 1910
1908static void getcolors(struct gspca_dev *gspca_dev) 1911static void getcolors(struct gspca_dev *gspca_dev)
1909{ 1912{
1913}
1914
1915static void setblue_balance(struct gspca_dev *gspca_dev)
1916{
1917 struct sd *sd = (struct sd *) gspca_dev;
1918
1919 reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x11, sd->blue_balance);
1920}
1921
1922static void setred_balance(struct gspca_dev *gspca_dev)
1923{
1910 struct sd *sd = (struct sd *) gspca_dev; 1924 struct sd *sd = (struct sd *) gspca_dev;
1911 1925
1912 sd->colors = reg_read(gspca_dev, SPCA501_REG_CCDSP, 0x0c, 2); 1926 reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x13, sd->red_balance);
1913/* sd->hue = (reg_read(gspca_dev, SPCA501_REG_CCDSP, 0x13, */
1914/* 2) & 0xFF) << 8; */
1915} 1927}
1916 1928
1917/* this function is called at probe time */ 1929/* this function is called at probe time */
@@ -1930,6 +1942,14 @@ static int sd_config(struct gspca_dev *gspca_dev,
1930 sd->contrast = sd_ctrls[MY_CONTRAST].qctrl.default_value; 1942 sd->contrast = sd_ctrls[MY_CONTRAST].qctrl.default_value;
1931 sd->colors = sd_ctrls[MY_COLOR].qctrl.default_value; 1943 sd->colors = sd_ctrls[MY_COLOR].qctrl.default_value;
1932 1944
1945 return 0;
1946}
1947
1948/* this function is called at probe and resume time */
1949static int sd_init(struct gspca_dev *gspca_dev)
1950{
1951 struct sd *sd = (struct sd *) gspca_dev;
1952
1933 switch (sd->subtype) { 1953 switch (sd->subtype) {
1934 case Arowana300KCMOSCamera: 1954 case Arowana300KCMOSCamera:
1935 case SmileIntlCamera: 1955 case SmileIntlCamera:
@@ -1948,15 +1968,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
1948 goto error; 1968 goto error;
1949 break; 1969 break;
1950 } 1970 }
1971 PDEBUG(D_STREAM, "Initializing SPCA501 finished");
1951 return 0; 1972 return 0;
1952error: 1973error:
1953 return -EINVAL; 1974 return -EINVAL;
1954} 1975}
1955 1976
1956/* this function is called at probe and resume time */ 1977static int sd_start(struct gspca_dev *gspca_dev)
1957static int sd_init(struct gspca_dev *gspca_dev)
1958{ 1978{
1959 struct sd *sd = (struct sd *) gspca_dev; 1979 struct sd *sd = (struct sd *) gspca_dev;
1980 struct usb_device *dev = gspca_dev->dev;
1981 int mode;
1960 1982
1961 switch (sd->subtype) { 1983 switch (sd->subtype) {
1962 case ThreeComHomeConnectLite: 1984 case ThreeComHomeConnectLite:
@@ -1976,14 +1998,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
1976 /* Generic 501 open data */ 1998 /* Generic 501 open data */
1977 write_vector(gspca_dev, spca501_open_data); 1999 write_vector(gspca_dev, spca501_open_data);
1978 } 2000 }
1979 PDEBUG(D_STREAM, "Initializing SPCA501 finished");
1980 return 0;
1981}
1982
1983static int sd_start(struct gspca_dev *gspca_dev)
1984{
1985 struct usb_device *dev = gspca_dev->dev;
1986 int mode;
1987 2001
1988 /* memorize the wanted pixel format */ 2002 /* memorize the wanted pixel format */
1989 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 2003 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
@@ -2113,6 +2127,42 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
2113 return 0; 2127 return 0;
2114} 2128}
2115 2129
2130static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
2131{
2132 struct sd *sd = (struct sd *) gspca_dev;
2133
2134 sd->blue_balance = val;
2135 if (gspca_dev->streaming)
2136 setblue_balance(gspca_dev);
2137 return 0;
2138}
2139
2140static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
2141{
2142 struct sd *sd = (struct sd *) gspca_dev;
2143
2144 *val = sd->blue_balance;
2145 return 0;
2146}
2147
2148static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
2149{
2150 struct sd *sd = (struct sd *) gspca_dev;
2151
2152 sd->red_balance = val;
2153 if (gspca_dev->streaming)
2154 setred_balance(gspca_dev);
2155 return 0;
2156}
2157
2158static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
2159{
2160 struct sd *sd = (struct sd *) gspca_dev;
2161
2162 *val = sd->red_balance;
2163 return 0;
2164}
2165
2116/* sub-driver description */ 2166/* sub-driver description */
2117static const struct sd_desc sd_desc = { 2167static const struct sd_desc sd_desc = {
2118 .name = MODULE_NAME, 2168 .name = MODULE_NAME,
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index 895b9fe4018c..2a33a29010ee 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -59,7 +59,7 @@ static struct ctrl sd_ctrls[] = {
59 }, 59 },
60}; 60};
61 61
62static struct v4l2_pix_format vga_mode[] = { 62static const struct v4l2_pix_format vga_mode[] = {
63 {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 63 {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
64 .bytesperline = 160, 64 .bytesperline = 160,
65 .sizeimage = 160 * 120 * 3 / 2, 65 .sizeimage = 160 * 120 * 3 / 2,
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index 645ee9d44d02..96e2512e0621 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -110,7 +110,7 @@ static struct ctrl sd_ctrls[] = {
110 }, 110 },
111}; 111};
112 112
113static struct v4l2_pix_format vga_mode[] = { 113static const struct v4l2_pix_format vga_mode[] = {
114 {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE, 114 {160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
115 .bytesperline = 160, 115 .bytesperline = 160,
116 .sizeimage = 160 * 120 * 3 / 2, 116 .sizeimage = 160 * 120 * 3 / 2,
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 63ec902c895d..be5d740a315d 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -62,7 +62,7 @@ static struct ctrl sd_ctrls[] = {
62 }, 62 },
63}; 63};
64 64
65static struct v4l2_pix_format sif_mode[] = { 65static const struct v4l2_pix_format sif_mode[] = {
66 {160, 120, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE, 66 {160, 120, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
67 .bytesperline = 160, 67 .bytesperline = 160,
68 .sizeimage = 160 * 120 * 3 / 2, 68 .sizeimage = 160 * 120 * 3 / 2,
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index c3de4e44123d..3c9288019e96 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -32,22 +32,22 @@ MODULE_LICENSE("GPL");
32struct sd { 32struct sd {
33 struct gspca_dev gspca_dev; /* !! must be the first item */ 33 struct gspca_dev gspca_dev; /* !! must be the first item */
34 34
35 __u16 contrast; /* rev72a only */
36#define CONTRAST_MIN 0x0000
37#define CONTRAST_DEF 0x2000
38#define CONTRAST_MAX 0x3fff
39
40 __u16 exposure; /* rev12a only */ 35 __u16 exposure; /* rev12a only */
41#define EXPOSURE_MIN 1 36#define EXPOSURE_MIN 1
42#define EXPOSURE_DEF 200 37#define EXPOSURE_DEF 200
43#define EXPOSURE_MAX (4095 - 900) /* see set_exposure */ 38#define EXPOSURE_MAX (4095 - 900) /* see set_exposure */
44 39
40 __u8 contrast; /* rev72a only */
41#define CONTRAST_MIN 0x00
42#define CONTRAST_DEF 0x20
43#define CONTRAST_MAX 0x3f
44
45 __u8 brightness; /* rev72a only */ 45 __u8 brightness; /* rev72a only */
46#define BRIGHTNESS_MIN 0 46#define BRIGHTNESS_MIN 0
47#define BRIGHTNESS_DEF 32 47#define BRIGHTNESS_DEF 0x20
48#define BRIGHTNESS_MAX 63 48#define BRIGHTNESS_MAX 0x3f
49 49
50 __u8 white; /* rev12a only */ 50 __u8 white;
51#define WHITE_MIN 1 51#define WHITE_MIN 1
52#define WHITE_DEF 0x40 52#define WHITE_DEF 0x40
53#define WHITE_MAX 0x7f 53#define WHITE_MAX 0x7f
@@ -73,7 +73,7 @@ struct sd {
73#define AG_CNT_START 13 73#define AG_CNT_START 13
74}; 74};
75 75
76static struct v4l2_pix_format sif_012a_mode[] = { 76static const struct v4l2_pix_format sif_012a_mode[] = {
77 {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, 77 {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
78 .bytesperline = 160, 78 .bytesperline = 160,
79 .sizeimage = 160 * 120, 79 .sizeimage = 160 * 120,
@@ -96,7 +96,7 @@ static struct v4l2_pix_format sif_012a_mode[] = {
96 .priv = 0}, 96 .priv = 0},
97}; 97};
98 98
99static struct v4l2_pix_format sif_072a_mode[] = { 99static const struct v4l2_pix_format sif_072a_mode[] = {
100 {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, 100 {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
101 .bytesperline = 160, 101 .bytesperline = 160,
102 .sizeimage = 160 * 120, 102 .sizeimage = 160 * 120,
@@ -146,98 +146,7 @@ static struct v4l2_pix_format sif_072a_mode[] = {
146#define SPCA561_SNAPBIT 0x20 146#define SPCA561_SNAPBIT 0x20
147#define SPCA561_SNAPCTRL 0x40 147#define SPCA561_SNAPCTRL 0x40
148 148
149static void reg_w_val(struct usb_device *dev, __u16 index, __u8 value) 149static const __u16 rev72a_init_data1[][2] = {
150{
151 int ret;
152
153 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
154 0, /* request */
155 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
156 value, index, NULL, 0, 500);
157 PDEBUG(D_USBO, "reg write: 0x%02x:0x%02x", index, value);
158 if (ret < 0)
159 PDEBUG(D_ERR, "reg write: error %d", ret);
160}
161
162static void write_vector(struct gspca_dev *gspca_dev,
163 const __u16 data[][2])
164{
165 struct usb_device *dev = gspca_dev->dev;
166 int i;
167
168 i = 0;
169 while (data[i][1] != 0) {
170 reg_w_val(dev, data[i][1], data[i][0]);
171 i++;
172 }
173}
174
175/* read 'len' bytes to gspca_dev->usb_buf */
176static void reg_r(struct gspca_dev *gspca_dev,
177 __u16 index, __u16 length)
178{
179 usb_control_msg(gspca_dev->dev,
180 usb_rcvctrlpipe(gspca_dev->dev, 0),
181 0, /* request */
182 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
183 0, /* value */
184 index, gspca_dev->usb_buf, length, 500);
185}
186
187static void reg_w_buf(struct gspca_dev *gspca_dev,
188 __u16 index, const __u8 *buffer, __u16 len)
189{
190 memcpy(gspca_dev->usb_buf, buffer, len);
191 usb_control_msg(gspca_dev->dev,
192 usb_sndctrlpipe(gspca_dev->dev, 0),
193 0, /* request */
194 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
195 0, /* value */
196 index, gspca_dev->usb_buf, len, 500);
197}
198
199static void i2c_write(struct gspca_dev *gspca_dev, __u16 valeur, __u16 reg)
200{
201 int retry = 60;
202 __u8 DataLow;
203 __u8 DataHight;
204
205 DataLow = valeur;
206 DataHight = valeur >> 8;
207 reg_w_val(gspca_dev->dev, 0x8801, reg);
208 reg_w_val(gspca_dev->dev, 0x8805, DataLow);
209 reg_w_val(gspca_dev->dev, 0x8800, DataHight);
210 while (retry--) {
211 reg_r(gspca_dev, 0x8803, 1);
212 if (!gspca_dev->usb_buf[0])
213 break;
214 }
215}
216
217static int i2c_read(struct gspca_dev *gspca_dev, __u16 reg, __u8 mode)
218{
219 int retry = 60;
220 __u8 value;
221 __u8 vallsb;
222
223 reg_w_val(gspca_dev->dev, 0x8804, 0x92);
224 reg_w_val(gspca_dev->dev, 0x8801, reg);
225 reg_w_val(gspca_dev->dev, 0x8802, (mode | 0x01));
226 do {
227 reg_r(gspca_dev, 0x8803, 1);
228 if (!gspca_dev->usb_buf[0])
229 break;
230 } while (--retry);
231 if (retry == 0)
232 return -1;
233 reg_r(gspca_dev, 0x8800, 1);
234 value = gspca_dev->usb_buf[0];
235 reg_r(gspca_dev, 0x8805, 1);
236 vallsb = gspca_dev->usb_buf[0];
237 return ((int) value << 8) | vallsb;
238}
239
240static const __u16 spca561_init_data[][2] = {
241 {0x0000, 0x8114}, /* Software GPIO output data */ 150 {0x0000, 0x8114}, /* Software GPIO output data */
242 {0x0001, 0x8114}, /* Software GPIO output data */ 151 {0x0001, 0x8114}, /* Software GPIO output data */
243 {0x0000, 0x8112}, /* Some kind of reset */ 152 {0x0000, 0x8112}, /* Some kind of reset */
@@ -247,44 +156,26 @@ static const __u16 spca561_init_data[][2] = {
247 {0x0001, 0x8118}, /* Conf sensor */ 156 {0x0001, 0x8118}, /* Conf sensor */
248 {0x0092, 0x8804}, /* I know nothing about these */ 157 {0x0092, 0x8804}, /* I know nothing about these */
249 {0x0010, 0x8802}, /* 0x88xx registers, so I won't */ 158 {0x0010, 0x8802}, /* 0x88xx registers, so I won't */
250 /***************/
251 {0x000d, 0x8805}, /* sensor default setting */ 159 {0x000d, 0x8805}, /* sensor default setting */
252 {0x0001, 0x8801}, /* 1 <- 0x0d */ 160 {}
253 {0x0000, 0x8800}, 161};
254 {0x0018, 0x8805}, 162static const __u16 rev72a_init_sensor1[][2] = {
255 {0x0002, 0x8801}, /* 2 <- 0x18 */ 163 /* ms-win values */
256 {0x0000, 0x8800}, 164 {0x0001, 0x0018}, /* 0x01 <- 0x0d */
257 {0x0065, 0x8805}, 165 {0x0002, 0x0065}, /* 0x02 <- 0x18 */
258 {0x0004, 0x8801}, /* 4 <- 0x01 0x65 */ 166 {0x0004, 0x0121}, /* 0x04 <- 0x0165 */
259 {0x0001, 0x8800}, 167 {0x0005, 0x00aa}, /* 0x05 <- 0x21 */
260 {0x0021, 0x8805}, 168 {0x0007, 0x0004}, /* 0x07 <- 0xaa */
261 {0x0005, 0x8801}, /* 5 <- 0x21 */ 169 {0x0020, 0x1502}, /* 0x20 <- 0x1504 */
262 {0x0000, 0x8800}, 170 {0x0039, 0x0010}, /* 0x39 <- 0x02 */
263 {0x00aa, 0x8805}, 171 {0x0035, 0x0049}, /* 0x35 <- 0x10 */
264 {0x0007, 0x8801}, /* 7 <- 0xaa */ 172 {0x0009, 0x100b}, /* 0x09 <- 0x1049 */
265 {0x0000, 0x8800}, 173 {0x0028, 0x000f}, /* 0x28 <- 0x0b */
266 {0x0004, 0x8805}, 174 {0x003b, 0x003c}, /* 0x3b <- 0x0f */
267 {0x0020, 0x8801}, /* 0x20 <- 0x15 0x04 */ 175 {0x003c, 0x0000}, /* 0x3c <- 0x00 */
268 {0x0015, 0x8800}, 176 {}
269 {0x0002, 0x8805}, 177};
270 {0x0039, 0x8801}, /* 0x39 <- 0x02 */ 178static const __u16 rev72a_init_data2[][2] = {
271 {0x0000, 0x8800},
272 {0x0010, 0x8805},
273 {0x0035, 0x8801}, /* 0x35 <- 0x10 */
274 {0x0000, 0x8800},
275 {0x0049, 0x8805},
276 {0x0009, 0x8801}, /* 0x09 <- 0x10 0x49 */
277 {0x0010, 0x8800},
278 {0x000b, 0x8805},
279 {0x0028, 0x8801}, /* 0x28 <- 0x0b */
280 {0x0000, 0x8800},
281 {0x000f, 0x8805},
282 {0x003b, 0x8801}, /* 0x3b <- 0x0f */
283 {0x0000, 0x8800},
284 {0x0000, 0x8805},
285 {0x003c, 0x8801}, /* 0x3c <- 0x00 */
286 {0x0000, 0x8800},
287 /***************/
288 {0x0018, 0x8601}, /* Pixel/line selection for color separation */ 179 {0x0018, 0x8601}, /* Pixel/line selection for color separation */
289 {0x0000, 0x8602}, /* Optical black level for user setting */ 180 {0x0000, 0x8602}, /* Optical black level for user setting */
290 {0x0060, 0x8604}, /* Optical black horizontal offset */ 181 {0x0060, 0x8604}, /* Optical black horizontal offset */
@@ -309,10 +200,11 @@ static const __u16 spca561_init_data[][2] = {
309 {0x0004, 0x8612}, /* Gr offset for white balance */ 200 {0x0004, 0x8612}, /* Gr offset for white balance */
310 {0x0007, 0x8613}, /* B offset for white balance */ 201 {0x0007, 0x8613}, /* B offset for white balance */
311 {0x0000, 0x8614}, /* Gb offset for white balance */ 202 {0x0000, 0x8614}, /* Gb offset for white balance */
312 {0x008c, 0x8651}, /* R gain for white balance */ 203/* from ms-win */
313 {0x008c, 0x8652}, /* Gr gain for white balance */ 204 {0x0035, 0x8651}, /* R gain for white balance */
314 {0x00b5, 0x8653}, /* B gain for white balance */ 205 {0x0040, 0x8652}, /* Gr gain for white balance */
315 {0x008c, 0x8654}, /* Gb gain for white balance */ 206 {0x005f, 0x8653}, /* B gain for white balance */
207 {0x0040, 0x8654}, /* Gb gain for white balance */
316 {0x0002, 0x8502}, /* Maximum average bit rate stuff */ 208 {0x0002, 0x8502}, /* Maximum average bit rate stuff */
317 209
318 {0x0011, 0x8802}, 210 {0x0011, 0x8802},
@@ -324,29 +216,22 @@ static const __u16 spca561_init_data[][2] = {
324 216
325 {0x0002, 0x865b}, /* Horizontal offset for valid pixels */ 217 {0x0002, 0x865b}, /* Horizontal offset for valid pixels */
326 {0x0003, 0x865c}, /* Vertical offset for valid lines */ 218 {0x0003, 0x865c}, /* Vertical offset for valid lines */
327 /***************//* sensor active */ 219 {}
328 {0x0003, 0x8801}, /* 0x03 <- 0x01 0x21 //289 */ 220};
329 {0x0021, 0x8805}, 221static const __u16 rev72a_init_sensor2[][2] = {
330 {0x0001, 0x8800}, 222 /* ms-win values */
331 {0x0004, 0x8801}, /* 0x04 <- 0x01 0x65 //357 */ 223 {0x0003, 0x0121}, /* 0x03 <- 0x01 0x21 //289 */
332 {0x0065, 0x8805}, 224 {0x0004, 0x0165}, /* 0x04 <- 0x01 0x65 //357 */
333 {0x0001, 0x8800}, 225 {0x0005, 0x002f}, /* 0x05 <- 0x2f */
334 {0x0005, 0x8801}, /* 0x05 <- 0x2f */ 226 {0x0006, 0x0000}, /* 0x06 <- 0 */
335 {0x002f, 0x8805}, 227 {0x000a, 0x0002}, /* 0x0a <- 2 */
336 {0x0000, 0x8800}, 228 {0x0009, 0x1061}, /* 0x09 <- 0x1061 */
337 {0x0006, 0x8801}, /* 0x06 <- 0 */ 229 {0x0035, 0x0014}, /* 0x35 <- 0x14 */
338 {0x0000, 0x8805}, 230 {}
339 {0x0000, 0x8800}, 231};
340 {0x000a, 0x8801}, /* 0x0a <- 2 */ 232static const __u16 rev72a_init_data3[][2] = {
341 {0x0002, 0x8805},
342 {0x0000, 0x8800},
343 {0x0009, 0x8801}, /* 0x09 <- 0x1061 */
344 {0x0061, 0x8805},
345 {0x0010, 0x8800},
346 {0x0035, 0x8801}, /* 0x35 <-0x14 */
347 {0x0014, 0x8805},
348 {0x0000, 0x8800},
349 {0x0030, 0x8112}, /* ISO and drop packet enable */ 233 {0x0030, 0x8112}, /* ISO and drop packet enable */
234/*fixme: should stop here*/
350 {0x0000, 0x8112}, /* Some kind of reset ???? */ 235 {0x0000, 0x8112}, /* Some kind of reset ???? */
351 {0x0009, 0x8118}, /* Enable sensor and set standby */ 236 {0x0009, 0x8118}, /* Enable sensor and set standby */
352 {0x0000, 0x8114}, /* Software GPIO output data */ 237 {0x0000, 0x8114}, /* Software GPIO output data */
@@ -434,7 +319,6 @@ static const __u16 spca561_init_data[][2] = {
434 {} 319 {}
435}; 320};
436 321
437
438/******************** QC Express etch2 stuff ********************/ 322/******************** QC Express etch2 stuff ********************/
439static const __u16 Pb100_1map8300[][2] = { 323static const __u16 Pb100_1map8300[][2] = {
440 /* reg, value */ 324 /* reg, value */
@@ -515,22 +399,112 @@ static const __u16 spca561_161rev12A_data2[][2] = {
515 {} 399 {}
516}; 400};
517 401
518static void sensor_mapwrite(struct gspca_dev *gspca_dev, 402static void reg_w_val(struct usb_device *dev, __u16 index, __u8 value)
519 const __u16 sensormap[][2]) 403{
404 int ret;
405
406 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
407 0, /* request */
408 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
409 value, index, NULL, 0, 500);
410 PDEBUG(D_USBO, "reg write: 0x%02x:0x%02x", index, value);
411 if (ret < 0)
412 PDEBUG(D_ERR, "reg write: error %d", ret);
413}
414
415static void write_vector(struct gspca_dev *gspca_dev,
416 const __u16 data[][2])
520{ 417{
521 int i = 0; 418 struct usb_device *dev = gspca_dev->dev;
522 __u8 usbval[2]; 419 int i;
523 420
524 while (sensormap[i][0]) { 421 i = 0;
525 usbval[0] = sensormap[i][1]; 422 while (data[i][1] != 0) {
526 usbval[1] = sensormap[i][1] >> 8; 423 reg_w_val(dev, data[i][1], data[i][0]);
527 reg_w_buf(gspca_dev, sensormap[i][0], usbval, 2);
528 i++; 424 i++;
529 } 425 }
530} 426}
427
428/* read 'len' bytes to gspca_dev->usb_buf */
429static void reg_r(struct gspca_dev *gspca_dev,
430 __u16 index, __u16 length)
431{
432 usb_control_msg(gspca_dev->dev,
433 usb_rcvctrlpipe(gspca_dev->dev, 0),
434 0, /* request */
435 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
436 0, /* value */
437 index, gspca_dev->usb_buf, length, 500);
438}
439
440/* write 'len' bytes from gspca_dev->usb_buf */
441static void reg_w_buf(struct gspca_dev *gspca_dev,
442 __u16 index, __u16 len)
443{
444 usb_control_msg(gspca_dev->dev,
445 usb_sndctrlpipe(gspca_dev->dev, 0),
446 0, /* request */
447 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
448 0, /* value */
449 index, gspca_dev->usb_buf, len, 500);
450}
451
452static void i2c_write(struct gspca_dev *gspca_dev, __u16 value, __u16 reg)
453{
454 int retry = 60;
455
456 reg_w_val(gspca_dev->dev, 0x8801, reg);
457 reg_w_val(gspca_dev->dev, 0x8805, value);
458 reg_w_val(gspca_dev->dev, 0x8800, value >> 8);
459 do {
460 reg_r(gspca_dev, 0x8803, 1);
461 if (!gspca_dev->usb_buf[0])
462 return;
463 } while (--retry);
464}
465
466static int i2c_read(struct gspca_dev *gspca_dev, __u16 reg, __u8 mode)
467{
468 int retry = 60;
469 __u8 value;
470
471 reg_w_val(gspca_dev->dev, 0x8804, 0x92);
472 reg_w_val(gspca_dev->dev, 0x8801, reg);
473 reg_w_val(gspca_dev->dev, 0x8802, mode | 0x01);
474 do {
475 reg_r(gspca_dev, 0x8803, 1);
476 if (!gspca_dev->usb_buf[0]) {
477 reg_r(gspca_dev, 0x8800, 1);
478 value = gspca_dev->usb_buf[0];
479 reg_r(gspca_dev, 0x8805, 1);
480 return ((int) value << 8) | gspca_dev->usb_buf[0];
481 }
482 } while (--retry);
483 return -1;
484}
485
486static void sensor_mapwrite(struct gspca_dev *gspca_dev,
487 const __u16 (*sensormap)[2])
488{
489 while ((*sensormap)[0]) {
490 gspca_dev->usb_buf[0] = (*sensormap)[1];
491 gspca_dev->usb_buf[1] = (*sensormap)[1] >> 8;
492 reg_w_buf(gspca_dev, (*sensormap)[0], 2);
493 sensormap++;
494 }
495}
496
497static void write_sensor_72a(struct gspca_dev *gspca_dev,
498 const __u16 (*sensor)[2])
499{
500 while ((*sensor)[0]) {
501 i2c_write(gspca_dev, (*sensor)[1], (*sensor)[0]);
502 sensor++;
503 }
504}
505
531static void init_161rev12A(struct gspca_dev *gspca_dev) 506static void init_161rev12A(struct gspca_dev *gspca_dev)
532{ 507{
533/* sensor_reset(gspca_dev); (not in win) */
534 write_vector(gspca_dev, spca561_161rev12A_data1); 508 write_vector(gspca_dev, spca561_161rev12A_data1);
535 sensor_mapwrite(gspca_dev, Pb100_1map8300); 509 sensor_mapwrite(gspca_dev, Pb100_1map8300);
536/*fixme: should be in sd_start*/ 510/*fixme: should be in sd_start*/
@@ -598,49 +572,68 @@ static int sd_init_12a(struct gspca_dev *gspca_dev)
598static int sd_init_72a(struct gspca_dev *gspca_dev) 572static int sd_init_72a(struct gspca_dev *gspca_dev)
599{ 573{
600 PDEBUG(D_STREAM, "Chip revision: 072a"); 574 PDEBUG(D_STREAM, "Chip revision: 072a");
601 write_vector(gspca_dev, spca561_init_data); 575 write_vector(gspca_dev, rev72a_init_data1);
576 write_sensor_72a(gspca_dev, rev72a_init_sensor1);
577 write_vector(gspca_dev, rev72a_init_data2);
578 write_sensor_72a(gspca_dev, rev72a_init_sensor2);
579 write_vector(gspca_dev, rev72a_init_data3);
602 return 0; 580 return 0;
603} 581}
604 582
605static void setcontrast(struct gspca_dev *gspca_dev) 583/* rev 72a only */
584static void setbrightness(struct gspca_dev *gspca_dev)
606{ 585{
607 struct sd *sd = (struct sd *) gspca_dev; 586 struct sd *sd = (struct sd *) gspca_dev;
608 struct usb_device *dev = gspca_dev->dev; 587 struct usb_device *dev = gspca_dev->dev;
609 __u8 lowb; 588 __u8 value;
610 589
611 switch (sd->chip_revision) { 590 value = sd->brightness;
612 case Rev072A:
613 lowb = sd->contrast >> 8;
614 reg_w_val(dev, 0x8651, lowb);
615 reg_w_val(dev, 0x8652, lowb);
616 reg_w_val(dev, 0x8653, lowb);
617 reg_w_val(dev, 0x8654, lowb);
618 break;
619 default: {
620/* case Rev012A: { */
621 static const __u8 Reg8391[] =
622 { 0x92, 0x30, 0x20, 0x00, 0x0c, 0x00, 0x00, 0x00 };
623 591
624 reg_w_buf(gspca_dev, 0x8391, Reg8391, 8); 592 /* offsets for white balance */
625 reg_w_buf(gspca_dev, 0x8390, Reg8391, 8); 593 reg_w_val(dev, 0x8611, value); /* R */
626 break; 594 reg_w_val(dev, 0x8612, value); /* Gr */
627 } 595 reg_w_val(dev, 0x8613, value); /* B */
628 } 596 reg_w_val(dev, 0x8614, value); /* Gb */
629} 597}
630 598
631/* rev12a only */
632static void setwhite(struct gspca_dev *gspca_dev) 599static void setwhite(struct gspca_dev *gspca_dev)
633{ 600{
634 struct sd *sd = (struct sd *) gspca_dev; 601 struct sd *sd = (struct sd *) gspca_dev;
635 __u16 white; 602 __u16 white;
636 __u8 reg8614, reg8616; 603 __u8 blue, red;
604 __u16 reg;
637 605
638 white = sd->white;
639 /* try to emulate MS-win as possible */ 606 /* try to emulate MS-win as possible */
640 reg8616 = 0x90 - white * 5 / 8; 607 white = sd->white;
641 reg_w_val(gspca_dev->dev, 0x8616, reg8616); 608 red = 0x20 + white * 3 / 8;
642 reg8614 = 0x20 + white * 3 / 8; 609 blue = 0x90 - white * 5 / 8;
643 reg_w_val(gspca_dev->dev, 0x8614, reg8614); 610 if (sd->chip_revision == Rev012A) {
611 reg = 0x8614;
612 } else {
613 reg = 0x8651;
614 red += sd->contrast - 0x20;
615 blue += sd->contrast - 0x20;
616 }
617 reg_w_val(gspca_dev->dev, reg, red);
618 reg_w_val(gspca_dev->dev, reg + 2, blue);
619}
620
621static void setcontrast(struct gspca_dev *gspca_dev)
622{
623 struct sd *sd = (struct sd *) gspca_dev;
624 struct usb_device *dev = gspca_dev->dev;
625 __u8 value;
626
627 if (sd->chip_revision != Rev072A)
628 return;
629 value = sd->contrast + 0x20;
630
631 /* gains for white balance */
632 setwhite(gspca_dev);
633/* reg_w_val(dev, 0x8651, value); * R - done by setwhite */
634 reg_w_val(dev, 0x8652, value); /* Gr */
635/* reg_w_val(dev, 0x8653, value); * B - done by setwhite */
636 reg_w_val(dev, 0x8654, value); /* Gb */
644} 637}
645 638
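For the rev72a path, setwhite() and setcontrast() above now split the four white-balance gain registers between them: setwhite() derives the R (0x8651) and B (0x8653) gains from the white-balance control and folds the contrast offset in, while setcontrast() programs the Gr/Gb gains (0x8652/0x8654) directly. A minimal standalone sketch of that arithmetic (not part of the driver), assuming the default control values visible in this patch (WHITE_DEF = 0x40, CONTRAST_DEF = 0x20):

#include <stdio.h>

int main(void)
{
	unsigned char white = 0x40;	/* WHITE_DEF */
	unsigned char contrast = 0x20;	/* CONTRAST_DEF */

	/* setwhite(), rev72a branch */
	unsigned char red  = 0x20 + white * 3 / 8 + contrast - 0x20;
	unsigned char blue = 0x90 - white * 5 / 8 + contrast - 0x20;
	/* setcontrast(), rev72a only */
	unsigned char green = contrast + 0x20;

	printf("0x8651 (R)  = 0x%02x\n", red);
	printf("0x8652 (Gr) = 0x%02x\n", green);
	printf("0x8653 (B)  = 0x%02x\n", blue);
	printf("0x8654 (Gb) = 0x%02x\n", green);
	return 0;
}

With these defaults the result comes out close to, though not identical with, the "from ms-win" gains listed in rev72a_init_data2 above.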
646/* rev 12a only */ 639/* rev 12a only */
@@ -649,7 +642,6 @@ static void setexposure(struct gspca_dev *gspca_dev)
649 struct sd *sd = (struct sd *) gspca_dev; 642 struct sd *sd = (struct sd *) gspca_dev;
650 int expo; 643 int expo;
651 int clock_divider; 644 int clock_divider;
652 __u8 data[2];
653 645
654 /* Register 0x8309 controls exposure for the spca561, 646 /* Register 0x8309 controls exposure for the spca561,
655 the basic exposure setting goes from 1-2047, where 1 is completely 647 the basic exposure setting goes from 1-2047, where 1 is completely
@@ -673,20 +665,19 @@ static void setexposure(struct gspca_dev *gspca_dev)
673 clock_divider = 3; 665 clock_divider = 3;
674 } 666 }
675 expo |= clock_divider << 11; 667 expo |= clock_divider << 11;
676 data[0] = expo; 668 gspca_dev->usb_buf[0] = expo;
677 data[1] = expo >> 8; 669 gspca_dev->usb_buf[1] = expo >> 8;
678 reg_w_buf(gspca_dev, 0x8309, data, 2); 670 reg_w_buf(gspca_dev, 0x8309, 2);
679} 671}
680 672
681/* rev 12a only */ 673/* rev 12a only */
682static void setgain(struct gspca_dev *gspca_dev) 674static void setgain(struct gspca_dev *gspca_dev)
683{ 675{
684 struct sd *sd = (struct sd *) gspca_dev; 676 struct sd *sd = (struct sd *) gspca_dev;
685 __u8 data[2];
686 677
687 data[0] = sd->gain; 678 gspca_dev->usb_buf[0] = sd->gain;
688 data[1] = 0; 679 gspca_dev->usb_buf[1] = 0;
689 reg_w_buf(gspca_dev, 0x8335, data, 2); 680 reg_w_buf(gspca_dev, 0x8335, 2);
690} 681}
691 682
692static void setautogain(struct gspca_dev *gspca_dev) 683static void setautogain(struct gspca_dev *gspca_dev)
@@ -702,9 +693,9 @@ static void setautogain(struct gspca_dev *gspca_dev)
702static int sd_start_12a(struct gspca_dev *gspca_dev) 693static int sd_start_12a(struct gspca_dev *gspca_dev)
703{ 694{
704 struct usb_device *dev = gspca_dev->dev; 695 struct usb_device *dev = gspca_dev->dev;
705 int Clck = 0x8a; /* lower 0x8X values lead to fps > 30 */
706 __u8 Reg8307[] = { 0xaa, 0x00 };
707 int mode; 696 int mode;
697 static const __u8 Reg8391[8] =
698 {0x92, 0x30, 0x20, 0x00, 0x0c, 0x00, 0x00, 0x00};
708 699
709 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 700 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
710 if (mode <= 1) { 701 if (mode <= 1) {
@@ -716,14 +707,21 @@ static int sd_start_12a(struct gspca_dev *gspca_dev)
716 * is sufficient to push raw frames at ~20fps */ 707 * is sufficient to push raw frames at ~20fps */
717 reg_w_val(dev, 0x8500, mode); 708 reg_w_val(dev, 0x8500, mode);
718 } /* -- qq@kuku.eu.org */ 709 } /* -- qq@kuku.eu.org */
719 reg_w_buf(gspca_dev, 0x8307, Reg8307, 2); 710
720 reg_w_val(gspca_dev->dev, 0x8700, Clck); 711 gspca_dev->usb_buf[0] = 0xaa;
712 gspca_dev->usb_buf[1] = 0x00;
713 reg_w_buf(gspca_dev, 0x8307, 2);
714 /* clock - lower 0x8X values lead to fps > 30 */
715 reg_w_val(gspca_dev->dev, 0x8700, 0x8a);
721 /* 0x8f 0x85 0x27 clock */ 716 /* 0x8f 0x85 0x27 clock */
722 reg_w_val(gspca_dev->dev, 0x8112, 0x1e | 0x20); 717 reg_w_val(gspca_dev->dev, 0x8112, 0x1e | 0x20);
723 reg_w_val(gspca_dev->dev, 0x850b, 0x03); 718 reg_w_val(gspca_dev->dev, 0x850b, 0x03);
724 setcontrast(gspca_dev); 719 memcpy(gspca_dev->usb_buf, Reg8391, 8);
720 reg_w_buf(gspca_dev, 0x8391, 8);
721 reg_w_buf(gspca_dev, 0x8390, 8);
725 setwhite(gspca_dev); 722 setwhite(gspca_dev);
726 setautogain(gspca_dev); 723 setautogain(gspca_dev);
724/* setgain(gspca_dev); */
727 setexposure(gspca_dev); 725 setexposure(gspca_dev);
728 return 0; 726 return 0;
729} 727}
@@ -750,6 +748,9 @@ static int sd_start_72a(struct gspca_dev *gspca_dev)
750 reg_w_val(dev, 0x8500, mode); /* mode */ 748 reg_w_val(dev, 0x8500, mode); /* mode */
751 reg_w_val(dev, 0x8700, Clck); /* 0x27 clock */ 749 reg_w_val(dev, 0x8700, Clck); /* 0x27 clock */
752 reg_w_val(dev, 0x8112, 0x10 | 0x20); 750 reg_w_val(dev, 0x8112, 0x10 | 0x20);
751 setcontrast(gspca_dev);
752/* setbrightness(gspca_dev); * fixme: bad values */
753 setwhite(gspca_dev);
753 setautogain(gspca_dev); 754 setautogain(gspca_dev);
754 return 0; 755 return 0;
755} 756}
@@ -791,7 +792,6 @@ static void do_autogain(struct gspca_dev *gspca_dev)
791 __u8 luma_mean = 110; 792 __u8 luma_mean = 110;
792 __u8 luma_delta = 20; 793 __u8 luma_delta = 20;
793 __u8 spring = 4; 794 __u8 spring = 4;
794 __u8 reg8339[2];
795 795
796 if (sd->ag_cnt < 0) 796 if (sd->ag_cnt < 0)
797 return; 797 return;
@@ -834,13 +834,13 @@ static void do_autogain(struct gspca_dev *gspca_dev)
834 834
835 if (gainG > 0x3f) 835 if (gainG > 0x3f)
836 gainG = 0x3f; 836 gainG = 0x3f;
837 else if (gainG < 4) 837 else if (gainG < 3)
838 gainG = 3; 838 gainG = 3;
839 i2c_write(gspca_dev, gainG, 0x35); 839 i2c_write(gspca_dev, gainG, 0x35);
840 840
841 if (expotimes >= 0x0256) 841 if (expotimes > 0x0256)
842 expotimes = 0x0256; 842 expotimes = 0x0256;
843 else if (expotimes < 4) 843 else if (expotimes < 3)
844 expotimes = 3; 844 expotimes = 3;
845 i2c_write(gspca_dev, expotimes | pixelclk, 0x09); 845 i2c_write(gspca_dev, expotimes | pixelclk, 0x09);
846 } 846 }
@@ -848,13 +848,13 @@ static void do_autogain(struct gspca_dev *gspca_dev)
848 case Rev012A: 848 case Rev012A:
849 reg_r(gspca_dev, 0x8330, 2); 849 reg_r(gspca_dev, 0x8330, 2);
850 if (gspca_dev->usb_buf[1] > 0x08) { 850 if (gspca_dev->usb_buf[1] > 0x08) {
851 reg8339[0] = ++sd->expo12a; 851 gspca_dev->usb_buf[0] = ++sd->expo12a;
852 reg8339[1] = 0; 852 gspca_dev->usb_buf[1] = 0;
853 reg_w_buf(gspca_dev, 0x8339, reg8339, 2); 853 reg_w_buf(gspca_dev, 0x8339, 2);
854 } else if (gspca_dev->usb_buf[1] < 0x02) { 854 } else if (gspca_dev->usb_buf[1] < 0x02) {
855 reg8339[0] = --sd->expo12a; 855 gspca_dev->usb_buf[0] = --sd->expo12a;
856 reg8339[1] = 0; 856 gspca_dev->usb_buf[1] = 0;
857 reg_w_buf(gspca_dev, 0x8339, reg8339, 2); 857 reg_w_buf(gspca_dev, 0x8339, 2);
858 } 858 }
859 break; 859 break;
860 } 860 }
@@ -867,8 +867,8 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
867{ 867{
868 struct sd *sd = (struct sd *) gspca_dev; 868 struct sd *sd = (struct sd *) gspca_dev;
869 869
870 switch (data[0]) { 870 switch (data[0]) { /* sequence number */
871 case 0: /* start of frame */ 871 case 0: /* start of frame */
872 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame, 872 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame,
873 data, 0); 873 data, 0);
874 data += SPCA561_OFFSET_DATA; 874 data += SPCA561_OFFSET_DATA;
@@ -890,8 +890,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
890 frame, data, len); 890 frame, data, len);
891 } 891 }
892 return; 892 return;
893 case 0xff: /* drop */ 893 case 0xff: /* drop (empty mpackets) */
894/* gspca_dev->last_packet_type = DISCARD_PACKET; */
895 return; 894 return;
896 } 895 }
897 data++; 896 data++;
@@ -900,55 +899,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
900} 899}
901 900
902/* rev 72a only */ 901/* rev 72a only */
903static void setbrightness(struct gspca_dev *gspca_dev)
904{
905 struct sd *sd = (struct sd *) gspca_dev;
906 __u8 value;
907
908 value = sd->brightness;
909 reg_w_val(gspca_dev->dev, 0x8611, value);
910 reg_w_val(gspca_dev->dev, 0x8612, value);
911 reg_w_val(gspca_dev->dev, 0x8613, value);
912 reg_w_val(gspca_dev->dev, 0x8614, value);
913}
914
915static void getbrightness(struct gspca_dev *gspca_dev)
916{
917 struct sd *sd = (struct sd *) gspca_dev;
918 __u16 tot;
919
920 tot = 0;
921 reg_r(gspca_dev, 0x8611, 1);
922 tot += gspca_dev->usb_buf[0];
923 reg_r(gspca_dev, 0x8612, 1);
924 tot += gspca_dev->usb_buf[0];
925 reg_r(gspca_dev, 0x8613, 1);
926 tot += gspca_dev->usb_buf[0];
927 reg_r(gspca_dev, 0x8614, 1);
928 tot += gspca_dev->usb_buf[0];
929 sd->brightness = tot >> 2;
930}
931
932/* rev72a only */
933static void getcontrast(struct gspca_dev *gspca_dev)
934{
935 struct sd *sd = (struct sd *) gspca_dev;
936 __u16 tot;
937
938 tot = 0;
939 reg_r(gspca_dev, 0x8651, 1);
940 tot += gspca_dev->usb_buf[0];
941 reg_r(gspca_dev, 0x8652, 1);
942 tot += gspca_dev->usb_buf[0];
943 reg_r(gspca_dev, 0x8653, 1);
944 tot += gspca_dev->usb_buf[0];
945 reg_r(gspca_dev, 0x8654, 1);
946 tot += gspca_dev->usb_buf[0];
947 sd->contrast = tot << 6;
948 PDEBUG(D_CONF, "get contrast %d", sd->contrast);
949}
950
951/* rev 72a only */
952static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) 902static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
953{ 903{
954 struct sd *sd = (struct sd *) gspca_dev; 904 struct sd *sd = (struct sd *) gspca_dev;
@@ -963,7 +913,6 @@ static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
963{ 913{
964 struct sd *sd = (struct sd *) gspca_dev; 914 struct sd *sd = (struct sd *) gspca_dev;
965 915
966 getbrightness(gspca_dev);
967 *val = sd->brightness; 916 *val = sd->brightness;
968 return 0; 917 return 0;
969} 918}
@@ -983,7 +932,6 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
983{ 932{
984 struct sd *sd = (struct sd *) gspca_dev; 933 struct sd *sd = (struct sd *) gspca_dev;
985 934
986 getcontrast(gspca_dev);
987 *val = sd->contrast; 935 *val = sd->contrast;
988 return 0; 936 return 0;
989} 937}
@@ -1006,7 +954,6 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
1006 return 0; 954 return 0;
1007} 955}
1008 956
1009/* rev12a only */
1010static int sd_setwhite(struct gspca_dev *gspca_dev, __s32 val) 957static int sd_setwhite(struct gspca_dev *gspca_dev, __s32 val)
1011{ 958{
1012 struct sd *sd = (struct sd *) gspca_dev; 959 struct sd *sd = (struct sd *) gspca_dev;
@@ -1121,6 +1068,19 @@ static struct ctrl sd_ctrls_12a[] = {
1121 1068
1122static struct ctrl sd_ctrls_72a[] = { 1069static struct ctrl sd_ctrls_72a[] = {
1123 { 1070 {
1071 {
1072 .id = V4L2_CID_DO_WHITE_BALANCE,
1073 .type = V4L2_CTRL_TYPE_INTEGER,
1074 .name = "White Balance",
1075 .minimum = WHITE_MIN,
1076 .maximum = WHITE_MAX,
1077 .step = 1,
1078 .default_value = WHITE_DEF,
1079 },
1080 .set = sd_setwhite,
1081 .get = sd_getwhite,
1082 },
1083 {
1124 { 1084 {
1125 .id = V4L2_CID_BRIGHTNESS, 1085 .id = V4L2_CID_BRIGHTNESS,
1126 .type = V4L2_CTRL_TYPE_INTEGER, 1086 .type = V4L2_CTRL_TYPE_INTEGER,
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index d9d64911f22a..60de9af87fbb 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -109,7 +109,7 @@ static struct ctrl sd_ctrls[] = {
109 }, 109 },
110}; 110};
111 111
112static struct v4l2_pix_format vga_mode[] = { 112static const struct v4l2_pix_format vga_mode[] = {
113 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 113 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
114 .bytesperline = 320, 114 .bytesperline = 320,
115 .sizeimage = 320 * 240 * 3 / 8 + 590, 115 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -424,10 +424,8 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
424 424
425 /* beginning of the frame */ 425 /* beginning of the frame */
426#define STKHDRSZ 12 426#define STKHDRSZ 12
427 gspca_frame_add(gspca_dev, INTER_PACKET, frame, 427 data += STKHDRSZ;
428 data + STKHDRSZ, len - STKHDRSZ); 428 len -= STKHDRSZ;
429#undef STKHDRSZ
430 return;
431 } 429 }
432 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); 430 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
433} 431}
diff --git a/drivers/media/video/gspca/stv06xx/Kconfig b/drivers/media/video/gspca/stv06xx/Kconfig
new file mode 100644
index 000000000000..634ad38d9fb8
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/Kconfig
@@ -0,0 +1,9 @@
1config USB_STV06XX
2 tristate "STV06XX USB Camera Driver"
3 depends on USB_GSPCA
4 help
5 Say Y here if you want support for cameras based on
6 the ST STV06XX chip.
7
8 To compile this driver as a module, choose M here: the
9 module will be called gspca_stv06xx.
diff --git a/drivers/media/video/gspca/stv06xx/Makefile b/drivers/media/video/gspca/stv06xx/Makefile
new file mode 100644
index 000000000000..feeaa94ab588
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/Makefile
@@ -0,0 +1,9 @@
1obj-$(CONFIG_USB_STV06XX) += gspca_stv06xx.o
2
3gspca_stv06xx-objs := stv06xx.o \
4 stv06xx_vv6410.o \
5 stv06xx_hdcs.o \
6 stv06xx_pb0100.o
7
8EXTRA_CFLAGS += -Idrivers/media/video/gspca
9
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
new file mode 100644
index 000000000000..13a021e3cbb7
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -0,0 +1,522 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * P/N 861037: Sensor HDCS1000 ASIC STV0600
22 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
24 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
25 * P/N 861075-0040: Sensor HDCS1000 ASIC
26 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
27 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
28 */
29
30#include "stv06xx_sensor.h"
31
32MODULE_AUTHOR("Erik Andrén");
33MODULE_DESCRIPTION("STV06XX USB Camera Driver");
34MODULE_LICENSE("GPL");
35
36static int dump_bridge;
37static int dump_sensor;
38
39int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data)
40{
41 int err;
42 struct usb_device *udev = sd->gspca_dev.dev;
43 __u8 *buf = sd->gspca_dev.usb_buf;
44 u8 len = (i2c_data > 0xff) ? 2 : 1;
45
46 buf[0] = i2c_data & 0xff;
47 buf[1] = (i2c_data >> 8) & 0xff;
48
49 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
50 0x04, 0x40, address, 0, buf, len,
51 STV06XX_URB_MSG_TIMEOUT);
52
53
54 PDEBUG(D_CONF, "Written 0x%x to address 0x%x, status: %d",
55 i2c_data, address, err);
56
57 return (err < 0) ? err : 0;
58}
59
60int stv06xx_read_bridge(struct sd *sd, u16 address, u8 *i2c_data)
61{
62 int err;
63 struct usb_device *udev = sd->gspca_dev.dev;
64 __u8 *buf = sd->gspca_dev.usb_buf;
65
66 err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
67 0x04, 0xc0, address, 0, buf, 1,
68 STV06XX_URB_MSG_TIMEOUT);
69
70 *i2c_data = buf[0];
71
72 PDEBUG(D_CONF, "Read 0x%x from address 0x%x, status %d",
73 *i2c_data, address, err);
74
75 return (err < 0) ? err : 0;
76}
77
78/* Wraps the normal write sensor bytes / words functions for writing a
79 single value */
80int stv06xx_write_sensor(struct sd *sd, u8 address, u16 value)
81{
82 if (sd->sensor->i2c_len == 2) {
83 u16 data[2] = { address, value };
84 return stv06xx_write_sensor_words(sd, data, 1);
85 } else {
86 u8 data[2] = { address, value };
87 return stv06xx_write_sensor_bytes(sd, data, 1);
88 }
89}
90
91static int stv06xx_write_sensor_finish(struct sd *sd)
92{
93 int err = 0;
94
95 if (IS_850(sd)) {
96 struct usb_device *udev = sd->gspca_dev.dev;
97 __u8 *buf = sd->gspca_dev.usb_buf;
98
 99		/* QuickCam Web needs an extra packet */
100 buf[0] = 0;
101 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
102 0x04, 0x40, 0x1704, 0, buf, 1,
103 STV06XX_URB_MSG_TIMEOUT);
104 }
105
106 return (err < 0) ? err : 0;
107}
108
109int stv06xx_write_sensor_bytes(struct sd *sd, const u8 *data, u8 len)
110{
111 int err, i, j;
112 struct usb_device *udev = sd->gspca_dev.dev;
113 __u8 *buf = sd->gspca_dev.usb_buf;
114
115 PDEBUG(D_USBO, "I2C: Command buffer contains %d entries", len);
116 for (i = 0; i < len;) {
117 /* Build the command buffer */
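		/* Buffer layout: register addresses at offsets 0x00-0x0f,
		   the matching data bytes at 0x10-0x1f, the sensor I2C
		   address at 0x20, the command count minus one at 0x21 and
		   the I2C command code at 0x22 */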
118 memset(buf, 0, I2C_BUFFER_LENGTH);
119 for (j = 0; j < I2C_MAX_BYTES && i < len; j++, i++) {
120 buf[j] = data[2*i];
121 buf[0x10 + j] = data[2*i+1];
122 PDEBUG(D_USBO, "I2C: Writing 0x%02x to reg 0x%02x",
123 data[2*i+1], data[2*i]);
124 }
125 buf[0x20] = sd->sensor->i2c_addr;
126 buf[0x21] = j - 1; /* Number of commands to send - 1 */
127 buf[0x22] = I2C_WRITE_CMD;
128 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
129 0x04, 0x40, 0x0400, 0, buf,
130 I2C_BUFFER_LENGTH,
131 STV06XX_URB_MSG_TIMEOUT);
132 if (err < 0)
133 return err;
134 }
135 return stv06xx_write_sensor_finish(sd);
136}
137
138int stv06xx_write_sensor_words(struct sd *sd, const u16 *data, u8 len)
139{
140 int err, i, j;
141 struct usb_device *udev = sd->gspca_dev.dev;
142 __u8 *buf = sd->gspca_dev.usb_buf;
143
144 PDEBUG(D_USBO, "I2C: Command buffer contains %d entries", len);
145
146 for (i = 0; i < len;) {
147 /* Build the command buffer */
148 memset(buf, 0, I2C_BUFFER_LENGTH);
149 for (j = 0; j < I2C_MAX_WORDS && i < len; j++, i++) {
150 buf[j] = data[2*i];
151 buf[0x10 + j * 2] = data[2*i+1];
152 buf[0x10 + j * 2 + 1] = data[2*i+1] >> 8;
153 PDEBUG(D_USBO, "I2C: Writing 0x%04x to reg 0x%02x",
154 data[2*i+1], data[2*i]);
155 }
156 buf[0x20] = sd->sensor->i2c_addr;
157 buf[0x21] = j - 1; /* Number of commands to send - 1 */
158 buf[0x22] = I2C_WRITE_CMD;
159 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
160 0x04, 0x40, 0x0400, 0, buf,
161 I2C_BUFFER_LENGTH,
162 STV06XX_URB_MSG_TIMEOUT);
163 if (err < 0)
164 return err;
165 }
166 return stv06xx_write_sensor_finish(sd);
167}
168
169int stv06xx_read_sensor(struct sd *sd, const u8 address, u16 *value)
170{
171 int err;
172 struct usb_device *udev = sd->gspca_dev.dev;
173 __u8 *buf = sd->gspca_dev.usb_buf;
174
175 err = stv06xx_write_bridge(sd, STV_I2C_FLUSH, sd->sensor->i2c_flush);
176 if (err < 0)
177 return err;
178
179 /* Clear mem */
180 memset(buf, 0, I2C_BUFFER_LENGTH);
181
182 buf[0] = address;
183 buf[0x20] = sd->sensor->i2c_addr;
184 buf[0x21] = 0;
185
186 /* Read I2C register */
187 buf[0x22] = I2C_READ_CMD;
188
189 err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
190 0x04, 0x40, 0x1400, 0, buf, I2C_BUFFER_LENGTH,
191 STV06XX_URB_MSG_TIMEOUT);
192 if (err < 0) {
193 PDEBUG(D_ERR, "I2C Read: error writing address: %d", err);
194 return err;
195 }
196
197 err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
198 0x04, 0xc0, 0x1410, 0, buf, sd->sensor->i2c_len,
199 STV06XX_URB_MSG_TIMEOUT);
200 if (sd->sensor->i2c_len == 2)
201 *value = buf[0] | (buf[1] << 8);
202 else
203 *value = buf[0];
204
205 PDEBUG(D_USBO, "I2C: Read 0x%x from address 0x%x, status: %d",
206 *value, address, err);
207
208 return (err < 0) ? err : 0;
209}
210
211/* Dumps all bridge registers */
212static void stv06xx_dump_bridge(struct sd *sd)
213{
214 int i;
215 u8 data, buf;
216
217 info("Dumping all stv06xx bridge registers");
218 for (i = 0x1400; i < 0x160f; i++) {
219 stv06xx_read_bridge(sd, i, &data);
220
221 info("Read 0x%x from address 0x%x", data, i);
222 }
223
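	/* Probe writability: save the current value, write 0xff, read it
	   back to see which bits stuck, then restore the original value */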
224 for (i = 0x1400; i < 0x160f; i++) {
225 stv06xx_read_bridge(sd, i, &data);
226 buf = data;
227
228 stv06xx_write_bridge(sd, i, 0xff);
229 stv06xx_read_bridge(sd, i, &data);
230 if (data == 0xff)
231 info("Register 0x%x is read/write", i);
232 else if (data != buf)
 233 info("Register 0x%x is read/write, "
234 "but only partially", i);
235 else
236 info("Register 0x%x is read-only", i);
237
238 stv06xx_write_bridge(sd, i, buf);
239 }
240}
241
242/* this function is called at probe and resume time */
243static int stv06xx_init(struct gspca_dev *gspca_dev)
244{
245 struct sd *sd = (struct sd *) gspca_dev;
246 int err;
247
248 PDEBUG(D_PROBE, "Initializing camera");
249
250 /* Let the usb init settle for a bit
251 before performing the initialization */
252 msleep(250);
253
254 err = sd->sensor->init(sd);
255
256 if (dump_sensor)
257 sd->sensor->dump(sd);
258
259 return (err < 0) ? err : 0;
260}
261
262/* Start the camera */
263static int stv06xx_start(struct gspca_dev *gspca_dev)
264{
265 struct sd *sd = (struct sd *) gspca_dev;
266 int err;
267
268 /* Prepare the sensor for start */
269 err = sd->sensor->start(sd);
270 if (err < 0)
271 goto out;
272
273 /* Start isochronous streaming */
274 err = stv06xx_write_bridge(sd, STV_ISO_ENABLE, 1);
275
276out:
277 if (err < 0)
278 PDEBUG(D_STREAM, "Starting stream failed");
279 else
280 PDEBUG(D_STREAM, "Started streaming");
281
282 return (err < 0) ? err : 0;
283}
284
285static void stv06xx_stopN(struct gspca_dev *gspca_dev)
286{
287 int err;
288 struct sd *sd = (struct sd *) gspca_dev;
289
290 /* stop ISO-streaming */
291 err = stv06xx_write_bridge(sd, STV_ISO_ENABLE, 0);
292 if (err < 0)
293 goto out;
294
295 err = sd->sensor->stop(sd);
296 if (err < 0)
297 goto out;
298
299out:
300 if (err < 0)
301 PDEBUG(D_STREAM, "Failed to stop stream");
302 else
303 PDEBUG(D_STREAM, "Stopped streaming");
304}
305
306/*
 307 * Analyse a USB packet of the data stream and store it appropriately.
308 * Each packet contains an integral number of chunks. Each chunk has
309 * 2-bytes identification, followed by 2-bytes that describe the chunk
310 * length. Known/guessed chunk identifications are:
311 * 8001/8005/C001/C005 - Begin new frame
312 * 8002/8006/C002/C006 - End frame
313 * 0200/4200 - Contains actual image data, bayer or compressed
314 * 0005 - 11 bytes of unknown data
315 * 0100 - 2 bytes of unknown data
 316 * The 0005 and 0100 chunks seem to appear only in the compressed stream.
317 */
318static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
319 struct gspca_frame *frame, /* target */
320 __u8 *data, /* isoc packet */
321 int len) /* iso packet length */
322{
323 PDEBUG(D_PACK, "Packet of length %d arrived", len);
324
 325 /* A packet may contain several chunks;
 326 loop until the whole packet has been processed */
327 while (len) {
328 int id, chunk_len;
329
330 if (len < 4) {
331 PDEBUG(D_PACK, "Packet is smaller than 4 bytes");
332 return;
333 }
334
335 /* Capture the id */
336 id = (data[0] << 8) | data[1];
337
338 /* Capture the chunk length */
339 chunk_len = (data[2] << 8) | data[3];
340 PDEBUG(D_PACK, "Chunk id: %x, length: %d", id, chunk_len);
341
342 data += 4;
343 len -= 4;
344
345 if (len < chunk_len) {
346 PDEBUG(D_ERR, "URB packet length is smaller"
347 " than the specified chunk length");
348 return;
349 }
350
351 switch (id) {
352 case 0x0200:
353 case 0x4200:
354 PDEBUG(D_PACK, "Frame data packet detected");
355
356 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
357 data, chunk_len);
358 break;
359
360 case 0x8001:
361 case 0x8005:
362 case 0xc001:
363 case 0xc005:
364 PDEBUG(D_PACK, "Starting new frame");
365
366 /* Create a new frame, chunk length should be zero */
367 gspca_frame_add(gspca_dev, FIRST_PACKET,
368 frame, data, 0);
369
370 if (chunk_len)
371 PDEBUG(D_ERR, "Chunk length is "
 372 "non-zero on an SOF");
373 break;
374
375 case 0x8002:
376 case 0x8006:
377 case 0xc002:
378 PDEBUG(D_PACK, "End of frame detected");
379
380 /* Complete the last frame (if any) */
381 gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0);
382
383 if (chunk_len)
384 PDEBUG(D_ERR, "Chunk length is "
 385 "non-zero on an EOF");
386 break;
387
388 case 0x0005:
 389 PDEBUG(D_PACK, "Chunk 0x0005 detected");
390 /* Unknown chunk with 11 bytes of data,
391 occurs just before end of each frame
392 in compressed mode */
393 break;
394
395 case 0x0100:
396 PDEBUG(D_PACK, "Chunk 0x0100 detected");
397 /* Unknown chunk with 2 bytes of data,
398 occurs 2-3 times per USB interrupt */
399 break;
400 default:
 401 PDEBUG(D_PACK, "Unknown chunk 0x%04x detected", id);
402 /* Unknown chunk */
403 }
404 data += chunk_len;
405 len -= chunk_len;
406 }
407}
408
409static int stv06xx_config(struct gspca_dev *gspca_dev,
410 const struct usb_device_id *id);
411
412/* sub-driver description */
413static const struct sd_desc sd_desc = {
414 .name = MODULE_NAME,
415 .config = stv06xx_config,
416 .init = stv06xx_init,
417 .start = stv06xx_start,
418 .stopN = stv06xx_stopN,
419 .pkt_scan = stv06xx_pkt_scan
420};
421
422/* This function is called at probe time */
423static int stv06xx_config(struct gspca_dev *gspca_dev,
424 const struct usb_device_id *id)
425{
426 struct sd *sd = (struct sd *) gspca_dev;
427 struct cam *cam;
428
429 PDEBUG(D_PROBE, "Configuring camera");
430
431 cam = &gspca_dev->cam;
432 cam->epaddr = STV_ISOC_ENDPOINT_ADDR;
433 sd->desc = sd_desc;
434 gspca_dev->sd_desc = &sd->desc;
435
436 if (dump_bridge)
437 stv06xx_dump_bridge(sd);
438
439 sd->sensor = &stv06xx_sensor_vv6410;
440 if (!sd->sensor->probe(sd))
441 return 0;
442
443 sd->sensor = &stv06xx_sensor_hdcs1x00;
444 if (!sd->sensor->probe(sd))
445 return 0;
446
447 sd->sensor = &stv06xx_sensor_hdcs1020;
448 if (!sd->sensor->probe(sd))
449 return 0;
450
451 sd->sensor = &stv06xx_sensor_pb0100;
452 if (!sd->sensor->probe(sd))
453 return 0;
454
455 sd->sensor = NULL;
456 return -ENODEV;
457}
458
459
460
461/* -- module initialisation -- */
462static const __devinitdata struct usb_device_id device_table[] = {
463 {USB_DEVICE(0x046d, 0x0840)}, /* QuickCam Express */
464 {USB_DEVICE(0x046d, 0x0850)}, /* LEGO cam / QuickCam Web */
465 {USB_DEVICE(0x046d, 0x0870)}, /* Dexxa WebCam USB */
466 {}
467};
468MODULE_DEVICE_TABLE(usb, device_table);
469
470/* -- device connect -- */
471static int sd_probe(struct usb_interface *intf,
472 const struct usb_device_id *id)
473{
474 PDEBUG(D_PROBE, "Probing for a stv06xx device");
475 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
476 THIS_MODULE);
477}
478
479static void sd_disconnect(struct usb_interface *intf)
480{
481 struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
482 struct sd *sd = (struct sd *) gspca_dev;
483 PDEBUG(D_PROBE, "Disconnecting the stv06xx device");
484
485 if (sd->sensor->disconnect)
486 sd->sensor->disconnect(sd);
487 gspca_disconnect(intf);
488}
489
490static struct usb_driver sd_driver = {
491 .name = MODULE_NAME,
492 .id_table = device_table,
493 .probe = sd_probe,
494 .disconnect = sd_disconnect,
495#ifdef CONFIG_PM
496 .suspend = gspca_suspend,
497 .resume = gspca_resume,
498#endif
499};
500
501/* -- module insert / remove -- */
502static int __init sd_mod_init(void)
503{
504 if (usb_register(&sd_driver) < 0)
505 return -1;
506 PDEBUG(D_PROBE, "registered");
507 return 0;
508}
509static void __exit sd_mod_exit(void)
510{
511 usb_deregister(&sd_driver);
512 PDEBUG(D_PROBE, "deregistered");
513}
514
515module_init(sd_mod_init);
516module_exit(sd_mod_exit);
517
518module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
519MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
520
521module_param(dump_sensor, bool, S_IRUGO | S_IWUSR);
522MODULE_PARM_DESC(dump_sensor, "Dumps all sensor registers at startup");
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
new file mode 100644
index 000000000000..1207e7d17f14
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * P/N 861037: Sensor HDCS1000 ASIC STV0600
22 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
24 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
25 * P/N 861075-0040: Sensor HDCS1000 ASIC
26 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
27 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
28 */
29
30#ifndef STV06XX_H_
31#define STV06XX_H_
32
33#include "gspca.h"
34
35#define MODULE_NAME "STV06xx"
36
37#define STV_ISOC_ENDPOINT_ADDR 0x81
38
39#ifndef V4L2_PIX_FMT_SGRBG8
40#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G')
41#endif
42
43#define STV_REG23 0x0423
44
45/* Control registers of the STV0600 ASIC */
46#define STV_I2C_PARTNER 0x1420
47#define STV_I2C_VAL_REG_VAL_PAIRS_MIN1 0x1421
48#define STV_I2C_READ_WRITE_TOGGLE 0x1422
49#define STV_I2C_FLUSH 0x1423
50#define STV_I2C_SUCC_READ_REG_VALS 0x1424
51
52#define STV_ISO_ENABLE 0x1440
53#define STV_SCAN_RATE 0x1443
54#define STV_LED_CTRL 0x1445
55#define STV_STV0600_EMULATION 0x1446
56#define STV_REG00 0x1500
57#define STV_REG01 0x1501
58#define STV_REG02 0x1502
59#define STV_REG03 0x1503
60#define STV_REG04 0x1504
61
62#define STV_ISO_SIZE_L 0x15c1
63#define STV_ISO_SIZE_H 0x15c2
64
65/* Refers to the CIF 352x288 and QCIF 176x144 */
66/* 1: 288 lines, 2: 144 lines */
67#define STV_Y_CTRL 0x15c3
68
69/* 0xa: 352 columns, 0x6: 176 columns */
70#define STV_X_CTRL 0x1680
71
72#define STV06XX_URB_MSG_TIMEOUT 5000
73
74#define I2C_MAX_BYTES 16
75#define I2C_MAX_WORDS 8
76
77#define I2C_BUFFER_LENGTH 0x23
78#define I2C_READ_CMD 3
79#define I2C_WRITE_CMD 1
80
81#define LED_ON 1
82#define LED_OFF 0
83
84/* STV06xx device descriptor */
85struct sd {
86 struct gspca_dev gspca_dev;
87
88 /* A pointer to the currently connected sensor */
89 const struct stv06xx_sensor *sensor;
90
91 /* A pointer to the sd_desc struct */
92 struct sd_desc desc;
93
94 /* Sensor private data */
95 void *sensor_priv;
96};
97
98int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data);
99int stv06xx_read_bridge(struct sd *sd, u16 address, u8 *i2c_data);
100
101int stv06xx_write_sensor_bytes(struct sd *sd, const u8 *data, u8 len);
102int stv06xx_write_sensor_words(struct sd *sd, const u16 *data, u8 len);
103
104int stv06xx_read_sensor(struct sd *sd, const u8 address, u16 *value);
105int stv06xx_write_sensor(struct sd *sd, u8 address, u16 value);
106
107#endif
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
new file mode 100644
index 000000000000..14335a9e4bb5
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -0,0 +1,535 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 * Copyright (c) 2008 Chia-I Wu
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * P/N 861037: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
24 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
25 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
26 * P/N 861075-0040: Sensor HDCS1000 ASIC
27 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
28 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
29 */
30
31#include "stv06xx_hdcs.h"
32
33enum hdcs_power_state {
34 HDCS_STATE_SLEEP,
35 HDCS_STATE_IDLE,
36 HDCS_STATE_RUN
37};
38
39/* no lock? */
40struct hdcs {
41 enum hdcs_power_state state;
42 int w, h;
43
44 /* visible area of the sensor array */
45 struct {
46 int left, top;
47 int width, height;
48 int border;
49 } array;
50
51 struct {
52 /* Column timing overhead */
53 u8 cto;
54 /* Column processing overhead */
55 u8 cpo;
56 /* Row sample period constant */
57 u16 rs;
58 /* Exposure reset duration */
59 u16 er;
60 } exp;
61
62 int psmp;
63};
64
65static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len)
66{
67 u8 regs[I2C_MAX_BYTES * 2];
68 int i;
69
70 if (unlikely((len <= 0) || (len >= I2C_MAX_BYTES) ||
71 (reg + len > 0xff)))
72 return -EINVAL;
73
74 for (i = 0; i < len; i++, reg++) {
75 regs[2*i] = reg;
76 regs[2*i+1] = vals[i];
77 }
78
79 return stv06xx_write_sensor_bytes(sd, regs, len);
80}
81
82static int hdcs_set_state(struct sd *sd, enum hdcs_power_state state)
83{
84 struct hdcs *hdcs = sd->sensor_priv;
85 u8 val;
86 int ret;
87
88 if (hdcs->state == state)
89 return 0;
90
91 /* we need to go idle before running or sleeping */
92 if (hdcs->state != HDCS_STATE_IDLE) {
93 ret = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), 0);
94 if (ret)
95 return ret;
96 }
97
98 hdcs->state = HDCS_STATE_IDLE;
99
100 if (state == HDCS_STATE_IDLE)
101 return 0;
102
103 switch (state) {
104 case HDCS_STATE_SLEEP:
105 val = HDCS_SLEEP_MODE;
106 break;
107
108 case HDCS_STATE_RUN:
109 val = HDCS_RUN_ENABLE;
110 break;
111
112 default:
113 return -EINVAL;
114 }
115
116 ret = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), val);
 117 if (ret >= 0)
 118 hdcs->state = state;
119
120 return ret;
121}
122
123static int hdcs_reset(struct sd *sd)
124{
125 struct hdcs *hdcs = sd->sensor_priv;
126 int err;
127
128 err = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), 1);
129 if (err < 0)
130 return err;
131
132 err = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), 0);
 133 if (err >= 0)
 134 hdcs->state = HDCS_STATE_IDLE;
135
136 return err;
137}
138
139static int hdcs_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
140{
141 struct sd *sd = (struct sd *) gspca_dev;
142 struct hdcs *hdcs = sd->sensor_priv;
143
144 /* Column time period */
145 int ct;
146 /* Column processing period */
147 int cp;
148 /* Row processing period */
149 int rp;
150 int cycles;
151 int err;
152 int rowexp;
153 u16 data[2];
154
155 err = stv06xx_read_sensor(sd, HDCS_ROWEXPL, &data[0]);
156 if (err < 0)
157 return err;
158
159 err = stv06xx_read_sensor(sd, HDCS_ROWEXPH, &data[1]);
160 if (err < 0)
161 return err;
162
163 rowexp = (data[1] << 8) | data[0];
164
165 ct = hdcs->exp.cto + hdcs->psmp + (HDCS_ADC_START_SIG_DUR + 2);
166 cp = hdcs->exp.cto + (hdcs->w * ct / 2);
167 rp = hdcs->exp.rs + cp;
168
169 cycles = rp * rowexp;
170 *val = cycles / HDCS_CLK_FREQ_MHZ;
171 PDEBUG(D_V4L2, "Read exposure %d", *val);
172 return 0;
173}
174
175static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
176{
177 struct sd *sd = (struct sd *) gspca_dev;
178 struct hdcs *hdcs = sd->sensor_priv;
179 int rowexp, srowexp;
180 int max_srowexp;
181 /* Column time period */
182 int ct;
183 /* Column processing period */
184 int cp;
185 /* Row processing period */
186 int rp;
187 /* Minimum number of column timing periods
188 within the column processing period */
189 int mnct;
190 int cycles, err;
191 u8 exp[4];
192
193 cycles = val * HDCS_CLK_FREQ_MHZ;
194
195 ct = hdcs->exp.cto + hdcs->psmp + (HDCS_ADC_START_SIG_DUR + 2);
196 cp = hdcs->exp.cto + (hdcs->w * ct / 2);
197
198 /* the cycles one row takes */
199 rp = hdcs->exp.rs + cp;
200
201 rowexp = cycles / rp;
202
203 /* the remaining cycles */
204 cycles -= rowexp * rp;
205
206 /* calculate sub-row exposure */
207 if (IS_1020(sd)) {
208 /* see HDCS-1020 datasheet 3.5.6.4, p. 63 */
209 srowexp = hdcs->w - (cycles + hdcs->exp.er + 13) / ct;
210
211 mnct = (hdcs->exp.er + 12 + ct - 1) / ct;
212 max_srowexp = hdcs->w - mnct;
213 } else {
214 /* see HDCS-1000 datasheet 3.4.5.5, p. 61 */
215 srowexp = cp - hdcs->exp.er - 6 - cycles;
216
217 mnct = (hdcs->exp.er + 5 + ct - 1) / ct;
218 max_srowexp = cp - mnct * ct - 1;
219 }
220
221 if (srowexp < 0)
222 srowexp = 0;
223 else if (srowexp > max_srowexp)
224 srowexp = max_srowexp;
225
226 if (IS_1020(sd)) {
227 exp[0] = rowexp & 0xff;
228 exp[1] = rowexp >> 8;
229 exp[2] = (srowexp >> 2) & 0xff;
230 /* this clears exposure error flag */
231 exp[3] = 0x1;
232 err = hdcs_reg_write_seq(sd, HDCS_ROWEXPL, exp, 4);
233 } else {
234 exp[0] = rowexp & 0xff;
235 exp[1] = rowexp >> 8;
236 exp[2] = srowexp & 0xff;
237 exp[3] = srowexp >> 8;
238 err = hdcs_reg_write_seq(sd, HDCS_ROWEXPL, exp, 4);
239 if (err < 0)
240 return err;
241
242 /* clear exposure error flag */
243 err = stv06xx_write_sensor(sd,
244 HDCS_STATUS, BIT(4));
245 }
246 PDEBUG(D_V4L2, "Writing exposure %d, rowexp %d, srowexp %d",
247 val, rowexp, srowexp);
248 return err;
249}
250
251static int hdcs_set_gains(struct sd *sd, u8 r, u8 g, u8 b)
252{
253 u8 gains[4];
254
255 /* the voltage gain Av = (1 + 19 * val / 127) * (1 + bit7) */
256 if (r > 127)
257 r = 0x80 | (r / 2);
258 if (g > 127)
259 g = 0x80 | (g / 2);
260 if (b > 127)
261 b = 0x80 | (b / 2);
262
263 gains[0] = g;
264 gains[1] = r;
265 gains[2] = b;
266 gains[3] = g;
267
268 return hdcs_reg_write_seq(sd, HDCS_ERECPGA, gains, 4);
269}
270
271static int hdcs_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
272{
273 struct sd *sd = (struct sd *) gspca_dev;
274 int err;
275 u16 data;
276
277 err = stv06xx_read_sensor(sd, HDCS_ERECPGA, &data);
278
279 /* Bit 7 doubles the gain */
280 if (data & 0x80)
281 *val = (data & 0x7f) * 2;
282 else
283 *val = data;
284
285 PDEBUG(D_V4L2, "Read gain %d", *val);
286 return err;
287}
288
289static int hdcs_set_gain(struct gspca_dev *gspca_dev, __s32 val)
290{
291 PDEBUG(D_V4L2, "Writing gain %d", val);
292 return hdcs_set_gains((struct sd *) gspca_dev,
293 val & 0xff, val & 0xff, val & 0xff);
294}
295
296static int hdcs_set_size(struct sd *sd,
297 unsigned int width, unsigned int height)
298{
299 struct hdcs *hdcs = sd->sensor_priv;
300 u8 win[4];
301 unsigned int x, y;
302 int err;
303
304 /* must be multiple of 4 */
305 width = (width + 3) & ~0x3;
306 height = (height + 3) & ~0x3;
307
308 if (width > hdcs->array.width)
309 width = hdcs->array.width;
310
311 if (IS_1020(sd)) {
312 /* the borders are also invalid */
313 if (height + 2 * hdcs->array.border + HDCS_1020_BOTTOM_Y_SKIP
314 > hdcs->array.height)
315 height = hdcs->array.height - 2 * hdcs->array.border -
316 HDCS_1020_BOTTOM_Y_SKIP;
317
318 y = (hdcs->array.height - HDCS_1020_BOTTOM_Y_SKIP - height) / 2
319 + hdcs->array.top;
320 } else {
321 if (height > hdcs->array.height)
322 height = hdcs->array.height;
323
324 y = hdcs->array.top + (hdcs->array.height - height) / 2;
325 }
326
327 x = hdcs->array.left + (hdcs->array.width - width) / 2;
328
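	/* The window boundary registers are programmed in units of four
	   pixels, hence the divisions by four below */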
329 win[0] = y / 4;
330 win[1] = x / 4;
331 win[2] = (y + height) / 4 - 1;
332 win[3] = (x + width) / 4 - 1;
333
334 err = hdcs_reg_write_seq(sd, HDCS_FWROW, win, 4);
335 if (err < 0)
336 return err;
337
338 /* Update the current width and height */
339 hdcs->w = width;
340 hdcs->h = height;
341 return err;
342}
343
344static int hdcs_probe_1x00(struct sd *sd)
345{
346 struct hdcs *hdcs;
347 u16 sensor;
348 int ret;
349
350 ret = stv06xx_read_sensor(sd, HDCS_IDENT, &sensor);
351 if (ret < 0 || sensor != 0x08)
352 return -ENODEV;
353
354 info("HDCS-1000/1100 sensor detected");
355
356 sd->gspca_dev.cam.cam_mode = stv06xx_sensor_hdcs1x00.modes;
357 sd->gspca_dev.cam.nmodes = stv06xx_sensor_hdcs1x00.nmodes;
358 sd->desc.ctrls = stv06xx_sensor_hdcs1x00.ctrls;
359 sd->desc.nctrls = stv06xx_sensor_hdcs1x00.nctrls;
360
361 hdcs = kmalloc(sizeof(struct hdcs), GFP_KERNEL);
362 if (!hdcs)
363 return -ENOMEM;
364
365 hdcs->array.left = 8;
366 hdcs->array.top = 8;
367 hdcs->array.width = HDCS_1X00_DEF_WIDTH;
368 hdcs->array.height = HDCS_1X00_DEF_HEIGHT;
369 hdcs->array.border = 4;
370
371 hdcs->exp.cto = 4;
372 hdcs->exp.cpo = 2;
373 hdcs->exp.rs = 186;
374 hdcs->exp.er = 100;
375
376 /*
377 * Frame rate on HDCS-1000 0x46D:0x840 depends on PSMP:
378 * 4 = doesn't work at all
379 * 5 = 7.8 fps,
380 * 6 = 6.9 fps,
381 * 8 = 6.3 fps,
382 * 10 = 5.5 fps,
383 * 15 = 4.4 fps,
384 * 31 = 2.8 fps
385 *
386 * Frame rate on HDCS-1000 0x46D:0x870 depends on PSMP:
387 * 15 = doesn't work at all
388 * 18 = doesn't work at all
389 * 19 = 7.3 fps
390 * 20 = 7.4 fps
391 * 21 = 7.4 fps
392 * 22 = 7.4 fps
393 * 24 = 6.3 fps
394 * 30 = 5.4 fps
395 */
396 hdcs->psmp = IS_870(sd) ? 20 : 5;
397
398 sd->sensor_priv = hdcs;
399
400 return 0;
401}
402
403static int hdcs_probe_1020(struct sd *sd)
404{
405 struct hdcs *hdcs;
406 u16 sensor;
407 int ret;
408
409 ret = stv06xx_read_sensor(sd, HDCS_IDENT, &sensor);
410 if (ret < 0 || sensor != 0x10)
411 return -ENODEV;
412
413 info("HDCS-1020 sensor detected");
414
415 sd->gspca_dev.cam.cam_mode = stv06xx_sensor_hdcs1020.modes;
416 sd->gspca_dev.cam.nmodes = stv06xx_sensor_hdcs1020.nmodes;
417 sd->desc.ctrls = stv06xx_sensor_hdcs1020.ctrls;
418 sd->desc.nctrls = stv06xx_sensor_hdcs1020.nctrls;
419
420 hdcs = kmalloc(sizeof(struct hdcs), GFP_KERNEL);
421 if (!hdcs)
422 return -ENOMEM;
423
424 /*
425 * From Andrey's test image: looks like HDCS-1020 upper-left
426 * visible pixel is at 24,8 (y maybe even smaller?) and lower-right
427 * visible pixel at 375,299 (x maybe even larger?)
428 */
429 hdcs->array.left = 24;
430 hdcs->array.top = 4;
431 hdcs->array.width = HDCS_1020_DEF_WIDTH;
432 hdcs->array.height = 304;
433 hdcs->array.border = 4;
434
435 hdcs->psmp = 6;
436
437 hdcs->exp.cto = 3;
438 hdcs->exp.cpo = 3;
439 hdcs->exp.rs = 155;
440 hdcs->exp.er = 96;
441
442 sd->sensor_priv = hdcs;
443
444 return 0;
445}
446
447static int hdcs_start(struct sd *sd)
448{
449 PDEBUG(D_STREAM, "Starting stream");
450
451 return hdcs_set_state(sd, HDCS_STATE_RUN);
452}
453
454static int hdcs_stop(struct sd *sd)
455{
456 PDEBUG(D_STREAM, "Halting stream");
457
458 return hdcs_set_state(sd, HDCS_STATE_SLEEP);
459}
460
461static void hdcs_disconnect(struct sd *sd)
462{
463 PDEBUG(D_PROBE, "Disconnecting the sensor");
464 kfree(sd->sensor_priv);
465}
466
467static int hdcs_init(struct sd *sd)
468{
469 struct hdcs *hdcs = sd->sensor_priv;
470 int i, err = 0;
471
472 /* Set the STV0602AA in STV0600 emulation mode */
473 if (IS_870(sd))
474 stv06xx_write_bridge(sd, STV_STV0600_EMULATION, 1);
475
476 /* Execute the bridge init */
477 for (i = 0; i < ARRAY_SIZE(stv_bridge_init) && !err; i++) {
478 err = stv06xx_write_bridge(sd, stv_bridge_init[i][0],
479 stv_bridge_init[i][1]);
480 }
481 if (err < 0)
482 return err;
483
484 /* sensor soft reset */
485 hdcs_reset(sd);
486
487 /* Execute the sensor init */
488 for (i = 0; i < ARRAY_SIZE(stv_sensor_init) && !err; i++) {
489 err = stv06xx_write_sensor(sd, stv_sensor_init[i][0],
490 stv_sensor_init[i][1]);
491 }
492 if (err < 0)
493 return err;
494
 495 /* Enable continuous frame capture, bit 2: stop when frame complete */
496 err = stv06xx_write_sensor(sd, HDCS_REG_CONFIG(sd), BIT(3));
497 if (err < 0)
498 return err;
499
500 /* Set PGA sample duration
501 (was 0x7E for IS_870, but caused slow framerate with HDCS-1020) */
502 if (IS_1020(sd))
503 err = stv06xx_write_sensor(sd, HDCS_TCTRL,
504 (HDCS_ADC_START_SIG_DUR << 6) | hdcs->psmp);
505 else
506 err = stv06xx_write_sensor(sd, HDCS_TCTRL,
507 (HDCS_ADC_START_SIG_DUR << 5) | hdcs->psmp);
508 if (err < 0)
509 return err;
510
511 err = hdcs_set_gains(sd, HDCS_DEFAULT_GAIN, HDCS_DEFAULT_GAIN,
512 HDCS_DEFAULT_GAIN);
513 if (err < 0)
514 return err;
515
516 err = hdcs_set_exposure(&sd->gspca_dev, HDCS_DEFAULT_EXPOSURE);
517 if (err < 0)
518 return err;
519
520 err = hdcs_set_size(sd, hdcs->array.width, hdcs->array.height);
521 return err;
522}
523
524static int hdcs_dump(struct sd *sd)
525{
526 u16 reg, val;
527
528 info("Dumping sensor registers:");
529
530 for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH; reg++) {
531 stv06xx_read_sensor(sd, reg, &val);
532 info("reg 0x%02x = 0x%02x", reg, val);
533 }
534 return 0;
535}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
new file mode 100644
index 000000000000..9c7279a4cd88
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
@@ -0,0 +1,263 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 * Copyright (c) 2008 Chia-I Wu
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * P/N 861037: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
24 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
25 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
26 * P/N 861075-0040: Sensor HDCS1000 ASIC
27 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
28 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
29 */
30
31#ifndef STV06XX_HDCS_H_
32#define STV06XX_HDCS_H_
33
34#include "stv06xx_sensor.h"
35
36#define HDCS_REG_CONFIG(sd) (IS_1020(sd) ? HDCS20_CONFIG : HDCS00_CONFIG)
37#define HDCS_REG_CONTROL(sd) (IS_1020(sd) ? HDCS20_CONTROL : HDCS00_CONTROL)
38
39#define HDCS_1X00_DEF_WIDTH 360
40#define HDCS_1X00_DEF_HEIGHT 296
41
42#define HDCS_1020_DEF_WIDTH 352
43#define HDCS_1020_DEF_HEIGHT 292
44
45#define HDCS_1020_BOTTOM_Y_SKIP 4
46
47#define HDCS_CLK_FREQ_MHZ 25
48
49#define HDCS_ADC_START_SIG_DUR 3
50
51/* LSB bit of I2C or register address signifies write (0) or read (1) */
52/* I2C Registers common for both HDCS-1000/1100 and HDCS-1020 */
53/* Identifications Register */
54#define HDCS_IDENT (0x00 << 1)
55/* Status Register */
56#define HDCS_STATUS (0x01 << 1)
57/* Interrupt Mask Register */
58#define HDCS_IMASK (0x02 << 1)
59/* Pad Control Register */
60#define HDCS_PCTRL (0x03 << 1)
61/* Pad Drive Control Register */
62#define HDCS_PDRV (0x04 << 1)
63/* Interface Control Register */
64#define HDCS_ICTRL (0x05 << 1)
65/* Interface Timing Register */
66#define HDCS_ITMG (0x06 << 1)
67/* Baud Fraction Register */
68#define HDCS_BFRAC (0x07 << 1)
69/* Baud Rate Register */
70#define HDCS_BRATE (0x08 << 1)
71/* ADC Control Register */
72#define HDCS_ADCCTRL (0x09 << 1)
73/* First Window Row Register */
74#define HDCS_FWROW (0x0a << 1)
75/* First Window Column Register */
76#define HDCS_FWCOL (0x0b << 1)
77/* Last Window Row Register */
78#define HDCS_LWROW (0x0c << 1)
79/* Last Window Column Register */
80#define HDCS_LWCOL (0x0d << 1)
81/* Timing Control Register */
82#define HDCS_TCTRL (0x0e << 1)
83/* PGA Gain Register: Even Row, Even Column */
84#define HDCS_ERECPGA (0x0f << 1)
85/* PGA Gain Register: Even Row, Odd Column */
86#define HDCS_EROCPGA (0x10 << 1)
87/* PGA Gain Register: Odd Row, Even Column */
88#define HDCS_ORECPGA (0x11 << 1)
89/* PGA Gain Register: Odd Row, Odd Column */
90#define HDCS_OROCPGA (0x12 << 1)
91/* Row Exposure Low Register */
92#define HDCS_ROWEXPL (0x13 << 1)
93/* Row Exposure High Register */
94#define HDCS_ROWEXPH (0x14 << 1)
95
96/* I2C Registers only for HDCS-1000/1100 */
97/* Sub-Row Exposure Low Register */
98#define HDCS00_SROWEXPL (0x15 << 1)
99/* Sub-Row Exposure High Register */
100#define HDCS00_SROWEXPH (0x16 << 1)
101/* Configuration Register */
102#define HDCS00_CONFIG (0x17 << 1)
103/* Control Register */
104#define HDCS00_CONTROL (0x18 << 1)
105
106/* I2C Registers only for HDCS-1020 */
107/* Sub-Row Exposure Register */
108#define HDCS20_SROWEXP (0x15 << 1)
109/* Error Control Register */
110#define HDCS20_ERROR (0x16 << 1)
111/* Interface Timing 2 Register */
112#define HDCS20_ITMG2 (0x17 << 1)
113/* Interface Control 2 Register */
114#define HDCS20_ICTRL2 (0x18 << 1)
115/* Horizontal Blank Register */
116#define HDCS20_HBLANK (0x19 << 1)
117/* Vertical Blank Register */
118#define HDCS20_VBLANK (0x1a << 1)
119/* Configuration Register */
120#define HDCS20_CONFIG (0x1b << 1)
121/* Control Register */
122#define HDCS20_CONTROL (0x1c << 1)
123
124#define HDCS_RUN_ENABLE (1 << 2)
125#define HDCS_SLEEP_MODE (1 << 1)
126
127#define HDCS_DEFAULT_EXPOSURE 5000
128#define HDCS_DEFAULT_GAIN 128
129
130static int hdcs_probe_1x00(struct sd *sd);
131static int hdcs_probe_1020(struct sd *sd);
132static int hdcs_start(struct sd *sd);
133static int hdcs_init(struct sd *sd);
134static int hdcs_stop(struct sd *sd);
135static int hdcs_dump(struct sd *sd);
136static void hdcs_disconnect(struct sd *sd);
137
138static int hdcs_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
139static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
140static int hdcs_set_gain(struct gspca_dev *gspca_dev, __s32 val);
141static int hdcs_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
142
143const struct stv06xx_sensor stv06xx_sensor_hdcs1x00 = {
144 .name = "HP HDCS-1000/1100",
145 .i2c_flush = 0,
146 .i2c_addr = (0x55 << 1),
147 .i2c_len = 1,
148
149 .init = hdcs_init,
150 .probe = hdcs_probe_1x00,
151 .start = hdcs_start,
152 .stop = hdcs_stop,
153 .disconnect = hdcs_disconnect,
154 .dump = hdcs_dump,
155
156 .nctrls = 2,
157 .ctrls = {
158 {
159 {
160 .id = V4L2_CID_EXPOSURE,
161 .type = V4L2_CTRL_TYPE_INTEGER,
162 .name = "exposure",
163 .minimum = 0x00,
164 .maximum = 0xffff,
165 .step = 0x1,
166 .default_value = HDCS_DEFAULT_EXPOSURE,
167 .flags = V4L2_CTRL_FLAG_SLIDER
168 },
169 .set = hdcs_set_exposure,
170 .get = hdcs_get_exposure
171 },
172 {
173 {
174 .id = V4L2_CID_GAIN,
175 .type = V4L2_CTRL_TYPE_INTEGER,
176 .name = "gain",
177 .minimum = 0x00,
178 .maximum = 0xff,
179 .step = 0x1,
180 .default_value = HDCS_DEFAULT_GAIN,
181 .flags = V4L2_CTRL_FLAG_SLIDER
182 },
183 .set = hdcs_set_gain,
184 .get = hdcs_get_gain
185 }
186 },
187
188 .nmodes = 1,
189 .modes = {
190 {
191 HDCS_1X00_DEF_WIDTH,
192 HDCS_1X00_DEF_HEIGHT,
193 V4L2_PIX_FMT_SBGGR8,
194 V4L2_FIELD_NONE,
195 .sizeimage =
196 HDCS_1X00_DEF_WIDTH * HDCS_1X00_DEF_HEIGHT,
197 .bytesperline = HDCS_1X00_DEF_WIDTH,
198 .colorspace = V4L2_COLORSPACE_SRGB,
199 .priv = 1
200 }
201 }
202};
203
204const struct stv06xx_sensor stv06xx_sensor_hdcs1020 = {
205 .name = "HDCS-1020",
206 .i2c_flush = 0,
207 .i2c_addr = (0x55 << 1),
208 .i2c_len = 1,
209
210 .nctrls = 0,
211 .ctrls = {},
212
213 .init = hdcs_init,
214 .probe = hdcs_probe_1020,
215 .start = hdcs_start,
216 .stop = hdcs_stop,
217 .dump = hdcs_dump,
218
219 .nmodes = 1,
220 .modes = {
221 {
222 HDCS_1020_DEF_WIDTH,
223 HDCS_1020_DEF_HEIGHT,
224 V4L2_PIX_FMT_SBGGR8,
225 V4L2_FIELD_NONE,
226 .sizeimage =
227 HDCS_1020_DEF_WIDTH * HDCS_1020_DEF_HEIGHT,
228 .bytesperline = HDCS_1020_DEF_WIDTH,
229 .colorspace = V4L2_COLORSPACE_SRGB,
230 .priv = 1
231 }
232 }
233};
234
235static const u16 stv_bridge_init[][2] = {
236 {STV_ISO_ENABLE, 0},
237 {STV_REG23, 0},
238 {STV_REG00, 0x1d},
239 {STV_REG01, 0xb5},
240 {STV_REG02, 0xa8},
241 {STV_REG03, 0x95},
242 {STV_REG04, 0x07},
243
244 {STV_SCAN_RATE, 0x20},
245 {STV_ISO_SIZE_L, 847},
246 {STV_Y_CTRL, 0x01},
247 {STV_X_CTRL, 0x0a}
248};
249
250static const u8 stv_sensor_init[][2] = {
251 /* Clear status (writing 1 will clear the corresponding status bit) */
252 {HDCS_STATUS, BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1)},
253 /* Disable all interrupts */
254 {HDCS_IMASK, 0x00},
255 {HDCS_PCTRL, BIT(6) | BIT(5) | BIT(1) | BIT(0)},
256 {HDCS_PDRV, 0x00},
257 {HDCS_ICTRL, BIT(5)},
258 {HDCS_ITMG, BIT(4) | BIT(1)},
259 /* ADC output resolution to 10 bits */
260 {HDCS_ADCCTRL, 10}
261};
262
263#endif
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
new file mode 100644
index 000000000000..d0a0f8596454
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
@@ -0,0 +1,430 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * P/N 861037: Sensor HDCS1000 ASIC STV0600
22 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
24 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
25 * P/N 861075-0040: Sensor HDCS1000 ASIC
26 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
27 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
28 */
29
30/*
31 * The spec file for the PB-0100 suggests the following for best quality
32 * images after the sensor has been reset :
33 *
34 * PB_ADCGAINL = R60 = 0x03 (3 dec) : sets low reference of ADC
35 to produce good black level
36 * PB_PREADCTRL = R32 = 0x1400 (5120 dec) : Enables global gain changes
37 through R53
38 * PB_ADCMINGAIN = R52 = 0x10 (16 dec) : Sets the minimum gain for
39 auto-exposure
40 * PB_ADCGLOBALGAIN = R53 = 0x10 (16 dec) : Sets the global gain
41 * PB_EXPGAIN = R14 = 0x11 (17 dec) : Sets the auto-exposure value
 42 * PB_UPDATEINT = R23 = 0x02 (2 dec) : Sets the speed of the
43 auto-exposure routine
44 * PB_CFILLIN = R5 = 0x0E (14 dec) : Sets the frame rate
45 */
46
47#include "stv06xx_pb0100.h"
48
49static int pb0100_probe(struct sd *sd)
50{
51 u16 sensor;
52 int i, err;
53 s32 *sensor_settings;
54
55 err = stv06xx_read_sensor(sd, PB_IDENT, &sensor);
56
57 if (err < 0)
58 return -ENODEV;
59
60 if ((sensor >> 8) == 0x64) {
61 sensor_settings = kmalloc(
62 stv06xx_sensor_pb0100.nctrls * sizeof(s32),
63 GFP_KERNEL);
64 if (!sensor_settings)
65 return -ENOMEM;
66
67 info("Photobit pb0100 sensor detected");
68
69 sd->gspca_dev.cam.cam_mode = stv06xx_sensor_pb0100.modes;
70 sd->gspca_dev.cam.nmodes = stv06xx_sensor_pb0100.nmodes;
71 sd->desc.ctrls = stv06xx_sensor_pb0100.ctrls;
72 sd->desc.nctrls = stv06xx_sensor_pb0100.nctrls;
73 for (i = 0; i < stv06xx_sensor_pb0100.nctrls; i++)
74 sensor_settings[i] = stv06xx_sensor_pb0100.
75 ctrls[i].qctrl.default_value;
76 sd->sensor_priv = sensor_settings;
77
78 return 0;
79 }
80
81 return -ENODEV;
82}
83
84static int pb0100_start(struct sd *sd)
85{
86 int err;
87 struct cam *cam = &sd->gspca_dev.cam;
88 s32 *sensor_settings = sd->sensor_priv;
89 u32 mode = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
90
91 /* Setup sensor window */
92 if (mode & PB0100_CROP_TO_VGA) {
93 stv06xx_write_sensor(sd, PB_RSTART, 30);
94 stv06xx_write_sensor(sd, PB_CSTART, 20);
95 stv06xx_write_sensor(sd, PB_RWSIZE, 240 - 1);
96 stv06xx_write_sensor(sd, PB_CWSIZE, 320 - 1);
97 } else {
98 stv06xx_write_sensor(sd, PB_RSTART, 8);
99 stv06xx_write_sensor(sd, PB_CSTART, 4);
100 stv06xx_write_sensor(sd, PB_RWSIZE, 288 - 1);
101 stv06xx_write_sensor(sd, PB_CWSIZE, 352 - 1);
102 }
103
104 if (mode & PB0100_SUBSAMPLE) {
105 stv06xx_write_bridge(sd, STV_Y_CTRL, 0x02); /* Wrong, FIXME */
106 stv06xx_write_bridge(sd, STV_X_CTRL, 0x06);
107
108 stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x10);
109 } else {
110 stv06xx_write_bridge(sd, STV_Y_CTRL, 0x01);
111 stv06xx_write_bridge(sd, STV_X_CTRL, 0x0a);
112 /* larger -> slower */
113 stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
114 }
115
116 /* set_gain also sets red and blue balance */
117 pb0100_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
118 pb0100_set_exposure(&sd->gspca_dev, sensor_settings[EXPOSURE_IDX]);
119 pb0100_set_autogain_target(&sd->gspca_dev,
120 sensor_settings[AUTOGAIN_TARGET_IDX]);
121 pb0100_set_autogain(&sd->gspca_dev, sensor_settings[AUTOGAIN_IDX]);
122
123 err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)|BIT(1));
124 PDEBUG(D_STREAM, "Started stream, status: %d", err);
125
126 return (err < 0) ? err : 0;
127}
128
129static int pb0100_stop(struct sd *sd)
130{
131 int err;
132
133 err = stv06xx_write_sensor(sd, PB_ABORTFRAME, 1);
134
135 if (err < 0)
136 goto out;
137
138 /* Set bit 1 to zero */
139 err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3));
140
141 PDEBUG(D_STREAM, "Halting stream");
142out:
143 return (err < 0) ? err : 0;
144}
145
146/* FIXME: Sort the init commands out and put them into tables,
147 this is only for getting the camera to work */
148/* FIXME: No error handling for now,
149 add this once the init has been converted to proper tables */
150static int pb0100_init(struct sd *sd)
151{
152 stv06xx_write_bridge(sd, STV_REG00, 1);
153 stv06xx_write_bridge(sd, STV_SCAN_RATE, 0);
154
155 /* Reset sensor */
156 stv06xx_write_sensor(sd, PB_RESET, 1);
157 stv06xx_write_sensor(sd, PB_RESET, 0);
158
159 /* Disable chip */
160 stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3));
161
162 /* Gain stuff...*/
163 stv06xx_write_sensor(sd, PB_PREADCTRL, BIT(12)|BIT(10)|BIT(6));
164 stv06xx_write_sensor(sd, PB_ADCGLOBALGAIN, 12);
165
166 /* Set up auto-exposure */
167 /* ADC VREF_HI new setting for a transition
168 from the Expose1 to the Expose2 setting */
169 stv06xx_write_sensor(sd, PB_R28, 12);
170 /* gain max for autoexposure */
171 stv06xx_write_sensor(sd, PB_ADCMAXGAIN, 180);
172 /* gain min for autoexposure */
173 stv06xx_write_sensor(sd, PB_ADCMINGAIN, 12);
174 /* Maximum frame integration time (programmed into R8)
175 allowed for auto-exposure routine */
176 stv06xx_write_sensor(sd, PB_R54, 3);
177 /* Minimum frame integration time (programmed into R8)
178 allowed for auto-exposure routine */
179 stv06xx_write_sensor(sd, PB_R55, 0);
180 stv06xx_write_sensor(sd, PB_UPDATEINT, 1);
181 /* R15 Expose0 (maximum that auto-exposure may use) */
182 stv06xx_write_sensor(sd, PB_R15, 800);
183 /* R17 Expose2 (minimum that auto-exposure may use) */
184 stv06xx_write_sensor(sd, PB_R17, 10);
185
186 stv06xx_write_sensor(sd, PB_EXPGAIN, 0);
187
188 /* 0x14 */
189 stv06xx_write_sensor(sd, PB_VOFFSET, 0);
190 /* 0x0D */
191 stv06xx_write_sensor(sd, PB_ADCGAINH, 11);
192 /* Set black level (important!) */
193 stv06xx_write_sensor(sd, PB_ADCGAINL, 0);
194
195 /* ??? */
196 stv06xx_write_bridge(sd, STV_REG00, 0x11);
197 stv06xx_write_bridge(sd, STV_REG03, 0x45);
198 stv06xx_write_bridge(sd, STV_REG04, 0x07);
199
200 /* ISO-Size (0x27b: 635... why? - HDCS uses 847) */
201 stv06xx_write_bridge(sd, STV_ISO_SIZE_L, 847);
202
203 /* Scan/timing for the sensor */
204 stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(4)|BIT(3)|BIT(1));
205 stv06xx_write_sensor(sd, PB_CFILLIN, 14);
206 stv06xx_write_sensor(sd, PB_VBL, 0);
207 stv06xx_write_sensor(sd, PB_FINTTIME, 0);
208 stv06xx_write_sensor(sd, PB_RINTTIME, 123);
209
210 stv06xx_write_bridge(sd, STV_REG01, 0xc2);
211 stv06xx_write_bridge(sd, STV_REG02, 0xb0);
212 return 0;
213}
214
215static int pb0100_dump(struct sd *sd)
216{
217 return 0;
218}
219
220static int pb0100_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
221{
222 struct sd *sd = (struct sd *) gspca_dev;
223 s32 *sensor_settings = sd->sensor_priv;
224
225 *val = sensor_settings[GAIN_IDX];
226
227 return 0;
228}
229
230static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val)
231{
232 int err;
233 struct sd *sd = (struct sd *) gspca_dev;
234 s32 *sensor_settings = sd->sensor_priv;
235
236 if (sensor_settings[AUTOGAIN_IDX])
237 return -EBUSY;
238
239 sensor_settings[GAIN_IDX] = val;
240 err = stv06xx_write_sensor(sd, PB_G1GAIN, val);
241 if (!err)
242 err = stv06xx_write_sensor(sd, PB_G2GAIN, val);
243 PDEBUG(D_V4L2, "Set green gain to %d, status: %d", val, err);
244
245 if (!err)
246 err = pb0100_set_red_balance(gspca_dev,
247 sensor_settings[RED_BALANCE_IDX]);
248 if (!err)
249 err = pb0100_set_blue_balance(gspca_dev,
250 sensor_settings[BLUE_BALANCE_IDX]);
251
252 return err;
253}
254
255static int pb0100_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
256{
257 struct sd *sd = (struct sd *) gspca_dev;
258 s32 *sensor_settings = sd->sensor_priv;
259
260 *val = sensor_settings[RED_BALANCE_IDX];
261
262 return 0;
263}
264
265static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
266{
267 int err;
268 struct sd *sd = (struct sd *) gspca_dev;
269 s32 *sensor_settings = sd->sensor_priv;
270
271 if (sensor_settings[AUTOGAIN_IDX])
272 return -EBUSY;
273
274 sensor_settings[RED_BALANCE_IDX] = val;
275 val += sensor_settings[GAIN_IDX];
276 if (val < 0)
277 val = 0;
278 else if (val > 255)
279 val = 255;
280
281 err = stv06xx_write_sensor(sd, PB_RGAIN, val);
282 PDEBUG(D_V4L2, "Set red gain to %d, status: %d", val, err);
283
284 return err;
285}
286
287static int pb0100_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
288{
289 struct sd *sd = (struct sd *) gspca_dev;
290 s32 *sensor_settings = sd->sensor_priv;
291
292 *val = sensor_settings[BLUE_BALANCE_IDX];
293
294 return 0;
295}
296
297static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
298{
299 int err;
300 struct sd *sd = (struct sd *) gspca_dev;
301 s32 *sensor_settings = sd->sensor_priv;
302
303 if (sensor_settings[AUTOGAIN_IDX])
304 return -EBUSY;
305
306 sensor_settings[BLUE_BALANCE_IDX] = val;
307 val += sensor_settings[GAIN_IDX];
308 if (val < 0)
309 val = 0;
310 else if (val > 255)
311 val = 255;
312
313 err = stv06xx_write_sensor(sd, PB_BGAIN, val);
314 PDEBUG(D_V4L2, "Set blue gain to %d, status: %d", val, err);
315
316 return err;
317}
318
319static int pb0100_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
320{
321 struct sd *sd = (struct sd *) gspca_dev;
322 s32 *sensor_settings = sd->sensor_priv;
323
324 *val = sensor_settings[EXPOSURE_IDX];
325
326 return 0;
327}
328
329static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
330{
331 int err;
332 struct sd *sd = (struct sd *) gspca_dev;
333 s32 *sensor_settings = sd->sensor_priv;
334
335 if (sensor_settings[AUTOGAIN_IDX])
336 return -EBUSY;
337
338 sensor_settings[EXPOSURE_IDX] = val;
339 err = stv06xx_write_sensor(sd, PB_RINTTIME, val);
340 PDEBUG(D_V4L2, "Set exposure to %d, status: %d", val, err);
341
342 return err;
343}
344
345static int pb0100_get_autogain(struct gspca_dev *gspca_dev, __s32 *val)
346{
347 struct sd *sd = (struct sd *) gspca_dev;
348 s32 *sensor_settings = sd->sensor_priv;
349
350 *val = sensor_settings[AUTOGAIN_IDX];
351
352 return 0;
353}
354
355static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val)
356{
357 int err;
358 struct sd *sd = (struct sd *) gspca_dev;
359 s32 *sensor_settings = sd->sensor_priv;
360
361 sensor_settings[AUTOGAIN_IDX] = val;
362 if (sensor_settings[AUTOGAIN_IDX]) {
363 if (sensor_settings[NATURAL_IDX])
364 val = BIT(6)|BIT(4)|BIT(0);
365 else
366 val = BIT(4)|BIT(0);
367 } else
368 val = 0;
369
370 err = stv06xx_write_sensor(sd, PB_EXPGAIN, val);
371 PDEBUG(D_V4L2, "Set autogain to %d (natural: %d), status: %d",
372 sensor_settings[AUTOGAIN_IDX], sensor_settings[NATURAL_IDX],
373 err);
374
375 return err;
376}
377
378static int pb0100_get_autogain_target(struct gspca_dev *gspca_dev, __s32 *val)
379{
380 struct sd *sd = (struct sd *) gspca_dev;
381 s32 *sensor_settings = sd->sensor_priv;
382
383 *val = sensor_settings[AUTOGAIN_TARGET_IDX];
384
385 return 0;
386}
387
388static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
389{
390 int err, totalpixels, brightpixels, darkpixels;
391 struct sd *sd = (struct sd *) gspca_dev;
392 s32 *sensor_settings = sd->sensor_priv;
393
394 sensor_settings[AUTOGAIN_TARGET_IDX] = val;
395
396 /* Number of pixels counted by the sensor when subsampling the pixels.
397 * Slightly larger than the real value to avoid oscillation */
398 totalpixels = gspca_dev->width * gspca_dev->height;
399 totalpixels = totalpixels/(8*8) + totalpixels/(64*64);
400
401 brightpixels = (totalpixels * val) >> 8;
402 darkpixels = totalpixels - brightpixels;
403 err = stv06xx_write_sensor(sd, PB_R21, brightpixels);
404 if (!err)
405 err = stv06xx_write_sensor(sd, PB_R22, darkpixels);
406
407 PDEBUG(D_V4L2, "Set autogain target to %d, status: %d", val, err);
408
409 return err;
410}
411
412static int pb0100_get_natural(struct gspca_dev *gspca_dev, __s32 *val)
413{
414 struct sd *sd = (struct sd *) gspca_dev;
415 s32 *sensor_settings = sd->sensor_priv;
416
417 *val = sensor_settings[NATURAL_IDX];
418
419 return 0;
420}
421
422static int pb0100_set_natural(struct gspca_dev *gspca_dev, __s32 val)
423{
424 struct sd *sd = (struct sd *) gspca_dev;
425 s32 *sensor_settings = sd->sensor_priv;
426
427 sensor_settings[NATURAL_IDX] = val;
428
429 return pb0100_set_autogain(gspca_dev, sensor_settings[AUTOGAIN_IDX]);
430}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h
new file mode 100644
index 000000000000..5ea21a1154c4
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h
@@ -0,0 +1,275 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * P/N 861037: Sensor HDCS1000 ASIC STV0600
22 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
24 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
25 * P/N 861075-0040: Sensor HDCS1000 ASIC
26 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
27 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
28 */
29
30#ifndef STV06XX_PB0100_H_
31#define STV06XX_PB0100_H_
32
33#include "stv06xx_sensor.h"
34
35/* mode priv field flags */
36#define PB0100_CROP_TO_VGA 0x01
37#define PB0100_SUBSAMPLE 0x02
38
39/* I2C Registers */
40#define PB_IDENT 0x00 /* Chip Version */
41#define PB_RSTART 0x01 /* Row Window Start */
42#define PB_CSTART 0x02 /* Column Window Start */
43#define PB_RWSIZE 0x03 /* Row Window Size */
44#define PB_CWSIZE 0x04 /* Column Window Size */
45#define PB_CFILLIN 0x05 /* Column Fill-In */
46#define PB_VBL 0x06 /* Vertical Blank Count */
47#define PB_CONTROL 0x07 /* Control Mode */
48#define PB_FINTTIME 0x08 /* Integration Time/Frame Unit Count */
49#define PB_RINTTIME 0x09 /* Integration Time/Row Unit Count */
50#define PB_ROWSPEED 0x0a /* Row Speed Control */
51#define PB_ABORTFRAME 0x0b /* Abort Frame */
52#define PB_R12 0x0c /* Reserved */
53#define PB_RESET 0x0d /* Reset */
54#define PB_EXPGAIN 0x0e /* Exposure Gain Command */
55#define PB_R15 0x0f /* Expose0 */
56#define PB_R16 0x10 /* Expose1 */
57#define PB_R17 0x11 /* Expose2 */
58#define PB_R18 0x12 /* Low0_DAC */
59#define PB_R19 0x13 /* Low1_DAC */
60#define PB_R20 0x14 /* Low2_DAC */
61#define PB_R21 0x15 /* Threshold11 */
62#define PB_R22 0x16 /* Threshold0x */
63#define PB_UPDATEINT 0x17 /* Update Interval */
64#define PB_R24 0x18 /* High_DAC */
65#define PB_R25 0x19 /* Trans0H */
66#define PB_R26 0x1a /* Trans1L */
67#define PB_R27 0x1b /* Trans1H */
68#define PB_R28 0x1c /* Trans2L */
69#define PB_R29 0x1d /* Reserved */
70#define PB_R30 0x1e /* Reserved */
71#define PB_R31 0x1f /* Wait to Read */
72#define PB_PREADCTRL 0x20 /* Pixel Read Control Mode */
73#define PB_R33 0x21 /* IREF_VLN */
74#define PB_R34 0x22 /* IREF_VLP */
75#define PB_R35 0x23 /* IREF_VLN_INTEG */
76#define PB_R36 0x24 /* IREF_MASTER */
77#define PB_R37 0x25 /* IDACP */
78#define PB_R38 0x26 /* IDACN */
79#define PB_R39 0x27 /* DAC_Control_Reg */
80#define PB_R40 0x28 /* VCL */
81#define PB_R41 0x29 /* IREF_VLN_ADCIN */
82#define PB_R42 0x2a /* Reserved */
83#define PB_G1GAIN 0x2b /* Green 1 Gain */
84#define PB_BGAIN 0x2c /* Blue Gain */
85#define PB_RGAIN 0x2d /* Red Gain */
86#define PB_G2GAIN 0x2e /* Green 2 Gain */
87#define PB_R47 0x2f /* Dark Row Address */
88#define PB_R48 0x30 /* Dark Row Options */
89#define PB_R49 0x31 /* Reserved */
90#define PB_R50 0x32 /* Image Test Data */
91#define PB_ADCMAXGAIN 0x33 /* Maximum Gain */
92#define PB_ADCMINGAIN 0x34 /* Minimum Gain */
93#define PB_ADCGLOBALGAIN 0x35 /* Global Gain */
94#define PB_R54 0x36 /* Maximum Frame */
95#define PB_R55 0x37 /* Minimum Frame */
96#define PB_R56 0x38 /* Reserved */
97#define PB_VOFFSET 0x39 /* VOFFSET */
98#define PB_R58 0x3a /* Snap-Shot Sequence Trigger */
99#define PB_ADCGAINH 0x3b /* VREF_HI */
100#define PB_ADCGAINL 0x3c /* VREF_LO */
101#define PB_R61 0x3d /* Reserved */
102#define PB_R62 0x3e /* Reserved */
103#define PB_R63 0x3f /* Reserved */
104#define PB_R64 0x40 /* Red/Blue Gain */
105#define PB_R65 0x41 /* Green 2/Green 1 Gain */
106#define PB_R66 0x42 /* VREF_HI/LO */
107#define PB_R67 0x43 /* Integration Time/Row Unit Count */
108#define PB_R240 0xf0 /* ADC Test */
109#define PB_R241 0xf1 /* Chip Enable */
110#define PB_R242 0xf2 /* Reserved */
111
112static int pb0100_probe(struct sd *sd);
113static int pb0100_start(struct sd *sd);
114static int pb0100_init(struct sd *sd);
115static int pb0100_stop(struct sd *sd);
116static int pb0100_dump(struct sd *sd);
117
118/* V4L2 controls supported by the driver */
119static int pb0100_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
120static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val);
121static int pb0100_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
122static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
123static int pb0100_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
124static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
125static int pb0100_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
126static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
127static int pb0100_get_autogain(struct gspca_dev *gspca_dev, __s32 *val);
128static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val);
129static int pb0100_get_autogain_target(struct gspca_dev *gspca_dev, __s32 *val);
130static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val);
131static int pb0100_get_natural(struct gspca_dev *gspca_dev, __s32 *val);
132static int pb0100_set_natural(struct gspca_dev *gspca_dev, __s32 val);
133
134const struct stv06xx_sensor stv06xx_sensor_pb0100 = {
135 .name = "PB-0100",
136 .i2c_flush = 1,
137 .i2c_addr = 0xba,
138 .i2c_len = 2,
139
140 .nctrls = 7,
141 .ctrls = {
142#define GAIN_IDX 0
143 {
144 {
145 .id = V4L2_CID_GAIN,
146 .type = V4L2_CTRL_TYPE_INTEGER,
147 .name = "Gain",
148 .minimum = 0,
149 .maximum = 255,
150 .step = 1,
151 .default_value = 128
152 },
153 .set = pb0100_set_gain,
154 .get = pb0100_get_gain
155 },
156#define RED_BALANCE_IDX 1
157 {
158 {
159 .id = V4L2_CID_RED_BALANCE,
160 .type = V4L2_CTRL_TYPE_INTEGER,
161 .name = "Red Balance",
162 .minimum = -255,
163 .maximum = 255,
164 .step = 1,
165 .default_value = 0
166 },
167 .set = pb0100_set_red_balance,
168 .get = pb0100_get_red_balance
169 },
170#define BLUE_BALANCE_IDX 2
171 {
172 {
173 .id = V4L2_CID_BLUE_BALANCE,
174 .type = V4L2_CTRL_TYPE_INTEGER,
175 .name = "Blue Balance",
176 .minimum = -255,
177 .maximum = 255,
178 .step = 1,
179 .default_value = 0
180 },
181 .set = pb0100_set_blue_balance,
182 .get = pb0100_get_blue_balance
183 },
184#define EXPOSURE_IDX 3
185 {
186 {
187 .id = V4L2_CID_EXPOSURE,
188 .type = V4L2_CTRL_TYPE_INTEGER,
189 .name = "Exposure",
190 .minimum = 0,
191 .maximum = 511,
192 .step = 1,
193 .default_value = 12
194 },
195 .set = pb0100_set_exposure,
196 .get = pb0100_get_exposure
197 },
198#define AUTOGAIN_IDX 4
199 {
200 {
201 .id = V4L2_CID_AUTOGAIN,
202 .type = V4L2_CTRL_TYPE_BOOLEAN,
203 .name = "Automatic Gain and Exposure",
204 .minimum = 0,
205 .maximum = 1,
206 .step = 1,
207 .default_value = 1
208 },
209 .set = pb0100_set_autogain,
210 .get = pb0100_get_autogain
211 },
212#define AUTOGAIN_TARGET_IDX 5
213 {
214 {
215 .id = V4L2_CTRL_CLASS_USER + 0x1000,
216 .type = V4L2_CTRL_TYPE_INTEGER,
217 .name = "Automatic Gain Target",
218 .minimum = 0,
219 .maximum = 255,
220 .step = 1,
221 .default_value = 128
222 },
223 .set = pb0100_set_autogain_target,
224 .get = pb0100_get_autogain_target
225 },
226#define NATURAL_IDX 6
227 {
228 {
229 .id = V4L2_CTRL_CLASS_USER + 0x1001,
230 .type = V4L2_CTRL_TYPE_BOOLEAN,
231 .name = "Natural Light Source",
232 .minimum = 0,
233 .maximum = 1,
234 .step = 1,
235 .default_value = 1
236 },
237 .set = pb0100_set_natural,
238 .get = pb0100_get_natural
239 },
240 },
241
242 .init = pb0100_init,
243 .probe = pb0100_probe,
244 .start = pb0100_start,
245 .stop = pb0100_stop,
246 .dump = pb0100_dump,
247
248 .nmodes = 2,
249 .modes = {
250/* low res / subsample modes disabled as they are only half resolution
251 horizontally; halving the vertical resolution does not seem to work */
252 {
253 320,
254 240,
255 V4L2_PIX_FMT_SGRBG8,
256 V4L2_FIELD_NONE,
257 .sizeimage = 320 * 240,
258 .bytesperline = 320,
259 .colorspace = V4L2_COLORSPACE_SRGB,
260 .priv = PB0100_CROP_TO_VGA
261 },
262 {
263 352,
264 288,
265 V4L2_PIX_FMT_SGRBG8,
266 V4L2_FIELD_NONE,
267 .sizeimage = 352 * 288,
268 .bytesperline = 352,
269 .colorspace = V4L2_COLORSPACE_SRGB,
270 .priv = 0
271 },
272 }
273};
274
275#endif
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
new file mode 100644
index 000000000000..c726dacefa1f
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * P/N 861037: Sensor HDCS1000 ASIC STV0600
22 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
24 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
25 * P/N 861075-0040: Sensor HDCS1000 ASIC
26 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
27 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
28 */
29
30#ifndef STV06XX_SENSOR_H_
31#define STV06XX_SENSOR_H_
32
33#include "stv06xx.h"
34
35#define IS_850(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x850)
36#define IS_870(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x870)
37#define IS_1020(sd) ((sd)->sensor == &stv06xx_sensor_hdcs1020)
38
39extern const struct stv06xx_sensor stv06xx_sensor_vv6410;
40extern const struct stv06xx_sensor stv06xx_sensor_hdcs1x00;
41extern const struct stv06xx_sensor stv06xx_sensor_hdcs1020;
42extern const struct stv06xx_sensor stv06xx_sensor_pb0100;
43
44#define STV06XX_MAX_CTRLS (V4L2_CID_LASTP1 - V4L2_CID_BASE + 10)
45
46struct stv06xx_sensor {
47 /* Defines the name of a sensor */
48 char name[32];
49
50 /* Sensor i2c address */
51 u8 i2c_addr;
52
53 /* Flush value */
54 u8 i2c_flush;
55
56 /* length of an i2c word */
57 u8 i2c_len;
58
59 /* Probes if the sensor is connected */
60 int (*probe)(struct sd *sd);
61
62 /* Performs an initialization sequence */
63 int (*init)(struct sd *sd);
64
65 /* Executed at device disconnect */
66 void (*disconnect)(struct sd *sd);
67
68 /* Reads a sensor register */
69 int (*read_sensor)(struct sd *sd, const u8 address,
70 u8 *i2c_data, const u8 len);
71
72 /* Writes to a sensor register */
73 int (*write_sensor)(struct sd *sd, const u8 address,
74 u8 *i2c_data, const u8 len);
75
76 /* Instructs the sensor to start streaming */
77 int (*start)(struct sd *sd);
78
79 /* Instructs the sensor to stop streaming */
80 int (*stop)(struct sd *sd);
81
82 /* Instructs the sensor to dump all its contents */
83 int (*dump)(struct sd *sd);
84
85 int nctrls;
86 struct ctrl ctrls[STV06XX_MAX_CTRLS];
87
88 char nmodes;
89 struct v4l2_pix_format modes[];
90};
91
92#endif
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
new file mode 100644
index 000000000000..1ca91f2a6dee
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
@@ -0,0 +1,251 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * P/N 861037: Sensor HDCS1000 ASIC STV0600
22 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
24 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
25 * P/N 861075-0040: Sensor HDCS1000 ASIC
26 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
27 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
28 */
29
30#include "stv06xx_vv6410.h"
31
32static int vv6410_probe(struct sd *sd)
33{
34 u16 data;
35 int err;
36
37 err = stv06xx_read_sensor(sd, VV6410_DEVICEH, &data);
38
39 if (err < 0)
40 return -ENODEV;
41
42 if (data == 0x19) {
43 info("vv6410 sensor detected");
44
45 sd->gspca_dev.cam.cam_mode = stv06xx_sensor_vv6410.modes;
46 sd->gspca_dev.cam.nmodes = stv06xx_sensor_vv6410.nmodes;
47 sd->desc.ctrls = stv06xx_sensor_vv6410.ctrls;
48 sd->desc.nctrls = stv06xx_sensor_vv6410.nctrls;
49 return 0;
50 }
51
52 return -ENODEV;
53}
54
55static int vv6410_init(struct sd *sd)
56{
57 int err = 0, i;
58
59 for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++) {
60 /* if data is NULL then len contains a single value */
61 if (stv_bridge_init[i].data == NULL) {
62 err = stv06xx_write_bridge(sd,
63 stv_bridge_init[i].start,
64 stv_bridge_init[i].len);
65 } else {
66 int j;
67 for (j = 0; j < stv_bridge_init[i].len; j++)
68 err = stv06xx_write_bridge(sd,
69 stv_bridge_init[i].start + j,
70 stv_bridge_init[i].data[j]);
71 }
72 }
73
74 if (err < 0)
75 return err;
76
77 err = stv06xx_write_sensor_bytes(sd, (u8 *) vv6410_sensor_init,
78 ARRAY_SIZE(vv6410_sensor_init));
79
80 return (err < 0) ? err : 0;
81}
82
83static int vv6410_start(struct sd *sd)
84{
85 int err;
86 struct cam *cam = &sd->gspca_dev.cam;
87 u32 priv = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
88
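	/* program the end coordinates of the sensor image window,
	   cropping to QVGA when the selected mode requests it */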
89 if (priv & VV6410_CROP_TO_QVGA) {
90 PDEBUG(D_CONF, "Cropping to QVGA");
91 stv06xx_write_sensor(sd, VV6410_XENDH, 320 - 1);
92 stv06xx_write_sensor(sd, VV6410_YENDH, 240 - 1);
93 } else {
94 stv06xx_write_sensor(sd, VV6410_XENDH, 360 - 1);
95 stv06xx_write_sensor(sd, VV6410_YENDH, 294 - 1);
96 }
97
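	/* configure the bridge X/Y control and scan rate for subsampled
	   or full-resolution capture */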
98 if (priv & VV6410_SUBSAMPLE) {
99 PDEBUG(D_CONF, "Enabling subsampling");
100 stv06xx_write_bridge(sd, STV_Y_CTRL, 0x02);
101 stv06xx_write_bridge(sd, STV_X_CTRL, 0x06);
102
103 stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x10);
104 } else {
105 stv06xx_write_bridge(sd, STV_Y_CTRL, 0x01);
106 stv06xx_write_bridge(sd, STV_X_CTRL, 0x0a);
107
108 stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
109 }
110
111 /* Turn on LED */
112 err = stv06xx_write_bridge(sd, STV_LED_CTRL, LED_ON);
113 if (err < 0)
114 return err;
115
116 err = stv06xx_write_sensor(sd, VV6410_SETUP0, 0);
117 if (err < 0)
118 return err;
119
120 PDEBUG(D_STREAM, "Starting stream");
121
122 return 0;
123}
124
125static int vv6410_stop(struct sd *sd)
126{
127 int err;
128
129 /* Turn off LED */
130 err = stv06xx_write_bridge(sd, STV_LED_CTRL, LED_OFF);
131 if (err < 0)
132 return err;
133
134 err = stv06xx_write_sensor(sd, VV6410_SETUP0, VV6410_LOW_POWER_MODE);
135 if (err < 0)
136 return err;
137
138 PDEBUG(D_STREAM, "Halting stream");
139
140 return (err < 0) ? err : 0;
141}
142
143static int vv6410_dump(struct sd *sd)
144{
145 u8 i;
146 int err = 0;
147
148 info("Dumping all vv6410 sensor registers");
149 for (i = 0; i < 0xff && !err; i++) {
150 u16 data;
151 err = stv06xx_read_sensor(sd, i, &data);
152 info("Register 0x%x contained 0x%x", i, data);
153 }
154 return (err < 0) ? err : 0;
155}
156
157static int vv6410_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
158{
159 int err;
160 u16 i2c_data;
161 struct sd *sd = (struct sd *) gspca_dev;
162
163 err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &i2c_data);
164
165 *val = (i2c_data & VV6410_HFLIP) ? 1 : 0;
166
167 PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
168
169 return (err < 0) ? err : 0;
170}
171
172static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
173{
174 int err;
175 u16 i2c_data;
176 struct sd *sd = (struct sd *) gspca_dev;
177 err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &i2c_data);
178 if (err < 0)
179 return err;
180
181 if (val)
182 i2c_data |= VV6410_HFLIP;
183 else
184 i2c_data &= ~VV6410_HFLIP;
185
186 PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
187 err = stv06xx_write_sensor(sd, VV6410_DATAFORMAT, i2c_data);
188
189 return (err < 0) ? err : 0;
190}
191
192static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
193{
194 int err;
195 u16 i2c_data;
196 struct sd *sd = (struct sd *) gspca_dev;
197
198 err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &i2c_data);
199
200 *val = (i2c_data & VV6410_VFLIP) ? 1 : 0;
201
202 PDEBUG(D_V4L2, "Read vertical flip %d", *val);
203
204 return (err < 0) ? err : 0;
205}
206
207static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
208{
209 int err;
210 u16 i2c_data;
211 struct sd *sd = (struct sd *) gspca_dev;
212 err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &i2c_data);
213 if (err < 0)
214 return err;
215
216 if (val)
217 i2c_data |= VV6410_VFLIP;
218 else
219 i2c_data &= ~VV6410_VFLIP;
220
221 PDEBUG(D_V4L2, "Set vertical flip to %d", val);
222 err = stv06xx_write_sensor(sd, VV6410_DATAFORMAT, i2c_data);
223
224 return (err < 0) ? err : 0;
225}
226
227static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val)
228{
229 int err;
230 u16 i2c_data;
231 struct sd *sd = (struct sd *) gspca_dev;
232
233 err = stv06xx_read_sensor(sd, VV6410_ANALOGGAIN, &i2c_data);
234
235 *val = i2c_data & 0xf;
236
237 PDEBUG(D_V4L2, "Read analog gain %d", *val);
238
239 return (err < 0) ? err : 0;
240}
241
242static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val)
243{
244 int err;
245 struct sd *sd = (struct sd *) gspca_dev;
246
247 PDEBUG(D_V4L2, "Set analog gain to %d", val);
248 err = stv06xx_write_sensor(sd, VV6410_ANALOGGAIN, 0xf0 | (val & 0xf));
249
250 return (err < 0) ? err : 0;
251}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
new file mode 100644
index 000000000000..3ff8c4ea3362
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -0,0 +1,315 @@
1/*
2 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
3 * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
4 * Copyright (c) 2002, 2003 Tuukka Toivonen
5 * Copyright (c) 2008 Erik Andrén
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * P/N 861037: Sensor HDCS1000 ASIC STV0600
22 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
23 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
24 * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
25 * P/N 861075-0040: Sensor HDCS1000 ASIC
26 * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
27 * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
28 */
29
30#ifndef STV06XX_VV6410_H_
31#define STV06XX_VV6410_H_
32
33#include "stv06xx_sensor.h"
34
35#define VV6410_COLS 416
36#define VV6410_ROWS 320
37
38/* Status registers */
39/* Chip identification number including revision indicator */
40#define VV6410_DEVICEH 0x00
41#define VV6410_DEVICEL 0x01
42
43/* User can determine whether timed I2C data
44 has been consumed by interrogating flag states */
45#define VV6410_STATUS0 0x02
46
47/* Current line counter value */
48#define VV6410_LINECOUNTH 0x03
49#define VV6410_LINECOUNTL 0x04
50
51/* End x coordinate of image size */
52#define VV6410_XENDH 0x05
53#define VV6410_XENDL 0x06
54
55/* End y coordinate of image size */
56#define VV6410_YENDH 0x07
57#define VV6410_YENDL 0x08
58
59/* This is the average pixel value returned from the
60 dark line offset cancellation algorithm */
61#define VV6410_DARKAVGH 0x09
62#define VV6410_DARKAVGL 0x0a
63
64/* This is the average pixel value returned from the
65 black line offset cancellation algorithm */
66#define VV6410_BLACKAVGH 0x0b
67#define VV6410_BLACKAVGL 0x0c
68
69/* Flags to indicate whether the x or y image coordinates have been clipped */
70#define VV6410_STATUS1 0x0d
71
72/* Setup registers */
73
74/* Low-power/sleep modes & video timing */
75#define VV6410_SETUP0 0x10
76
77/* Various parameters */
78#define VV6410_SETUP1 0x11
79
80/* Contains pixel counter reset value used by external sync */
81#define VV6410_SYNCVALUE 0x12
82
83/* Frame grabbing modes (FST, LST and QCK) */
84#define VV6410_FGMODES 0x14
85
86/* FST and QCK mapping modes. */
87#define VV6410_PINMAPPING 0x15
88
89/* Data resolution */
90#define VV6410_DATAFORMAT 0x16
91
92/* Output coding formats */
93#define VV6410_OPFORMAT 0x17
94
95/* Various mode select bits */
96#define VV6410_MODESELECT 0x18
97
98/* Exposure registers */
99/* Fine exposure. */
100#define VV6410_FINEH 0x20
101#define VV6410_FINEL 0x21
102
103/* Coarse exposure */
104#define VV6410_COARSEH 0x22
105#define VV6410_COARSEL 0x23
106
107/* Analog gain setting */
108#define VV6410_ANALOGGAIN 0x24
109
110/* Clock division */
111#define VV6410_CLKDIV 0x25
112
113/* Dark line offset cancellation value */
114#define VV6410_DARKOFFSETH 0x2c
115#define VV6410_DARKOFFSETL 0x2d
116
117/* Dark line offset cancellation enable */
118#define VV6410_DARKOFFSETSETUP 0x2e
119
120/* Video timing registers */
121/* Line Length (Pixel Clocks) */
122#define VV6410_LINELENGTHH 0x52
123#define VV6410_LINELENGTHL 0x53
124
125/* X-coordinate of top left corner of region of interest (x-offset) */
126#define VV6410_XOFFSETH 0x57
127#define VV6410_XOFFSETL 0x58
128
129/* Y-coordinate of top left corner of region of interest (y-offset) */
130#define VV6410_YOFFSETH 0x59
131#define VV6410_YOFFSETL 0x5a
132
133/* Field length (Lines) */
134#define VV6410_FIELDLENGTHH 0x61
135#define VV6410_FIELDLENGTHL 0x62
136
137/* System registers */
138/* Black offset cancellation default value */
139#define VV6410_BLACKOFFSETH 0x70
140#define VV6410_BLACKOFFSETL 0x71
141
142/* Black offset cancellation setup */
143#define VV6410_BLACKOFFSETSETUP 0x72
144
145/* Analog Control Register 0 */
146#define VV6410_CR0 0x75
147
148/* Analog Control Register 1 */
149#define VV6410_CR1 0x76
150
151/* ADC Setup Register */
152#define VV6410_AS0 0x77
153
154/* Analog Test Register */
155#define VV6410_AT0 0x78
156
157/* Audio Amplifier Setup Register */
158#define VV6410_AT1 0x79
159
160#define VV6410_HFLIP (1 << 3)
161#define VV6410_VFLIP (1 << 4)
162
163#define VV6410_LOW_POWER_MODE (1 << 0)
164#define VV6410_SOFT_RESET (1 << 2)
165#define VV6410_PAL_25_FPS (0 << 3)
166
167#define VV6410_CLK_DIV_2 (1 << 1)
168
169#define VV6410_FINE_EXPOSURE 320
170#define VV6410_COARSE_EXPOSURE 192
171#define VV6410_DEFAULT_GAIN 5
172
173#define VV6410_SUBSAMPLE 0x01
174#define VV6410_CROP_TO_QVGA 0x02
175
176static int vv6410_probe(struct sd *sd);
177static int vv6410_start(struct sd *sd);
178static int vv6410_init(struct sd *sd);
179static int vv6410_stop(struct sd *sd);
180static int vv6410_dump(struct sd *sd);
181
182/* V4L2 controls supported by the driver */
183static int vv6410_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
184static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
185static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
186static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
187static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val);
188static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val);
189
190const struct stv06xx_sensor stv06xx_sensor_vv6410 = {
191 .name = "ST VV6410",
192 .i2c_flush = 5,
193 .i2c_addr = 0x20,
194 .i2c_len = 1,
195 .init = vv6410_init,
196 .probe = vv6410_probe,
197 .start = vv6410_start,
198 .stop = vv6410_stop,
199 .dump = vv6410_dump,
200
201 .nctrls = 3,
202 .ctrls = {
203 {
204 {
205 .id = V4L2_CID_HFLIP,
206 .type = V4L2_CTRL_TYPE_BOOLEAN,
207 .name = "horizontal flip",
208 .minimum = 0,
209 .maximum = 1,
210 .step = 1,
211 .default_value = 0
212 },
213 .set = vv6410_set_hflip,
214 .get = vv6410_get_hflip
215 }, {
216 {
217 .id = V4L2_CID_VFLIP,
218 .type = V4L2_CTRL_TYPE_BOOLEAN,
219 .name = "vertical flip",
220 .minimum = 0,
221 .maximum = 1,
222 .step = 1,
223 .default_value = 0
224 },
225 .set = vv6410_set_vflip,
226 .get = vv6410_get_vflip
227 }, {
228 {
229 .id = V4L2_CID_GAIN,
230 .type = V4L2_CTRL_TYPE_INTEGER,
231 .name = "analog gain",
232 .minimum = 0,
233 .maximum = 15,
234 .step = 1,
235 .default_value = 0
236 },
237 .set = vv6410_set_analog_gain,
238 .get = vv6410_get_analog_gain
239 }
240 },
241
242 .nmodes = 1,
243 .modes = {
244 {
245 356,
246 292,
247 V4L2_PIX_FMT_SGRBG8,
248 V4L2_FIELD_NONE,
249 .sizeimage =
250 356 * 292,
251 .bytesperline = 356,
252 .colorspace = V4L2_COLORSPACE_SRGB,
253 .priv = 0
254 }
255 }
256};
257
258/* If data is NULL, there is only a single value to write, stored in len */
259struct stv_init {
260 const u8 *data;
261 u16 start;
262 u8 len;
263};
264
265static const u8 x1500[] = { /* 0x1500 - 0x150f */
266 0x0b, 0xa7, 0xb7, 0x00, 0x00
267};
268
269static const u8 x1536[] = { /* 0x1536 - 0x153b */
270 0x02, 0x00, 0x60, 0x01, 0x20, 0x01
271};
272
273static const u8 x15c1[] = { /* 0x15c1 - 0x15c2 */
274 0xff, 0x03 /* Output word 0x03ff = 1023 (ISO size) */
275};
276
277static const struct stv_init stv_bridge_init[] = {
278 /* This reg is written twice. Some kind of reset? */
279 {NULL, 0x1620, 0x80},
280 {NULL, 0x1620, 0x00},
281 {NULL, 0x1423, 0x04},
282 {x1500, 0x1500, ARRAY_SIZE(x1500)},
283 {x1536, 0x1536, ARRAY_SIZE(x1536)},
284 {x15c1, 0x15c1, ARRAY_SIZE(x15c1)}
285};
286
287static const u8 vv6410_sensor_init[][2] = {
288 /* Setup registers */
289 {VV6410_SETUP0, VV6410_SOFT_RESET},
290 {VV6410_SETUP0, VV6410_LOW_POWER_MODE},
291 /* Use shuffled read-out mode */
292 {VV6410_SETUP1, BIT(6)},
293 /* All modes to 1 */
294 {VV6410_FGMODES, BIT(6) | BIT(4) | BIT(2) | BIT(0)},
295 {VV6410_PINMAPPING, 0x00},
296 /* Pre-clock generator divide off */
297 {VV6410_DATAFORMAT, BIT(7) | BIT(0)},
298
299 /* Exposure registers */
300 {VV6410_FINEH, VV6410_FINE_EXPOSURE >> 8},
301 {VV6410_FINEL, VV6410_FINE_EXPOSURE & 0xff},
302 {VV6410_COARSEH, VV6410_COARSE_EXPOSURE >> 8},
303 {VV6410_COARSEL, VV6410_COARSE_EXPOSURE & 0xff},
304 {VV6410_ANALOGGAIN, 0xf0 | VV6410_DEFAULT_GAIN},
305 {VV6410_CLKDIV, VV6410_CLK_DIV_2},
306
307 /* System registers */
308 /* Enable voltage doubler */
309 {VV6410_AS0, BIT(6) | BIT(4) | BIT(3) | BIT(2) | BIT(1)},
310 {VV6410_AT0, 0x00},
311 /* Power up audio, differential */
312 {VV6410_AT1, BIT(4)|BIT(0)},
313};
314
315#endif
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index bd9288665a80..6d904d5e4c74 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -123,7 +123,7 @@ static struct ctrl sd_ctrls[] = {
123 }, 123 },
124}; 124};
125 125
126static struct v4l2_pix_format vga_mode[] = { 126static const struct v4l2_pix_format vga_mode[] = {
127 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 127 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
128 .bytesperline = 320, 128 .bytesperline = 320,
129 .sizeimage = 320 * 240 * 3 / 8 + 590, 129 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -136,7 +136,7 @@ static struct v4l2_pix_format vga_mode[] = {
136 .priv = 1}, 136 .priv = 1},
137}; 137};
138 138
139static struct v4l2_pix_format custom_mode[] = { 139static const struct v4l2_pix_format custom_mode[] = {
140 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 140 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
141 .bytesperline = 320, 141 .bytesperline = 320,
142 .sizeimage = 320 * 240 * 3 / 8 + 590, 142 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -149,7 +149,7 @@ static struct v4l2_pix_format custom_mode[] = {
149 .priv = 1}, 149 .priv = 1},
150}; 150};
151 151
152static struct v4l2_pix_format vga_mode2[] = { 152static const struct v4l2_pix_format vga_mode2[] = {
153 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 153 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
154 .bytesperline = 176, 154 .bytesperline = 176,
155 .sizeimage = 176 * 144 * 3 / 8 + 590, 155 .sizeimage = 176 * 144 * 3 / 8 + 590,
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index eac245d7a756..6ee111a3cbd1 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -233,7 +233,7 @@ static char *effects_control[] = {
233 "Negative", 233 "Negative",
234}; 234};
235 235
236static struct v4l2_pix_format vga_mode_t16[] = { 236static const struct v4l2_pix_format vga_mode_t16[] = {
237 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 237 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
238 .bytesperline = 160, 238 .bytesperline = 160,
239 .sizeimage = 160 * 120 * 4 / 8 + 590, 239 .sizeimage = 160 * 120 * 4 / 8 + 590,
@@ -499,7 +499,7 @@ static void om6802_sensor_init(struct gspca_dev *gspca_dev)
499 reg_w_buf(gspca_dev, sensor_reset, sizeof sensor_reset); 499 reg_w_buf(gspca_dev, sensor_reset, sizeof sensor_reset);
500 msleep(5); 500 msleep(5);
501 i = 4; 501 i = 4;
502 while (--i < 0) { 502 while (--i > 0) {
503 byte = reg_r(gspca_dev, 0x0060); 503 byte = reg_r(gspca_dev, 0x0060);
504 if (!(byte & 0x01)) 504 if (!(byte & 0x01))
505 break; 505 break;
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 968a5911704f..94163cceb28a 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -30,15 +30,10 @@ MODULE_LICENSE("GPL");
30struct sd { 30struct sd {
31 struct gspca_dev gspca_dev; /* !! must be the first item */ 31 struct gspca_dev gspca_dev; /* !! must be the first item */
32 32
33 int buflen; /* current length of tmpbuf */ 33 __u16 brightness;
34 __u8 tmpbuf[352 * 288 + 10 * 288]; /* no protection... */ 34 __u16 contrast;
35 __u8 tmpbuf2[352 * 288]; /* no protection... */
36 35
37 unsigned short brightness; 36 __u8 packet;
38 unsigned short contrast;
39
40 char packet;
41 char synchro;
42}; 37};
43 38
44/* V4L2 controls supported by the driver */ 39/* V4L2 controls supported by the driver */
@@ -78,7 +73,7 @@ static struct ctrl sd_ctrls[] = {
78 }, 73 },
79}; 74};
80 75
81static struct v4l2_pix_format sif_mode[] = { 76static const struct v4l2_pix_format sif_mode[] = {
82 {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, 77 {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
83 .bytesperline = 176, 78 .bytesperline = 176,
84 .sizeimage = 176 * 144, 79 .sizeimage = 176 * 144,
@@ -392,6 +387,8 @@ static void setbrightness(struct gspca_dev *gspca_dev)
392/* -- start the camera -- */ 387/* -- start the camera -- */
393static int sd_start(struct gspca_dev *gspca_dev) 388static int sd_start(struct gspca_dev *gspca_dev)
394{ 389{
390 struct sd *sd = (struct sd *) gspca_dev;
391
395 reg_w_1(gspca_dev, TV8532_AD_SLOPE, 0x32); 392 reg_w_1(gspca_dev, TV8532_AD_SLOPE, 0x32);
396 reg_w_1(gspca_dev, TV8532_AD_BITCTRL, 0x00); 393 reg_w_1(gspca_dev, TV8532_AD_BITCTRL, 0x00);
397 tv_8532ReadRegisters(gspca_dev); 394 tv_8532ReadRegisters(gspca_dev);
@@ -443,6 +440,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
443 /************************************************/ 440 /************************************************/
444 tv_8532_PollReg(gspca_dev); 441 tv_8532_PollReg(gspca_dev);
445 reg_w_1(gspca_dev, TV8532_UDP_UPDATE, 0x00); /* 0x31 */ 442 reg_w_1(gspca_dev, TV8532_UDP_UPDATE, 0x00); /* 0x31 */
443
444 gspca_dev->empty_packet = 0; /* check the empty packets */
445 sd->packet = 0; /* ignore the first packets */
446
446 return 0; 447 return 0;
447} 448}
448 449
@@ -451,111 +452,36 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
451 reg_w_1(gspca_dev, TV8532_GPIO_OE, 0x0b); 452 reg_w_1(gspca_dev, TV8532_GPIO_OE, 0x0b);
452} 453}
453 454
454static void tv8532_preprocess(struct gspca_dev *gspca_dev)
455{
456 struct sd *sd = (struct sd *) gspca_dev;
457/* we should received a whole frame with header and EOL marker
458 * in gspca_dev->tmpbuf and return a GBRG pattern in gspca_dev->tmpbuf2
459 * sequence 2bytes header the Alternate pixels bayer GB 4 bytes
460 * Alternate pixels bayer RG 4 bytes EOL */
461 int width = gspca_dev->width;
462 int height = gspca_dev->height;
463 unsigned char *dst = sd->tmpbuf2;
464 unsigned char *data = sd->tmpbuf;
465 int i;
466
467 /* precompute where is the good bayer line */
468 if (((data[3] + data[width + 7]) >> 1)
469 + (data[4] >> 2)
470 + (data[width + 6] >> 1) >= ((data[2] + data[width + 6]) >> 1)
471 + (data[3] >> 2)
472 + (data[width + 5] >> 1))
473 data += 3;
474 else
475 data += 2;
476 for (i = 0; i < height / 2; i++) {
477 memcpy(dst, data, width);
478 data += width + 3;
479 dst += width;
480 memcpy(dst, data, width);
481 data += width + 7;
482 dst += width;
483 }
484}
485
486static void sd_pkt_scan(struct gspca_dev *gspca_dev, 455static void sd_pkt_scan(struct gspca_dev *gspca_dev,
487 struct gspca_frame *frame, /* target */ 456 struct gspca_frame *frame, /* target */
488 __u8 *data, /* isoc packet */ 457 __u8 *data, /* isoc packet */
489 int len) /* iso packet length */ 458 int len) /* iso packet length */
490{ 459{
491 struct sd *sd = (struct sd *) gspca_dev; 460 struct sd *sd = (struct sd *) gspca_dev;
492 461 int packet_type0, packet_type1;
493 if (data[0] != 0x80) { 462
494 sd->packet++; 463 packet_type0 = packet_type1 = INTER_PACKET;
495 if (sd->buflen + len > sizeof sd->tmpbuf) { 464 if (gspca_dev->empty_packet) {
496 if (gspca_dev->last_packet_type != DISCARD_PACKET) { 465 gspca_dev->empty_packet = 0;
497 PDEBUG(D_PACK, "buffer overflow"); 466 sd->packet = gspca_dev->height / 2;
498 gspca_dev->last_packet_type = DISCARD_PACKET; 467 packet_type0 = FIRST_PACKET;
499 } 468 } else if (sd->packet == 0)
500 return; 469 return; /* 2 more lines in 352x288 ! */
501 } 470 sd->packet--;
502 memcpy(&sd->tmpbuf[sd->buflen], data, len); 471 if (sd->packet == 0)
503 sd->buflen += len; 472 packet_type1 = LAST_PACKET;
504 return; 473
505 } 474 /* each packet contains:
506 475 * - header 2 bytes
507 /* here we detect 0x80 */ 476 * - RG line
508 /* counter is limited so we need few header for a frame :) */ 477 * - 4 bytes
509 478 * - GB line
510 /* header 0x80 0x80 0x80 0x80 0x80 */ 479 * - 4 bytes
511 /* packet 00 63 127 145 00 */ 480 */
512 /* sof 0 1 1 0 0 */ 481 gspca_frame_add(gspca_dev, packet_type0,
513 482 frame, data + 2, gspca_dev->width);
514 /* update sequence */ 483 gspca_frame_add(gspca_dev, packet_type1,
515 if (sd->packet == 63 || sd->packet == 127) 484 frame, data + gspca_dev->width + 6, gspca_dev->width);
516 sd->synchro = 1;
517
518 /* is there a frame start ? */
519 if (sd->packet >= (gspca_dev->height >> 1) - 1) {
520 PDEBUG(D_PACK, "SOF > %d packet %d", sd->synchro,
521 sd->packet);
522 if (!sd->synchro) { /* start of frame */
523 if (gspca_dev->last_packet_type == FIRST_PACKET) {
524 tv8532_preprocess(gspca_dev);
525 frame = gspca_frame_add(gspca_dev,
526 LAST_PACKET,
527 frame, sd->tmpbuf2,
528 gspca_dev->width *
529 gspca_dev->width);
530 }
531 gspca_frame_add(gspca_dev, FIRST_PACKET,
532 frame, data, 0);
533 memcpy(sd->tmpbuf, data, len);
534 sd->buflen = len;
535 sd->packet = 0;
536 return;
537 }
538 if (gspca_dev->last_packet_type != DISCARD_PACKET) {
539 PDEBUG(D_PACK,
540 "Warning wrong TV8532 frame detection %d",
541 sd->packet);
542 gspca_dev->last_packet_type = DISCARD_PACKET;
543 }
544 return;
545 }
546
547 if (!sd->synchro) {
548 /* Drop packet frame corrupt */
549 PDEBUG(D_PACK, "DROP SOF %d packet %d",
550 sd->synchro, sd->packet);
551 sd->packet = 0;
552 gspca_dev->last_packet_type = DISCARD_PACKET;
553 return;
554 }
555 sd->synchro = 1;
556 sd->packet++;
557 memcpy(&sd->tmpbuf[sd->buflen], data, len);
558 sd->buflen += len;
559} 485}
560 486
561static void setcontrast(struct gspca_dev *gspca_dev) 487static void setcontrast(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 17af353ddd1c..0525ea51a6de 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -32,44 +32,68 @@ MODULE_LICENSE("GPL");
32struct sd { 32struct sd {
33 struct gspca_dev gspca_dev; /* !! must be the first item */ 33 struct gspca_dev gspca_dev; /* !! must be the first item */
34 34
35 unsigned char autogain; 35 __u8 hflip;
36 unsigned char lightfreq; 36 __u8 vflip;
37 __u8 lightfreq;
38 __u8 sharpness;
37 39
38 char qindex;
39 char bridge; 40 char bridge;
40#define BRIDGE_VC0321 0 41#define BRIDGE_VC0321 0
41#define BRIDGE_VC0323 1 42#define BRIDGE_VC0323 1
42 char sensor; 43 char sensor;
43#define SENSOR_HV7131R 0 44#define SENSOR_HV7131R 0
44#define SENSOR_MI1320 1 45#define SENSOR_MI0360 1
45#define SENSOR_MI1310_SOC 2 46#define SENSOR_MI1320 2
46#define SENSOR_OV7660 3 47#define SENSOR_MI1310_SOC 3
47#define SENSOR_OV7670 4 48#define SENSOR_OV7660 4
48#define SENSOR_PO3130NC 5 49#define SENSOR_OV7670 5
50#define SENSOR_PO1200 6
51#define SENSOR_PO3130NC 7
49}; 52};
50 53
51/* V4L2 controls supported by the driver */ 54/* V4L2 controls supported by the driver */
52static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); 55static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
53static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); 56static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
57static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
58static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
54static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); 59static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
55static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); 60static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
61static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
62static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
56 63
57static struct ctrl sd_ctrls[] = { 64static struct ctrl sd_ctrls[] = {
65/* next 2 controls work with ov7660 and ov7670 only */
66#define HFLIP_IDX 0
58 { 67 {
59 { 68 {
60 .id = V4L2_CID_AUTOGAIN, 69 .id = V4L2_CID_HFLIP,
61 .type = V4L2_CTRL_TYPE_BOOLEAN, 70 .type = V4L2_CTRL_TYPE_BOOLEAN,
62 .name = "Auto Gain", 71 .name = "Mirror",
63 .minimum = 0, 72 .minimum = 0,
64 .maximum = 1, 73 .maximum = 1,
65 .step = 1, 74 .step = 1,
66#define AUTOGAIN_DEF 1 75#define HFLIP_DEF 0
67 .default_value = AUTOGAIN_DEF, 76 .default_value = HFLIP_DEF,
68 }, 77 },
69 .set = sd_setautogain, 78 .set = sd_sethflip,
70 .get = sd_getautogain, 79 .get = sd_gethflip,
71 }, 80 },
72#define LIGHTFREQ_IDX 1 81#define VFLIP_IDX 1
82 {
83 {
84 .id = V4L2_CID_VFLIP,
85 .type = V4L2_CTRL_TYPE_BOOLEAN,
86 .name = "Vflip",
87 .minimum = 0,
88 .maximum = 1,
89 .step = 1,
90#define VFLIP_DEF 0
91 .default_value = VFLIP_DEF,
92 },
93 .set = sd_setvflip,
94 .get = sd_getvflip,
95 },
96#define LIGHTFREQ_IDX 2
73 { 97 {
74 { 98 {
75 .id = V4L2_CID_POWER_LINE_FREQUENCY, 99 .id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -84,9 +108,25 @@ static struct ctrl sd_ctrls[] = {
84 .set = sd_setfreq, 108 .set = sd_setfreq,
85 .get = sd_getfreq, 109 .get = sd_getfreq,
86 }, 110 },
111/* po1200 only */
112#define SHARPNESS_IDX 3
113 {
114 {
115 .id = V4L2_CID_SHARPNESS,
116 .type = V4L2_CTRL_TYPE_INTEGER,
117 .name = "Sharpness",
118 .minimum = 0,
119 .maximum = 2,
120 .step = 1,
121#define SHARPNESS_DEF 1
122 .default_value = SHARPNESS_DEF,
123 },
124 .set = sd_setsharpness,
125 .get = sd_getsharpness,
126 },
87}; 127};
88 128
89static struct v4l2_pix_format vc0321_mode[] = { 129static const struct v4l2_pix_format vc0321_mode[] = {
90 {320, 240, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE, 130 {320, 240, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
91 .bytesperline = 320, 131 .bytesperline = 320,
92 .sizeimage = 320 * 240 * 2, 132 .sizeimage = 320 * 240 * 2,
@@ -98,7 +138,7 @@ static struct v4l2_pix_format vc0321_mode[] = {
98 .colorspace = V4L2_COLORSPACE_SRGB, 138 .colorspace = V4L2_COLORSPACE_SRGB,
99 .priv = 0}, 139 .priv = 0},
100}; 140};
101static struct v4l2_pix_format vc0323_mode[] = { 141static const struct v4l2_pix_format vc0323_mode[] = {
102 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 142 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
103 .bytesperline = 320, 143 .bytesperline = 320,
104 .sizeimage = 320 * 240 * 3 / 8 + 590, 144 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -111,6 +151,252 @@ static struct v4l2_pix_format vc0323_mode[] = {
111 .priv = 0}, 151 .priv = 0},
112}; 152};
113 153
154static const struct v4l2_pix_format svga_mode[] = {
155 {800, 600, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
156 .bytesperline = 800,
157 .sizeimage = 800 * 600 * 1 / 4 + 590,
158 .colorspace = V4L2_COLORSPACE_JPEG,
159 .priv = 0},
160};
161
162/* OV7660/7670 registers */
163#define OV7660_REG_MVFP 0x1e
164#define OV7660_MVFP_MIRROR 0x20
165#define OV7660_MVFP_VFLIP 0x10
166
167static const __u8 mi0360_matrix[9] = {
168 0x50, 0xf8, 0xf8, 0xf5, 0x50, 0xfb, 0xff, 0xf1, 0x50
169};
170
171static const __u8 mi0360_initVGA_JPG[][4] = {
172 {0xb0, 0x03, 0x19, 0xcc},
173 {0xb0, 0x04, 0x02, 0xcc},
174 {0xb3, 0x00, 0x24, 0xcc},
175 {0xb3, 0x00, 0x25, 0xcc},
176 {0xb3, 0x08, 0x01, 0xcc},
177 {0xb3, 0x09, 0x0c, 0xcc},
178 {0xb3, 0x05, 0x01, 0xcc},
179 {0xb3, 0x06, 0x03, 0xcc},
180 {0xb3, 0x03, 0x0a, 0xcc},
181 {0xb3, 0x20, 0x00, 0xcc},
182 {0xb3, 0x21, 0x00, 0xcc},
183 {0xb3, 0x22, 0x01, 0xcc},
184 {0xb3, 0x23, 0xe0, 0xcc},
185 {0xb3, 0x04, 0x05, 0xcc},
186 {0xb3, 0x14, 0x00, 0xcc},
187 {0xb3, 0x15, 0x00, 0xcc},
188 {0xb3, 0x16, 0x02, 0xcc},
189 {0xb3, 0x17, 0x7f, 0xcc},
190 {0xb3, 0x35, 0xdd, 0xcc},
191 {0xb3, 0x34, 0x02, 0xcc},
192 {0xb3, 0x00, 0x25, 0xcc},
193 {0xbc, 0x00, 0x71, 0xcc},
194 {0xb8, 0x00, 0x13, 0xcc},
195 {0xb8, 0x27, 0x20, 0xcc},
196 {0xb8, 0x2c, 0x50, 0xcc},
197 {0xb8, 0x2d, 0xf8, 0xcc},
198 {0xb8, 0x2e, 0xf8, 0xcc},
199 {0xb8, 0x2f, 0xf8, 0xcc},
200 {0xb8, 0x30, 0x50, 0xcc},
201 {0xb8, 0x31, 0xf8, 0xcc},
202 {0xb8, 0x32, 0xf8, 0xcc},
203 {0xb8, 0x33, 0xf8, 0xcc},
204 {0xb8, 0x34, 0x50, 0xcc},
205 {0xb8, 0x35, 0x00, 0xcc},
206 {0xb8, 0x36, 0x00, 0xcc},
207 {0xb8, 0x37, 0x00, 0xcc},
208 {0xb8, 0x01, 0x79, 0xcc},
209 {0xb8, 0x08, 0xe0, 0xcc},
210 {0xb3, 0x01, 0x41, 0xcc},
211 {0xb8, 0x01, 0x79, 0xcc},
212 {0xb8, 0x14, 0x18, 0xcc},
213 {0xb8, 0xb2, 0x0a, 0xcc},
214 {0xb8, 0xb4, 0x0a, 0xcc},
215 {0xb8, 0xb5, 0x0a, 0xcc},
216 {0xb8, 0xfe, 0x00, 0xcc},
217 {0xb8, 0xff, 0x28, 0xcc},
218 {0xb9, 0x00, 0x28, 0xcc},
219 {0xb9, 0x01, 0x28, 0xcc},
220 {0xb9, 0x02, 0x28, 0xcc},
221 {0xb9, 0x03, 0x00, 0xcc},
222 {0xb9, 0x04, 0x00, 0xcc},
223 {0xb9, 0x05, 0x3c, 0xcc},
224 {0xb9, 0x06, 0x3c, 0xcc},
225 {0xb9, 0x07, 0x3c, 0xcc},
226 {0xb9, 0x08, 0x3c, 0xcc},
227 {0xb8, 0x8e, 0x00, 0xcc},
228 {0xb8, 0x8f, 0xff, 0xcc},
229 {0xb8, 0x81, 0x09, 0xcc},
230 {0x31, 0x00, 0x00, 0xbb},
231 {0x09, 0x01, 0xc7, 0xbb},
232 {0x34, 0x01, 0x00, 0xbb},
233 {0x2b, 0x00, 0x28, 0xbb},
234 {0x2c, 0x00, 0x30, 0xbb},
235 {0x2d, 0x00, 0x30, 0xbb},
236 {0x2e, 0x00, 0x28, 0xbb},
237 {0x62, 0x04, 0x11, 0xbb},
238 {0x03, 0x01, 0xe0, 0xbb},
239 {0x2c, 0x00, 0x2c, 0xbb},
240 {0x20, 0xd0, 0x00, 0xbb},
241 {0x01, 0x00, 0x08, 0xbb},
242 {0x06, 0x00, 0x10, 0xbb},
243 {0x05, 0x00, 0x20, 0xbb},
244 {0x20, 0x00, 0x00, 0xbb},
245 {0xb6, 0x00, 0x00, 0xcc},
246 {0xb6, 0x03, 0x02, 0xcc},
247 {0xb6, 0x02, 0x80, 0xcc},
248 {0xb6, 0x05, 0x01, 0xcc},
249 {0xb6, 0x04, 0xe0, 0xcc},
250 {0xb6, 0x12, 0x78, 0xcc},
251 {0xb6, 0x18, 0x02, 0xcc},
252 {0xb6, 0x17, 0x58, 0xcc},
253 {0xb6, 0x16, 0x00, 0xcc},
254 {0xb6, 0x22, 0x12, 0xcc},
255 {0xb6, 0x23, 0x0b, 0xcc},
256 {0xb3, 0x02, 0x02, 0xcc},
257 {0xbf, 0xc0, 0x39, 0xcc},
258 {0xbf, 0xc1, 0x04, 0xcc},
259 {0xbf, 0xcc, 0x10, 0xcc},
260 {0xb9, 0x12, 0x00, 0xcc},
261 {0xb9, 0x13, 0x0a, 0xcc},
262 {0xb9, 0x14, 0x0a, 0xcc},
263 {0xb9, 0x15, 0x0a, 0xcc},
264 {0xb9, 0x16, 0x0a, 0xcc},
265 {0xb9, 0x18, 0x00, 0xcc},
266 {0xb9, 0x19, 0x0f, 0xcc},
267 {0xb9, 0x1a, 0x0f, 0xcc},
268 {0xb9, 0x1b, 0x0f, 0xcc},
269 {0xb9, 0x1c, 0x0f, 0xcc},
270 {0xb8, 0x8e, 0x00, 0xcc},
271 {0xb8, 0x8f, 0xff, 0xcc},
272 {0xb6, 0x12, 0xf8, 0xcc},
273 {0xb8, 0x0c, 0x20, 0xcc},
274 {0xb8, 0x0d, 0x70, 0xcc},
275 {0xb6, 0x13, 0x13, 0xcc},
276 {0x35, 0x00, 0x60, 0xbb},
277 {0xb3, 0x5c, 0x01, 0xcc},
278 {}
279};
280static const __u8 mi0360_initQVGA_JPG[][4] = {
281 {0xb0, 0x03, 0x19, 0xcc},
282 {0xb0, 0x04, 0x02, 0xcc},
283 {0xb3, 0x00, 0x24, 0xcc},
284 {0xb3, 0x00, 0x25, 0xcc},
285 {0xb3, 0x08, 0x01, 0xcc},
286 {0xb3, 0x09, 0x0c, 0xcc},
287 {0xb3, 0x05, 0x01, 0xcc},
288 {0xb3, 0x06, 0x03, 0xcc},
289 {0xb3, 0x03, 0x0a, 0xcc},
290 {0xb3, 0x20, 0x00, 0xcc},
291 {0xb3, 0x21, 0x00, 0xcc},
292 {0xb3, 0x22, 0x01, 0xcc},
293 {0xb3, 0x23, 0xe0, 0xcc},
294 {0xb3, 0x04, 0x05, 0xcc},
295 {0xb3, 0x14, 0x00, 0xcc},
296 {0xb3, 0x15, 0x00, 0xcc},
297 {0xb3, 0x16, 0x02, 0xcc},
298 {0xb3, 0x17, 0x7f, 0xcc},
299 {0xb3, 0x35, 0xdd, 0xcc},
300 {0xb3, 0x34, 0x02, 0xcc},
301 {0xb3, 0x00, 0x25, 0xcc},
302 {0xbc, 0x00, 0xd1, 0xcc},
303 {0xb8, 0x00, 0x13, 0xcc},
304 {0xb8, 0x27, 0x20, 0xcc},
305 {0xb8, 0x2c, 0x50, 0xcc},
306 {0xb8, 0x2d, 0xf8, 0xcc},
307 {0xb8, 0x2e, 0xf8, 0xcc},
308 {0xb8, 0x2f, 0xf8, 0xcc},
309 {0xb8, 0x30, 0x50, 0xcc},
310 {0xb8, 0x31, 0xf8, 0xcc},
311 {0xb8, 0x32, 0xf8, 0xcc},
312 {0xb8, 0x33, 0xf8, 0xcc},
313 {0xb8, 0x34, 0x50, 0xcc},
314 {0xb8, 0x35, 0x00, 0xcc},
315 {0xb8, 0x36, 0x00, 0xcc},
316 {0xb8, 0x37, 0x00, 0xcc},
317 {0xb8, 0x01, 0x79, 0xcc},
318 {0xb8, 0x08, 0xe0, 0xcc},
319 {0xb3, 0x01, 0x41, 0xcc},
320 {0xb8, 0x01, 0x79, 0xcc},
321 {0xb8, 0x14, 0x18, 0xcc},
322 {0xb8, 0xb2, 0x0a, 0xcc},
323 {0xb8, 0xb4, 0x0a, 0xcc},
324 {0xb8, 0xb5, 0x0a, 0xcc},
325 {0xb8, 0xfe, 0x00, 0xcc},
326 {0xb8, 0xff, 0x28, 0xcc},
327 {0xb9, 0x00, 0x28, 0xcc},
328 {0xb9, 0x01, 0x28, 0xcc},
329 {0xb9, 0x02, 0x28, 0xcc},
330 {0xb9, 0x03, 0x00, 0xcc},
331 {0xb9, 0x04, 0x00, 0xcc},
332 {0xb9, 0x05, 0x3c, 0xcc},
333 {0xb9, 0x06, 0x3c, 0xcc},
334 {0xb9, 0x07, 0x3c, 0xcc},
335 {0xb9, 0x08, 0x3c, 0xcc},
336 {0xb8, 0x8e, 0x00, 0xcc},
337 {0xb8, 0x8f, 0xff, 0xcc},
338 {0xb8, 0x81, 0x09, 0xcc},
339 {0x31, 0x00, 0x00, 0xbb},
340 {0x09, 0x01, 0xc7, 0xbb},
341 {0x34, 0x01, 0x00, 0xbb},
342 {0x2b, 0x00, 0x28, 0xbb},
343 {0x2c, 0x00, 0x30, 0xbb},
344 {0x2d, 0x00, 0x30, 0xbb},
345 {0x2e, 0x00, 0x28, 0xbb},
346 {0x62, 0x04, 0x11, 0xbb},
347 {0x03, 0x01, 0xe0, 0xbb},
348 {0x2c, 0x00, 0x2c, 0xbb},
349 {0x20, 0xd0, 0x00, 0xbb},
350 {0x01, 0x00, 0x08, 0xbb},
351 {0x06, 0x00, 0x10, 0xbb},
352 {0x05, 0x00, 0x20, 0xbb},
353 {0x20, 0x00, 0x00, 0xbb},
354 {0xb6, 0x00, 0x00, 0xcc},
355 {0xb6, 0x03, 0x01, 0xcc},
356 {0xb6, 0x02, 0x40, 0xcc},
357 {0xb6, 0x05, 0x00, 0xcc},
358 {0xb6, 0x04, 0xf0, 0xcc},
359 {0xb6, 0x12, 0x78, 0xcc},
360 {0xb6, 0x18, 0x00, 0xcc},
361 {0xb6, 0x17, 0x96, 0xcc},
362 {0xb6, 0x16, 0x00, 0xcc},
363 {0xb6, 0x22, 0x12, 0xcc},
364 {0xb6, 0x23, 0x0b, 0xcc},
365 {0xb3, 0x02, 0x02, 0xcc},
366 {0xbf, 0xc0, 0x39, 0xcc},
367 {0xbf, 0xc1, 0x04, 0xcc},
368 {0xbf, 0xcc, 0x10, 0xcc},
369 {0xb9, 0x12, 0x00, 0xcc},
370 {0xb9, 0x13, 0x0a, 0xcc},
371 {0xb9, 0x14, 0x0a, 0xcc},
372 {0xb9, 0x15, 0x0a, 0xcc},
373 {0xb9, 0x16, 0x0a, 0xcc},
374 {0xb9, 0x18, 0x00, 0xcc},
375 {0xb9, 0x19, 0x0f, 0xcc},
376 {0xb9, 0x1a, 0x0f, 0xcc},
377 {0xb9, 0x1b, 0x0f, 0xcc},
378 {0xb9, 0x1c, 0x0f, 0xcc},
379 {0xb8, 0x8e, 0x00, 0xcc},
380 {0xb8, 0x8f, 0xff, 0xcc},
381 {0xb6, 0x12, 0xf8, 0xcc},
382 {0xb6, 0x13, 0x13, 0xcc},
383 {0xbc, 0x02, 0x18, 0xcc},
384 {0xbc, 0x03, 0x50, 0xcc},
385 {0xbc, 0x04, 0x18, 0xcc},
386 {0xbc, 0x05, 0x00, 0xcc},
387 {0xbc, 0x06, 0x00, 0xcc},
388 {0xbc, 0x08, 0x30, 0xcc},
389 {0xbc, 0x09, 0x40, 0xcc},
390 {0xbc, 0x0a, 0x10, 0xcc},
391 {0xb8, 0x0c, 0x20, 0xcc},
392 {0xb8, 0x0d, 0x70, 0xcc},
393 {0xbc, 0x0b, 0x00, 0xcc},
394 {0xbc, 0x0c, 0x00, 0xcc},
395 {0x35, 0x00, 0xef, 0xbb},
396 {0xb3, 0x5c, 0x01, 0xcc},
397 {}
398};
399
114static const __u8 mi1310_socinitVGA_JPG[][4] = { 400static const __u8 mi1310_socinitVGA_JPG[][4] = {
115 {0xb0, 0x03, 0x19, 0xcc}, 401 {0xb0, 0x03, 0x19, 0xcc},
116 {0xb0, 0x04, 0x02, 0xcc}, 402 {0xb0, 0x04, 0x02, 0xcc},
@@ -823,7 +1109,7 @@ static const __u8 ov7660_initVGA_data[][4] = {
823 {0x00, 0x01, 0x80, 0xaa}, {0x00, 0x02, 0x80, 0xaa}, 1109 {0x00, 0x01, 0x80, 0xaa}, {0x00, 0x02, 0x80, 0xaa},
824 {0x00, 0x12, 0x80, 0xaa}, 1110 {0x00, 0x12, 0x80, 0xaa},
825 {0x00, 0x12, 0x05, 0xaa}, 1111 {0x00, 0x12, 0x05, 0xaa},
826 {0x00, 0x1e, 0x01, 0xaa}, 1112 {0x00, 0x1e, 0x01, 0xaa}, /* MVFP */
827 {0x00, 0x3d, 0x40, 0xaa}, /* 0x3d <-40 gamma 01 */ 1113 {0x00, 0x3d, 0x40, 0xaa}, /* 0x3d <-40 gamma 01 */
828 {0x00, 0x41, 0x00, 0xaa}, /* edge 00 */ 1114 {0x00, 0x41, 0x00, 0xaa}, /* edge 00 */
829 {0x00, 0x0d, 0x48, 0xaa}, {0x00, 0x0e, 0x04, 0xaa}, 1115 {0x00, 0x0d, 0x48, 0xaa}, {0x00, 0x0e, 0x04, 0xaa},
@@ -877,7 +1163,7 @@ static const __u8 ov7660_initQVGA_data[][4] = {
877 {0xb8, 0x27, 0x20, 0xcc}, {0xb8, 0x8f, 0x50, 0xcc}, 1163 {0xb8, 0x27, 0x20, 0xcc}, {0xb8, 0x8f, 0x50, 0xcc},
878 {0x00, 0x01, 0x80, 0xaa}, {0x00, 0x02, 0x80, 0xaa}, 1164 {0x00, 0x01, 0x80, 0xaa}, {0x00, 0x02, 0x80, 0xaa},
879 {0x00, 0x12, 0x80, 0xaa}, {0x00, 0x12, 0x05, 0xaa}, 1165 {0x00, 0x12, 0x80, 0xaa}, {0x00, 0x12, 0x05, 0xaa},
880 {0x00, 0x1e, 0x01, 0xaa}, 1166 {0x00, 0x1e, 0x01, 0xaa}, /* MVFP */
881 {0x00, 0x3d, 0x40, 0xaa}, /* 0x3d <-40 gamma 01 */ 1167 {0x00, 0x3d, 0x40, 0xaa}, /* 0x3d <-40 gamma 01 */
882 {0x00, 0x41, 0x00, 0xaa}, /* edge 00 */ 1168 {0x00, 0x41, 0x00, 0xaa}, /* edge 00 */
883 {0x00, 0x0d, 0x48, 0xaa}, {0x00, 0x0e, 0x04, 0xaa}, 1169 {0x00, 0x0d, 0x48, 0xaa}, {0x00, 0x0e, 0x04, 0xaa},
@@ -983,7 +1269,8 @@ static const __u8 ov7670_initVGA_JPG[][4] = {
983 {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa}, 1269 {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa},
984 {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa}, 1270 {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa},
985 {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa}, 1271 {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa},
986 {0x00, 0x1e, 0x07, 0xaa}, {0x00, 0x21, 0x02, 0xaa}, 1272 {0x00, 0x1e, 0x07, 0xaa}, /* MVFP */
1273 {0x00, 0x21, 0x02, 0xaa},
987 {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa}, 1274 {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa},
988 {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa}, 1275 {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa},
989 {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa}, 1276 {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa},
@@ -1048,7 +1335,8 @@ static const __u8 ov7670_initVGA_JPG[][4] = {
1048 {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa}, 1335 {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa},
1049 {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa}, 1336 {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa},
1050 {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa}, 1337 {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
1051 {0x00, 0x1e, 0x37, 0xaa}, {0x00, 0xaa, 0x14, 0xaa}, 1338 {0x00, 0x1e, 0x37, 0xaa}, /* MVFP */
1339 {0x00, 0xaa, 0x14, 0xaa},
1052 {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa}, 1340 {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa},
1053 {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa}, 1341 {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa},
1054 {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa}, 1342 {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa},
@@ -1110,7 +1398,8 @@ static const __u8 ov7670_initQVGA_JPG[][4] = {
1110 {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa}, 1398 {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa},
1111 {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa}, 1399 {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa},
1112 {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa}, 1400 {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa},
1113 {0x00, 0x1e, 0x07, 0xaa}, {0x00, 0x21, 0x02, 0xaa}, 1401 {0x00, 0x1e, 0x07, 0xaa}, /* MVFP */
1402 {0x00, 0x21, 0x02, 0xaa},
1114 {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa}, 1403 {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa},
1115 {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa}, 1404 {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa},
1116 {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa}, 1405 {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa},
@@ -1175,7 +1464,8 @@ static const __u8 ov7670_initQVGA_JPG[][4] = {
1175 {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa}, 1464 {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa},
1176 {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa}, 1465 {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa},
1177 {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa}, 1466 {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
1178 {0x00, 0x1e, 0x37, 0xaa}, {0x00, 0xaa, 0x14, 0xaa}, 1467 {0x00, 0x1e, 0x37, 0xaa}, /* MVFP */
1468 {0x00, 0xaa, 0x14, 0xaa},
1179 {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa}, 1469 {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa},
1180 {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa}, 1470 {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa},
1181 {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa}, 1471 {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa},
@@ -1204,6 +1494,275 @@ static const __u8 ov7670_initQVGA_JPG[][4] = {
1204 {}, 1494 {},
1205}; 1495};
1206 1496
1497/* PO1200 - values from usbvm326.inf and ms-win trace */
1498static const __u8 po1200_gamma[17] = {
1499 0x00, 0x13, 0x38, 0x59, 0x79, 0x92, 0xa7, 0xb9, 0xc8,
1500 0xd4, 0xdf, 0xe7, 0xee, 0xf4, 0xf9, 0xfc, 0xff
1501};
1502static const __u8 po1200_matrix[9] = {
1503 0x60, 0xf9, 0xe5, 0xe7, 0x50, 0x05, 0xf3, 0xe6, 0x5e
1504};
1505static const __u8 po1200_initVGA_data[][4] = {
1506 {0xb0, 0x03, 0x19, 0xcc}, /* reset? */
1507 {0xb0, 0x03, 0x19, 0xcc},
1508/* {0x00, 0x00, 0x33, 0xdd}, */
1509 {0xb0, 0x04, 0x02, 0xcc},
1510 {0xb0, 0x02, 0x02, 0xcc},
1511 {0xb3, 0x5d, 0x00, 0xcc},
1512 {0xb3, 0x01, 0x01, 0xcc},
1513 {0xb3, 0x00, 0x64, 0xcc},
1514 {0xb3, 0x00, 0x65, 0xcc},
1515 {0xb3, 0x05, 0x01, 0xcc},
1516 {0xb3, 0x06, 0x01, 0xcc},
1517 {0xb3, 0x5c, 0x01, 0xcc},
1518 {0xb3, 0x08, 0x01, 0xcc},
1519 {0xb3, 0x09, 0x0c, 0xcc},
1520 {0xb3, 0x00, 0x67, 0xcc},
1521 {0xb3, 0x02, 0xb2, 0xcc},
1522 {0xb3, 0x03, 0x18, 0xcc},
1523 {0xb3, 0x04, 0x15, 0xcc},
1524 {0xb3, 0x20, 0x00, 0xcc},
1525 {0xb3, 0x21, 0x00, 0xcc},
1526 {0xb3, 0x22, 0x02, 0xcc},
1527 {0xb3, 0x23, 0x58, 0xcc},
1528 {0xb3, 0x14, 0x00, 0xcc},
1529 {0xb3, 0x15, 0x00, 0xcc},
1530 {0xb3, 0x16, 0x03, 0xcc},
1531 {0xb3, 0x17, 0x1f, 0xcc},
1532 {0xbc, 0x00, 0x71, 0xcc},
1533 {0xbc, 0x01, 0x01, 0xcc},
1534 {0xb0, 0x54, 0x13, 0xcc},
1535 {0xb3, 0x00, 0x67, 0xcc},
1536 {0xb3, 0x34, 0x01, 0xcc},
1537 {0xb3, 0x35, 0xdc, 0xcc},
1538 {0x00, 0x03, 0x00, 0xaa},
1539 {0x00, 0x12, 0x05, 0xaa},
1540 {0x00, 0x13, 0x02, 0xaa},
1541 {0x00, 0x1e, 0xc6, 0xaa}, /* h/v flip */
1542 {0x00, 0x21, 0x00, 0xaa},
1543 {0x00, 0x25, 0x02, 0xaa},
1544 {0x00, 0x3c, 0x4f, 0xaa},
1545 {0x00, 0x3f, 0xe0, 0xaa},
1546 {0x00, 0x42, 0xff, 0xaa},
1547 {0x00, 0x45, 0x34, 0xaa},
1548 {0x00, 0x55, 0xfe, 0xaa},
1549 {0x00, 0x59, 0xd3, 0xaa},
1550 {0x00, 0x5e, 0x04, 0xaa},
1551 {0x00, 0x61, 0xb8, 0xaa}, /* sharpness */
1552 {0x00, 0x62, 0x02, 0xaa},
1553 {0x00, 0xa7, 0x31, 0xaa},
1554 {0x00, 0xa9, 0x66, 0xaa},
1555 {0x00, 0xb0, 0x00, 0xaa},
1556 {0x00, 0xb1, 0x00, 0xaa},
1557 {0x00, 0xb3, 0x11, 0xaa},
1558 {0x00, 0xb6, 0x26, 0xaa},
1559 {0x00, 0xb7, 0x20, 0xaa},
1560 {0x00, 0xba, 0x04, 0xaa},
1561 {0x00, 0x88, 0x42, 0xaa},
1562 {0x00, 0x89, 0x9a, 0xaa},
1563 {0x00, 0x8a, 0x88, 0xaa},
1564 {0x00, 0x8b, 0x8e, 0xaa},
1565 {0x00, 0x8c, 0x3e, 0xaa},
1566 {0x00, 0x8d, 0x90, 0xaa},
1567 {0x00, 0x8e, 0x87, 0xaa},
1568 {0x00, 0x8f, 0x96, 0xaa},
1569 {0x00, 0x90, 0x3d, 0xaa},
1570 {0x00, 0x64, 0x00, 0xaa},
1571 {0x00, 0x65, 0x10, 0xaa},
1572 {0x00, 0x66, 0x20, 0xaa},
1573 {0x00, 0x67, 0x2b, 0xaa},
1574 {0x00, 0x68, 0x36, 0xaa},
1575 {0x00, 0x69, 0x49, 0xaa},
1576 {0x00, 0x6a, 0x5a, 0xaa},
1577 {0x00, 0x6b, 0x7f, 0xaa},
1578 {0x00, 0x6c, 0x9b, 0xaa},
1579 {0x00, 0x6d, 0xba, 0xaa},
1580 {0x00, 0x6e, 0xd4, 0xaa},
1581 {0x00, 0x6f, 0xea, 0xaa},
1582 {0x00, 0x70, 0x00, 0xaa},
1583 {0x00, 0x71, 0x10, 0xaa},
1584 {0x00, 0x72, 0x20, 0xaa},
1585 {0x00, 0x73, 0x2b, 0xaa},
1586 {0x00, 0x74, 0x36, 0xaa},
1587 {0x00, 0x75, 0x49, 0xaa},
1588 {0x00, 0x76, 0x5a, 0xaa},
1589 {0x00, 0x77, 0x7f, 0xaa},
1590 {0x00, 0x78, 0x9b, 0xaa},
1591 {0x00, 0x79, 0xba, 0xaa},
1592 {0x00, 0x7a, 0xd4, 0xaa},
1593 {0x00, 0x7b, 0xea, 0xaa},
1594 {0x00, 0x7c, 0x00, 0xaa},
1595 {0x00, 0x7d, 0x10, 0xaa},
1596 {0x00, 0x7e, 0x20, 0xaa},
1597 {0x00, 0x7f, 0x2b, 0xaa},
1598 {0x00, 0x80, 0x36, 0xaa},
1599 {0x00, 0x81, 0x49, 0xaa},
1600 {0x00, 0x82, 0x5a, 0xaa},
1601 {0x00, 0x83, 0x7f, 0xaa},
1602 {0x00, 0x84, 0x9b, 0xaa},
1603 {0x00, 0x85, 0xba, 0xaa},
1604 {0x00, 0x86, 0xd4, 0xaa},
1605 {0x00, 0x87, 0xea, 0xaa},
1606 {0x00, 0x57, 0x2a, 0xaa},
1607 {0x00, 0x03, 0x01, 0xaa},
1608 {0x00, 0x04, 0x10, 0xaa},
1609 {0x00, 0x05, 0x10, 0xaa},
1610 {0x00, 0x06, 0x10, 0xaa},
1611 {0x00, 0x07, 0x10, 0xaa},
1612 {0x00, 0x08, 0x13, 0xaa},
1613 {0x00, 0x0a, 0x00, 0xaa},
1614 {0x00, 0x0b, 0x10, 0xaa},
1615 {0x00, 0x0c, 0x20, 0xaa},
1616 {0x00, 0x0d, 0x18, 0xaa},
1617 {0x00, 0x22, 0x01, 0xaa},
1618 {0x00, 0x23, 0x60, 0xaa},
1619 {0x00, 0x25, 0x08, 0xaa},
1620 {0x00, 0x26, 0x82, 0xaa},
1621 {0x00, 0x2e, 0x0f, 0xaa},
1622 {0x00, 0x2f, 0x1e, 0xaa},
1623 {0x00, 0x30, 0x2d, 0xaa},
1624 {0x00, 0x31, 0x3c, 0xaa},
1625 {0x00, 0x32, 0x4b, 0xaa},
1626 {0x00, 0x33, 0x5a, 0xaa},
1627 {0x00, 0x34, 0x69, 0xaa},
1628 {0x00, 0x35, 0x78, 0xaa},
1629 {0x00, 0x36, 0x87, 0xaa},
1630 {0x00, 0x37, 0x96, 0xaa},
1631 {0x00, 0x38, 0xa5, 0xaa},
1632 {0x00, 0x39, 0xb4, 0xaa},
1633 {0x00, 0x3a, 0xc3, 0xaa},
1634 {0x00, 0x3b, 0xd2, 0xaa},
1635 {0x00, 0x3c, 0xe1, 0xaa},
1636 {0x00, 0x3e, 0xff, 0xaa},
1637 {0x00, 0x3f, 0xff, 0xaa},
1638 {0x00, 0x40, 0xff, 0xaa},
1639 {0x00, 0x41, 0xff, 0xaa},
1640 {0x00, 0x42, 0xff, 0xaa},
1641 {0x00, 0x43, 0xff, 0xaa},
1642 {0x00, 0x03, 0x00, 0xaa},
1643 {0x00, 0x03, 0x00, 0xaa},
1644 {0x00, 0x20, 0xc4, 0xaa},
1645 {0x00, 0x13, 0x03, 0xaa},
1646 {0x00, 0x3c, 0x50, 0xaa},
1647 {0x00, 0x61, 0x6a, 0xaa}, /* sharpness? */
1648 {0x00, 0x51, 0x5b, 0xaa},
1649 {0x00, 0x52, 0x91, 0xaa},
1650 {0x00, 0x53, 0x4c, 0xaa},
1651 {0x00, 0x54, 0x50, 0xaa},
1652 {0x00, 0x56, 0x02, 0xaa},
1653 {0xb6, 0x00, 0x00, 0xcc},
1654 {0xb6, 0x03, 0x03, 0xcc},
1655 {0xb6, 0x02, 0x20, 0xcc},
1656 {0xb6, 0x05, 0x02, 0xcc},
1657 {0xb6, 0x04, 0x58, 0xcc},
1658 {0xb6, 0x12, 0xf8, 0xcc},
1659 {0xb6, 0x13, 0x21, 0xcc},
1660 {0xb6, 0x18, 0x03, 0xcc},
1661 {0xb6, 0x17, 0xa9, 0xcc},
1662 {0xb6, 0x16, 0x80, 0xcc},
1663 {0xb6, 0x22, 0x12, 0xcc},
1664 {0xb6, 0x23, 0x0b, 0xcc},
1665 {0xbf, 0xc0, 0x39, 0xcc},
1666 {0xbf, 0xc1, 0x04, 0xcc},
1667 {0xbf, 0xcc, 0x00, 0xcc},
1668 {0xb8, 0x06, 0x20, 0xcc},
1669 {0xb8, 0x07, 0x03, 0xcc},
1670 {0xb8, 0x08, 0x58, 0xcc},
1671 {0xb8, 0x09, 0x02, 0xcc},
1672 {0xb3, 0x01, 0x41, 0xcc},
1673 {0x00, 0x03, 0x00, 0xaa},
1674 {0x00, 0xd9, 0x0f, 0xaa},
1675 {0x00, 0xda, 0xaa, 0xaa},
1676 {0x00, 0xd9, 0x10, 0xaa},
1677 {0x00, 0xda, 0xaa, 0xaa},
1678 {0x00, 0xd9, 0x11, 0xaa},
1679 {0x00, 0xda, 0x00, 0xaa},
1680 {0x00, 0xd9, 0x12, 0xaa},
1681 {0x00, 0xda, 0xff, 0xaa},
1682 {0x00, 0xd9, 0x13, 0xaa},
1683 {0x00, 0xda, 0xff, 0xaa},
1684 {0x00, 0xe8, 0x11, 0xaa},
1685 {0x00, 0xe9, 0x12, 0xaa},
1686 {0x00, 0xea, 0x5c, 0xaa},
1687 {0x00, 0xeb, 0xff, 0xaa},
1688 {0x00, 0xd8, 0x80, 0xaa},
1689 {0x00, 0xe6, 0x02, 0xaa},
1690 {0x00, 0xd6, 0x40, 0xaa},
1691 {0x00, 0xe3, 0x05, 0xaa},
1692 {0x00, 0xe0, 0x40, 0xaa},
1693 {0x00, 0xde, 0x03, 0xaa},
1694 {0x00, 0xdf, 0x03, 0xaa},
1695 {0x00, 0xdb, 0x02, 0xaa},
1696 {0x00, 0xdc, 0x00, 0xaa},
1697 {0x00, 0xdd, 0x03, 0xaa},
1698 {0x00, 0xe1, 0x08, 0xaa},
1699 {0x00, 0xe2, 0x01, 0xaa},
1700 {0x00, 0xd6, 0x40, 0xaa},
1701 {0x00, 0xe4, 0x40, 0xaa},
1702 {0x00, 0xa8, 0x8f, 0xaa},
1703 {0x00, 0xb4, 0x16, 0xaa},
1704 {0xb0, 0x02, 0x06, 0xcc},
1705 {0xb0, 0x18, 0x06, 0xcc},
1706 {0xb0, 0x19, 0x06, 0xcc},
1707 {0xb3, 0x5d, 0x18, 0xcc},
1708 {0xb3, 0x05, 0x00, 0xcc},
1709 {0xb3, 0x06, 0x00, 0xcc},
1710 {0x00, 0xb4, 0x0e, 0xaa},
1711 {0x00, 0xb5, 0x49, 0xaa},
1712 {0x00, 0xb6, 0x1c, 0xaa},
1713 {0x00, 0xb7, 0x96, 0xaa},
1714/* end of usbvm326.inf - start of ms-win trace */
1715 {0xb6, 0x12, 0xf8, 0xcc},
1716 {0xb6, 0x13, 0x3d, 0xcc},
1717/*read b306*/
1718 {0x00, 0x03, 0x00, 0xaa},
1719 {0x00, 0x1a, 0x09, 0xaa},
1720 {0x00, 0x1b, 0x8a, 0xaa},
1721/*read b827*/
1722 {0xb8, 0x27, 0x00, 0xcc},
1723 {0xb8, 0x26, 0x60, 0xcc},
1724 {0xb8, 0x26, 0x60, 0xcc},
1725/*gamma - to do?*/
1726 {0x00, 0x03, 0x00, 0xaa},
1727 {0x00, 0xae, 0x84, 0xaa},
1728/*gamma again*/
1729 {0x00, 0x03, 0x00, 0xaa},
1730 {0x00, 0x96, 0xa0, 0xaa},
1731/*matrix*/
1732 {0x00, 0x03, 0x00, 0xaa},
1733 {0x00, 0x91, 0x35, 0xaa},
1734 {0x00, 0x92, 0x22, 0xaa},
1735/*gamma*/
1736 {0x00, 0x03, 0x00, 0xaa},
1737 {0x00, 0x95, 0x85, 0xaa},
1738/*matrix*/
1739 {0x00, 0x03, 0x00, 0xaa},
1740 {0x00, 0x4d, 0x20, 0xaa},
1741 {0xb8, 0x22, 0x40, 0xcc},
1742 {0xb8, 0x23, 0x40, 0xcc},
1743 {0xb8, 0x24, 0x40, 0xcc},
1744 {0xb8, 0x81, 0x09, 0xcc},
1745 {0x00, 0x00, 0x64, 0xdd},
1746 {0x00, 0x03, 0x01, 0xaa},
1747/*read 46*/
1748 {0x00, 0x46, 0x3c, 0xaa},
1749 {0x00, 0x03, 0x00, 0xaa},
1750 {0x00, 0x16, 0x40, 0xaa},
1751 {0x00, 0x17, 0x40, 0xaa},
1752 {0x00, 0x18, 0x40, 0xaa},
1753 {0x00, 0x19, 0x41, 0xaa},
1754 {0x00, 0x03, 0x01, 0xaa},
1755 {0x00, 0x46, 0x3c, 0xaa},
1756 {0x00, 0x00, 0x18, 0xdd},
1757/*read bfff*/
1758 {0x00, 0x03, 0x00, 0xaa},
1759 {0x00, 0xb4, 0x1c, 0xaa},
1760 {0x00, 0xb5, 0x92, 0xaa},
1761 {0x00, 0xb6, 0x39, 0xaa},
1762 {0x00, 0xb7, 0x24, 0xaa},
1763/*write 89 0400 1415*/
1764};
1765
1207struct sensor_info { 1766struct sensor_info {
1208 int sensorId; 1767 int sensorId;
1209 __u8 I2cAdd; 1768 __u8 I2cAdd;
@@ -1222,6 +1781,9 @@ static const struct sensor_info sensor_info_data[] = {
1222 {SENSOR_MI1320, 0x80 | 0xc8, 0x00, 0x148c, 0x64, 0x65, 0x01}, 1781 {SENSOR_MI1320, 0x80 | 0xc8, 0x00, 0x148c, 0x64, 0x65, 0x01},
1223 {SENSOR_OV7670, 0x80 | 0x21, 0x0a, 0x7673, 0x66, 0x67, 0x05}, 1782 {SENSOR_OV7670, 0x80 | 0x21, 0x0a, 0x7673, 0x66, 0x67, 0x05},
1224 {SENSOR_MI1310_SOC, 0x80 | 0x5d, 0x00, 0x143a, 0x24, 0x25, 0x01}, 1783 {SENSOR_MI1310_SOC, 0x80 | 0x5d, 0x00, 0x143a, 0x24, 0x25, 0x01},
1784/* (tested in vc032x_probe_sensor) */
1785/* {SENSOR_MI0360, 0x80 | 0x5d, 0x00, 0x8243, 0x24, 0x25, 0x01}, */
1786 {SENSOR_PO1200, 0x80 | 0x5c, 0x00, 0x1200, 0x67, 0x67, 0x01},
1225}; 1787};
1226 1788
1227/* read 'len' bytes in gspca_dev->usb_buf */ 1789/* read 'len' bytes in gspca_dev->usb_buf */
@@ -1278,18 +1840,18 @@ static void read_sensor_register(struct gspca_dev *gspca_dev,
1278 msleep(1); 1840 msleep(1);
1279 } 1841 }
1280 reg_r(gspca_dev, 0xa1, 0xb33e, 1); 1842 reg_r(gspca_dev, 0xa1, 0xb33e, 1);
1281 hdata = gspca_dev->usb_buf[0]; 1843 ldata = gspca_dev->usb_buf[0];
1282 reg_r(gspca_dev, 0xa1, 0xb33d, 1); 1844 reg_r(gspca_dev, 0xa1, 0xb33d, 1);
1283 mdata = gspca_dev->usb_buf[0]; 1845 mdata = gspca_dev->usb_buf[0];
1284 reg_r(gspca_dev, 0xa1, 0xb33c, 1); 1846 reg_r(gspca_dev, 0xa1, 0xb33c, 1);
1285 ldata = gspca_dev->usb_buf[0]; 1847 hdata = gspca_dev->usb_buf[0];
1286 PDEBUG(D_PROBE, "Read Sensor h (0x%02X) m (0x%02X) l (0x%02X)", 1848 PDEBUG(D_PROBE, "Read Sensor %02x%02x %02x",
1287 hdata, mdata, ldata); 1849 hdata, mdata, ldata);
1288 reg_r(gspca_dev, 0xa1, 0xb334, 1); 1850 reg_r(gspca_dev, 0xa1, 0xb334, 1);
1289 if (gspca_dev->usb_buf[0] == 0x02) 1851 if (gspca_dev->usb_buf[0] == 0x02)
1290 *value = (ldata << 8) + mdata; 1852 *value = (hdata << 8) + mdata;
1291 else 1853 else
1292 *value = ldata; 1854 *value = hdata;
1293} 1855}
1294 1856
1295static int vc032x_probe_sensor(struct gspca_dev *gspca_dev) 1857static int vc032x_probe_sensor(struct gspca_dev *gspca_dev)
@@ -1300,7 +1862,7 @@ static int vc032x_probe_sensor(struct gspca_dev *gspca_dev)
1300 const struct sensor_info *ptsensor_info; 1862 const struct sensor_info *ptsensor_info;
1301 1863
1302 reg_r(gspca_dev, 0xa1, 0xbfcf, 1); 1864 reg_r(gspca_dev, 0xa1, 0xbfcf, 1);
1303 PDEBUG(D_PROBE, "check sensor header %d", gspca_dev->usb_buf[0]); 1865 PDEBUG(D_PROBE, "check sensor header %02x", gspca_dev->usb_buf[0]);
1304 for (i = 0; i < ARRAY_SIZE(sensor_info_data); i++) { 1866 for (i = 0; i < ARRAY_SIZE(sensor_info_data); i++) {
1305 ptsensor_info = &sensor_info_data[i]; 1867 ptsensor_info = &sensor_info_data[i];
1306 reg_w(dev, 0xa0, 0x02, 0xb334); 1868 reg_w(dev, 0xa0, 0x02, 0xb334);
@@ -1309,16 +1871,15 @@ static int vc032x_probe_sensor(struct gspca_dev *gspca_dev)
1309 reg_w(dev, 0xa0, 0x01, 0xb308); 1871 reg_w(dev, 0xa0, 0x01, 0xb308);
1310 reg_w(dev, 0xa0, 0x0c, 0xb309); 1872 reg_w(dev, 0xa0, 0x0c, 0xb309);
1311 reg_w(dev, 0xa0, ptsensor_info->I2cAdd, 0xb335); 1873 reg_w(dev, 0xa0, ptsensor_info->I2cAdd, 0xb335);
1312/* PDEBUG(D_PROBE,
1313 "check sensor VC032X -> %d Add -> ox%02X!",
1314 i, ptsensor_info->I2cAdd); */
1315 reg_w(dev, 0xa0, ptsensor_info->op, 0xb301); 1874 reg_w(dev, 0xa0, ptsensor_info->op, 0xb301);
1316 read_sensor_register(gspca_dev, ptsensor_info->IdAdd, &value); 1875 read_sensor_register(gspca_dev, ptsensor_info->IdAdd, &value);
1317 if (value == ptsensor_info->VpId) { 1876 if (value == ptsensor_info->VpId)
1318/* PDEBUG(D_PROBE, "find sensor VC032X -> ox%04X!",
1319 ptsensor_info->VpId); */
1320 return ptsensor_info->sensorId; 1877 return ptsensor_info->sensorId;
1321 } 1878
1879 /* special case for MI0360 */
1880 if (ptsensor_info->sensorId == SENSOR_MI1310_SOC
1881 && value == 0x8243)
1882 return SENSOR_MI0360;
1322 } 1883 }
1323 return -1; 1884 return -1;
1324} 1885}
@@ -1420,13 +1981,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
1420 cam = &gspca_dev->cam; 1981 cam = &gspca_dev->cam;
1421 cam->epaddr = 0x02; 1982 cam->epaddr = 0x02;
1422 sd->bridge = id->driver_info; 1983 sd->bridge = id->driver_info;
1423 if (sd->bridge == BRIDGE_VC0321) {
1424 cam->cam_mode = vc0321_mode;
1425 cam->nmodes = ARRAY_SIZE(vc0321_mode);
1426 } else {
1427 cam->cam_mode = vc0323_mode;
1428 cam->nmodes = ARRAY_SIZE(vc0323_mode);
1429 }
1430 1984
1431 vc0321_reset(gspca_dev); 1985 vc0321_reset(gspca_dev);
1432 sensor = vc032x_probe_sensor(gspca_dev); 1986 sensor = vc032x_probe_sensor(gspca_dev);
@@ -1436,35 +1990,66 @@ static int sd_config(struct gspca_dev *gspca_dev,
1436 return -EINVAL; 1990 return -EINVAL;
1437 case SENSOR_HV7131R: 1991 case SENSOR_HV7131R:
1438 PDEBUG(D_PROBE, "Find Sensor HV7131R"); 1992 PDEBUG(D_PROBE, "Find Sensor HV7131R");
1439 sd->sensor = SENSOR_HV7131R; 1993 break;
1994 case SENSOR_MI0360:
1995 PDEBUG(D_PROBE, "Find Sensor MI0360");
1996 sd->bridge = BRIDGE_VC0323;
1440 break; 1997 break;
1441 case SENSOR_MI1310_SOC: 1998 case SENSOR_MI1310_SOC:
1442 PDEBUG(D_PROBE, "Find Sensor MI1310_SOC"); 1999 PDEBUG(D_PROBE, "Find Sensor MI1310_SOC");
1443 sd->sensor = SENSOR_MI1310_SOC;
1444 break; 2000 break;
1445 case SENSOR_MI1320: 2001 case SENSOR_MI1320:
1446 PDEBUG(D_PROBE, "Find Sensor MI1320"); 2002 PDEBUG(D_PROBE, "Find Sensor MI1320");
1447 sd->sensor = SENSOR_MI1320;
1448 break; 2003 break;
1449 case SENSOR_OV7660: 2004 case SENSOR_OV7660:
1450 PDEBUG(D_PROBE, "Find Sensor OV7660"); 2005 PDEBUG(D_PROBE, "Find Sensor OV7660");
1451 sd->sensor = SENSOR_OV7660;
1452 break; 2006 break;
1453 case SENSOR_OV7670: 2007 case SENSOR_OV7670:
1454 PDEBUG(D_PROBE, "Find Sensor OV7670"); 2008 PDEBUG(D_PROBE, "Find Sensor OV7670");
1455 sd->sensor = SENSOR_OV7670; 2009 break;
2010 case SENSOR_PO1200:
2011 PDEBUG(D_PROBE, "Find Sensor PO1200");
1456 break; 2012 break;
1457 case SENSOR_PO3130NC: 2013 case SENSOR_PO3130NC:
1458 PDEBUG(D_PROBE, "Find Sensor PO3130NC"); 2014 PDEBUG(D_PROBE, "Find Sensor PO3130NC");
1459 sd->sensor = SENSOR_PO3130NC;
1460 break; 2015 break;
1461 } 2016 }
2017 sd->sensor = sensor;
1462 2018
1463 sd->qindex = 7; 2019 if (sd->bridge == BRIDGE_VC0321) {
1464 sd->autogain = AUTOGAIN_DEF; 2020 cam->cam_mode = vc0321_mode;
2021 cam->nmodes = ARRAY_SIZE(vc0321_mode);
2022 } else {
2023 if (sensor != SENSOR_PO1200) {
2024 cam->cam_mode = vc0323_mode;
2025 cam->nmodes = ARRAY_SIZE(vc0323_mode);
2026 } else {
2027 cam->cam_mode = svga_mode;
2028 cam->nmodes = ARRAY_SIZE(svga_mode);
2029 }
2030 }
2031
2032 sd->hflip = HFLIP_DEF;
2033 sd->vflip = VFLIP_DEF;
2034 if (sd->sensor == SENSOR_OV7670) {
2035 sd->hflip = 1;
2036 sd->vflip = 1;
2037 }
1465 sd->lightfreq = FREQ_DEF; 2038 sd->lightfreq = FREQ_DEF;
1466 if (sd->sensor != SENSOR_OV7670) 2039 if (sd->sensor != SENSOR_OV7670)
1467 gspca_dev->ctrl_dis = (1 << LIGHTFREQ_IDX); 2040 gspca_dev->ctrl_dis = (1 << LIGHTFREQ_IDX);
2041 switch (sd->sensor) {
2042 case SENSOR_OV7660:
2043 case SENSOR_OV7670:
2044 case SENSOR_PO1200:
2045 break;
2046 default:
2047 gspca_dev->ctrl_dis = (1 << HFLIP_IDX)
2048 | (1 << VFLIP_IDX);
2049 break;
2050 }
2051
2052 sd->sharpness = SHARPNESS_DEF;
1468 2053
1469 if (sd->bridge == BRIDGE_VC0321) { 2054 if (sd->bridge == BRIDGE_VC0321) {
1470 reg_r(gspca_dev, 0x8a, 0, 3); 2055 reg_r(gspca_dev, 0x8a, 0, 3);
@@ -1482,12 +2067,33 @@ static int sd_init(struct gspca_dev *gspca_dev)
1482 return 0; 2067 return 0;
1483} 2068}
1484 2069
1485static void setquality(struct gspca_dev *gspca_dev) 2070/* for OV7660 and OV7670 only */
2071static void sethvflip(struct gspca_dev *gspca_dev)
1486{ 2072{
1487} 2073 struct sd *sd = (struct sd *) gspca_dev;
2074 __u8 data;
1488 2075
1489static void setautogain(struct gspca_dev *gspca_dev) 2076 switch (sd->sensor) {
1490{ 2077 case SENSOR_OV7660:
2078 data = 1;
2079 break;
2080 case SENSOR_OV7670:
2081 data = 7;
2082 break;
2083 case SENSOR_PO1200:
2084 data = 0;
2085 i2c_write(gspca_dev, 0x03, &data, 1);
2086 data = 0x80 * sd->hflip
2087 | 0x40 * sd->vflip
2088 | 0x06;
2089 i2c_write(gspca_dev, 0x1e, &data, 1);
2090 return;
2091 default:
2092 return;
2093 }
2094 data |= OV7660_MVFP_MIRROR * sd->hflip
2095 | OV7660_MVFP_VFLIP * sd->vflip;
2096 i2c_write(gspca_dev, OV7660_REG_MVFP, &data, 1);
1491} 2097}
1492 2098
1493static void setlightfreq(struct gspca_dev *gspca_dev) 2099static void setlightfreq(struct gspca_dev *gspca_dev)
@@ -1501,6 +2107,20 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
1501 usb_exchange(gspca_dev, ov7660_freq_tb[sd->lightfreq]); 2107 usb_exchange(gspca_dev, ov7660_freq_tb[sd->lightfreq]);
1502} 2108}
1503 2109
2110/* po1200 only */
2111static void setsharpness(struct gspca_dev *gspca_dev)
2112{
2113 struct sd *sd = (struct sd *) gspca_dev;
2114 __u8 data;
2115
2116 if (sd->sensor != SENSOR_PO1200)
2117 return;
2118 data = 0;
2119 i2c_write(gspca_dev, 0x03, &data, 1);
2120 data = 0xb5 + sd->sharpness * 3;
2121 i2c_write(gspca_dev, 0x61, &data, 1);
2122}
2123
1504static int sd_start(struct gspca_dev *gspca_dev) 2124static int sd_start(struct gspca_dev *gspca_dev)
1505{ 2125{
1506 struct sd *sd = (struct sd *) gspca_dev; 2126 struct sd *sd = (struct sd *) gspca_dev;
@@ -1551,6 +2171,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
1551 usb_exchange(gspca_dev, ov7670_initVGA_JPG); 2171 usb_exchange(gspca_dev, ov7670_initVGA_JPG);
1552 } 2172 }
1553 break; 2173 break;
2174 case SENSOR_MI0360:
2175 GammaT = mi1320_gamma;
2176 MatrixT = mi0360_matrix;
2177 if (mode) {
2178 /* 320x240 */
2179 usb_exchange(gspca_dev, mi0360_initQVGA_JPG);
2180 } else {
2181 /* 640x480 */
2182 usb_exchange(gspca_dev, mi0360_initVGA_JPG);
2183 }
2184 break;
1554 case SENSOR_MI1310_SOC: 2185 case SENSOR_MI1310_SOC:
1555 if (mode) { 2186 if (mode) {
1556 /* 320x240 */ 2187 /* 320x240 */
@@ -1583,6 +2214,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
1583 } 2214 }
1584 usb_exchange(gspca_dev, po3130_rundata); 2215 usb_exchange(gspca_dev, po3130_rundata);
1585 break; 2216 break;
2217 case SENSOR_PO1200:
2218 GammaT = po1200_gamma;
2219 MatrixT = po1200_matrix;
2220 usb_exchange(gspca_dev, po1200_initVGA_data);
2221 break;
1586 default: 2222 default:
1587 PDEBUG(D_PROBE, "Damned !! no sensor found Bye"); 2223 PDEBUG(D_PROBE, "Damned !! no sensor found Bye");
1588 return -EMEDIUMTYPE; 2224 return -EMEDIUMTYPE;
@@ -1615,11 +2251,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
1615 reg_w(gspca_dev->dev, 0xa0, 0x23, 0xb800); * ISP CTRL_BAS 2251 reg_w(gspca_dev->dev, 0xa0, 0x23, 0xb800); * ISP CTRL_BAS
1616 */ 2252 */
1617 /* set the led on 0x0892 0x0896 */ 2253 /* set the led on 0x0892 0x0896 */
1618 reg_w(gspca_dev->dev, 0x89, 0xffff, 0xfdff); 2254 if (sd->sensor != SENSOR_PO1200) {
1619 msleep(100); 2255 reg_w(gspca_dev->dev, 0x89, 0xffff, 0xfdff);
1620 setquality(gspca_dev); 2256 msleep(100);
1621 setautogain(gspca_dev); 2257 sethvflip(gspca_dev);
1622 setlightfreq(gspca_dev); 2258 setlightfreq(gspca_dev);
2259 } else {
2260 setsharpness(gspca_dev);
2261 sethvflip(gspca_dev);
2262 reg_w(gspca_dev->dev, 0x89, 0x0400, 0x1415);
2263 }
1623 } 2264 }
1624 return 0; 2265 return 0;
1625} 2266}
@@ -1665,24 +2306,48 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1665 data, len); 2306 data, len);
1666 return; 2307 return;
1667 } 2308 }
2309
2310 /* The vc0321 sends some additional data after sending the complete
2311 * frame, we ignore this. */
2312 if (sd->bridge == BRIDGE_VC0321
2313 && len > frame->v4l2_buf.length - (frame->data_end - frame->data))
2314 len = frame->v4l2_buf.length - (frame->data_end - frame->data);
1668 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); 2315 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
1669} 2316}
1670 2317
1671static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) 2318static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
2319{
2320 struct sd *sd = (struct sd *) gspca_dev;
2321
2322 sd->hflip = val;
2323 if (gspca_dev->streaming)
2324 sethvflip(gspca_dev);
2325 return 0;
2326}
2327
2328static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
2329{
2330 struct sd *sd = (struct sd *) gspca_dev;
2331
2332 *val = sd->hflip;
2333 return 0;
2334}
2335
2336static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
1672{ 2337{
1673 struct sd *sd = (struct sd *) gspca_dev; 2338 struct sd *sd = (struct sd *) gspca_dev;
1674 2339
1675 sd->autogain = val; 2340 sd->vflip = val;
1676 if (gspca_dev->streaming) 2341 if (gspca_dev->streaming)
1677 setautogain(gspca_dev); 2342 sethvflip(gspca_dev);
1678 return 0; 2343 return 0;
1679} 2344}
1680 2345
1681static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) 2346static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
1682{ 2347{
1683 struct sd *sd = (struct sd *) gspca_dev; 2348 struct sd *sd = (struct sd *) gspca_dev;
1684 2349
1685 *val = sd->autogain; 2350 *val = sd->vflip;
1686 return 0; 2351 return 0;
1687} 2352}
1688 2353
@@ -1704,6 +2369,24 @@ static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
1704 return 0; 2369 return 0;
1705} 2370}
1706 2371
2372static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
2373{
2374 struct sd *sd = (struct sd *) gspca_dev;
2375
2376 sd->sharpness = val;
2377 if (gspca_dev->streaming)
2378 setsharpness(gspca_dev);
2379 return 0;
2380}
2381
2382static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
2383{
2384 struct sd *sd = (struct sd *) gspca_dev;
2385
2386 *val = sd->sharpness;
2387 return 0;
2388}
2389
1707static int sd_querymenu(struct gspca_dev *gspca_dev, 2390static int sd_querymenu(struct gspca_dev *gspca_dev,
1708 struct v4l2_querymenu *menu) 2391 struct v4l2_querymenu *menu)
1709{ 2392{
@@ -1743,11 +2426,13 @@ static const struct sd_desc sd_desc = {
1743static const __devinitdata struct usb_device_id device_table[] = { 2426static const __devinitdata struct usb_device_id device_table[] = {
1744 {USB_DEVICE(0x046d, 0x0892), .driver_info = BRIDGE_VC0321}, 2427 {USB_DEVICE(0x046d, 0x0892), .driver_info = BRIDGE_VC0321},
1745 {USB_DEVICE(0x046d, 0x0896), .driver_info = BRIDGE_VC0321}, 2428 {USB_DEVICE(0x046d, 0x0896), .driver_info = BRIDGE_VC0321},
2429 {USB_DEVICE(0x046d, 0x0897), .driver_info = BRIDGE_VC0321},
1746 {USB_DEVICE(0x0ac8, 0x0321), .driver_info = BRIDGE_VC0321}, 2430 {USB_DEVICE(0x0ac8, 0x0321), .driver_info = BRIDGE_VC0321},
1747 {USB_DEVICE(0x0ac8, 0x0323), .driver_info = BRIDGE_VC0323}, 2431 {USB_DEVICE(0x0ac8, 0x0323), .driver_info = BRIDGE_VC0323},
1748 {USB_DEVICE(0x0ac8, 0x0328), .driver_info = BRIDGE_VC0321}, 2432 {USB_DEVICE(0x0ac8, 0x0328), .driver_info = BRIDGE_VC0321},
1749 {USB_DEVICE(0x0ac8, 0xc001), .driver_info = BRIDGE_VC0321}, 2433 {USB_DEVICE(0x0ac8, 0xc001), .driver_info = BRIDGE_VC0321},
1750 {USB_DEVICE(0x0ac8, 0xc002), .driver_info = BRIDGE_VC0321}, 2434 {USB_DEVICE(0x0ac8, 0xc002), .driver_info = BRIDGE_VC0321},
2435 {USB_DEVICE(0x15b8, 0x6002), .driver_info = BRIDGE_VC0323},
1751 {USB_DEVICE(0x17ef, 0x4802), .driver_info = BRIDGE_VC0323}, 2436 {USB_DEVICE(0x17ef, 0x4802), .driver_info = BRIDGE_VC0323},
1752 {} 2437 {}
1753}; 2438};
diff --git a/drivers/media/video/gspca/zc3xx-reg.h b/drivers/media/video/gspca/zc3xx-reg.h
index f52e09c2cc19..bfb559c3b713 100644
--- a/drivers/media/video/gspca/zc3xx-reg.h
+++ b/drivers/media/video/gspca/zc3xx-reg.h
@@ -244,14 +244,6 @@
244#define ZC3XX_R1CA_SHARPNESS04 0x01ca 244#define ZC3XX_R1CA_SHARPNESS04 0x01ca
245#define ZC3XX_R1CB_SHARPNESS05 0x01cb 245#define ZC3XX_R1CB_SHARPNESS05 0x01cb
246 246
247/* Synchronization */
248#define ZC3XX_R190_SYNC00LOW 0x0190
249#define ZC3XX_R191_SYNC00MID 0x0191
250#define ZC3XX_R192_SYNC00HIGH 0x0192
251#define ZC3XX_R195_SYNC01LOW 0x0195
252#define ZC3XX_R196_SYNC01MID 0x0196
253#define ZC3XX_R197_SYNC01HIGH 0x0197
254
255/* Dead pixels */ 247/* Dead pixels */
256#define ZC3XX_R250_DEADPIXELSMODE 0x0250 248#define ZC3XX_R250_DEADPIXELSMODE 0x0250
257 249
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 0befacf49855..ec2a53d53fe2 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -51,16 +51,16 @@ struct sd {
51#define SENSOR_CS2102 0 51#define SENSOR_CS2102 0
52#define SENSOR_CS2102K 1 52#define SENSOR_CS2102K 1
53#define SENSOR_GC0305 2 53#define SENSOR_GC0305 2
54#define SENSOR_HDCS2020 3 54#define SENSOR_HDCS2020b 3
55#define SENSOR_HDCS2020b 4 55#define SENSOR_HV7131B 4
56#define SENSOR_HV7131B 5 56#define SENSOR_HV7131C 5
57#define SENSOR_HV7131C 6 57#define SENSOR_ICM105A 6
58#define SENSOR_ICM105A 7 58#define SENSOR_MC501CB 7
59#define SENSOR_MC501CB 8 59#define SENSOR_OV7620 8
60#define SENSOR_OV7620 9 60/*#define SENSOR_OV7648 8 - same values */
61/*#define SENSOR_OV7648 9 - same values */ 61#define SENSOR_OV7630C 9
62#define SENSOR_OV7630C 10 62#define SENSOR_PAS106 10
63#define SENSOR_PAS106 11 63#define SENSOR_PAS202B 11
64#define SENSOR_PB0330 12 64#define SENSOR_PB0330 12
65#define SENSOR_PO2030 13 65#define SENSOR_PO2030 13
66#define SENSOR_TAS5130CK 14 66#define SENSOR_TAS5130CK 14
@@ -173,7 +173,7 @@ static struct ctrl sd_ctrls[] = {
173 }, 173 },
174}; 174};
175 175
176static struct v4l2_pix_format vga_mode[] = { 176static const struct v4l2_pix_format vga_mode[] = {
177 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 177 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
178 .bytesperline = 320, 178 .bytesperline = 320,
179 .sizeimage = 320 * 240 * 3 / 8 + 590, 179 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -186,7 +186,7 @@ static struct v4l2_pix_format vga_mode[] = {
186 .priv = 0}, 186 .priv = 0},
187}; 187};
188 188
189static struct v4l2_pix_format sif_mode[] = { 189static const struct v4l2_pix_format sif_mode[] = {
190 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 190 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
191 .bytesperline = 176, 191 .bytesperline = 176,
192 .sizeimage = 176 * 144 * 3 / 8 + 590, 192 .sizeimage = 176 * 144 * 3 / 8 + 590,
@@ -1653,295 +1653,6 @@ static const struct usb_action gc0305_NoFliker[] = {
1653 {} 1653 {}
1654}; 1654};
1655 1655
1656/* play poker with registers at your own risk !! */
1657static const struct usb_action hdcs2020xx_Initial[] = {
1658 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
1659 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
1660 {0xa0, 0x0e, ZC3XX_R010_CMOSSENSORSELECT},
1661 {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT},
1662 {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
1663 {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
1664 {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
1665 {0xa0, 0xd0, ZC3XX_R006_FRAMEHEIGHTLOW},
1666 /* D0 ?? E0 did not start */
1667 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
1668 {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC},
1669 {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
1670 {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE},
1671 {0xa0, 0x08, ZC3XX_R098_WINYSTARTLOW},
1672 {0xa0, 0x02, ZC3XX_R09A_WINXSTARTLOW},
1673 {0xa0, 0x08, ZC3XX_R11A_FIRSTYLOW},
1674 {0xa0, 0x02, ZC3XX_R11C_FIRSTXLOW},
1675 {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH},
1676 {0xa0, 0xd8, ZC3XX_R09C_WINHEIGHTLOW},
1677 {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH},
1678 {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW},
1679 {0xaa, 0x02, 0x0002},
1680 {0xaa, 0x07, 0x0006},
1681 {0xaa, 0x08, 0x0002},
1682 {0xaa, 0x09, 0x0006},
1683 {0xaa, 0x0a, 0x0001},
1684 {0xaa, 0x0b, 0x0001},
1685 {0xaa, 0x0c, 0x0008},
1686 {0xaa, 0x0d, 0x0000},
1687 {0xaa, 0x10, 0x0000},
1688 {0xaa, 0x12, 0x0005},
1689 {0xaa, 0x13, 0x0063},
1690 {0xaa, 0x15, 0x0070},
1691 {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
1692 {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
1693 {0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
1694 {0xa0, 0x00, 0x01ad},
1695 {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
1696 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
1697 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
1698 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
1699 {0xa0, 0x70, ZC3XX_R18D_YTARGET},
1700 {0xa1, 0x01, 0x0002},
1701 {0xa1, 0x01, 0x0008},
1702 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */
1703 {0xa0, 0x04, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */
1704 {0xa1, 0x01, 0x01c8},
1705 {0xa1, 0x01, 0x01c9},
1706 {0xa1, 0x01, 0x01ca},
1707 {0xa0, 0x07, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */
1708 {0xa0, 0x11, ZC3XX_R120_GAMMA00}, /* gamma ~4 */
1709 {0xa0, 0x37, ZC3XX_R121_GAMMA01},
1710 {0xa0, 0x58, ZC3XX_R122_GAMMA02},
1711 {0xa0, 0x79, ZC3XX_R123_GAMMA03},
1712 {0xa0, 0x91, ZC3XX_R124_GAMMA04},
1713 {0xa0, 0xa6, ZC3XX_R125_GAMMA05},
1714 {0xa0, 0xb8, ZC3XX_R126_GAMMA06},
1715 {0xa0, 0xc7, ZC3XX_R127_GAMMA07},
1716 {0xa0, 0xd3, ZC3XX_R128_GAMMA08},
1717 {0xa0, 0xde, ZC3XX_R129_GAMMA09},
1718 {0xa0, 0xe6, ZC3XX_R12A_GAMMA0A},
1719 {0xa0, 0xed, ZC3XX_R12B_GAMMA0B},
1720 {0xa0, 0xf3, ZC3XX_R12C_GAMMA0C},
1721 {0xa0, 0xf8, ZC3XX_R12D_GAMMA0D},
1722 {0xa0, 0xfb, ZC3XX_R12E_GAMMA0E},
1723 {0xa0, 0xff, ZC3XX_R12F_GAMMA0F},
1724 {0xa0, 0x26, ZC3XX_R130_GAMMA10},
1725 {0xa0, 0x23, ZC3XX_R131_GAMMA11},
1726 {0xa0, 0x20, ZC3XX_R132_GAMMA12},
1727 {0xa0, 0x1c, ZC3XX_R133_GAMMA13},
1728 {0xa0, 0x16, ZC3XX_R134_GAMMA14},
1729 {0xa0, 0x13, ZC3XX_R135_GAMMA15},
1730 {0xa0, 0x10, ZC3XX_R136_GAMMA16},
1731 {0xa0, 0x0d, ZC3XX_R137_GAMMA17},
1732 {0xa0, 0x0b, ZC3XX_R138_GAMMA18},
1733 {0xa0, 0x09, ZC3XX_R139_GAMMA19},
1734 {0xa0, 0x07, ZC3XX_R13A_GAMMA1A},
1735 {0xa0, 0x06, ZC3XX_R13B_GAMMA1B},
1736 {0xa0, 0x05, ZC3XX_R13C_GAMMA1C},
1737 {0xa0, 0x04, ZC3XX_R13D_GAMMA1D},
1738 {0xa0, 0x03, ZC3XX_R13E_GAMMA1E},
1739 {0xa0, 0x02, ZC3XX_R13F_GAMMA1F},
1740
1741 {0xa0, 0x4c, ZC3XX_R10A_RGB00}, /* matrix */
1742 {0xa0, 0xf5, ZC3XX_R10B_RGB01},
1743 {0xa0, 0xff, ZC3XX_R10C_RGB02},
1744 {0xa0, 0xf9, ZC3XX_R10D_RGB10},
1745 {0xa0, 0x51, ZC3XX_R10E_RGB11},
1746 {0xa0, 0xf5, ZC3XX_R10F_RGB12},
1747 {0xa0, 0xfb, ZC3XX_R110_RGB20},
1748 {0xa0, 0xed, ZC3XX_R111_RGB21},
1749 {0xa0, 0x5f, ZC3XX_R112_RGB22},
1750
1751 {0xa1, 0x01, 0x0180},
1752 {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
1753 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
1754 {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID},
1755 {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW},
1756 {0xaa, 0x20, 0x0004},
1757 {0xaa, 0x21, 0x003d},
1758 {0xaa, 0x03, 0x0041},
1759 {0xaa, 0x04, 0x0010},
1760 {0xaa, 0x05, 0x003d},
1761 {0xaa, 0x0e, 0x0001},
1762 {0xaa, 0x0f, 0x0000},
1763 {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
1764 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
1765 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
1766 {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID},
1767 {0xa0, 0x3d, ZC3XX_R192_EXPOSURELIMITLOW},
1768 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
1769 {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
1770 {0xa0, 0x9b, ZC3XX_R197_ANTIFLICKERLOW},
1771 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
1772 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
1773 {0xa0, 0x41, ZC3XX_R01D_HSYNC_0},
1774 {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1},
1775 {0xa0, 0xad, ZC3XX_R01F_HSYNC_2},
1776 {0xa0, 0xff, ZC3XX_R020_HSYNC_3},
1777 {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID},
1778 {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW},
1779 {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE},
1780 {0xa1, 0x01, 0x0195},
1781 {0xa1, 0x01, 0x0196},
1782 {0xa1, 0x01, 0x0197},
1783 {0xa0, 0x3d, ZC3XX_R192_EXPOSURELIMITLOW},
1784 {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID},
1785 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
1786 {0xa0, 0x1d, ZC3XX_R116_RGAIN},
1787 {0xa0, 0x40, ZC3XX_R117_GGAIN},
1788 {0xa0, 0x85, ZC3XX_R118_BGAIN},
1789 {0xa1, 0x01, 0x0116},
1790 {0xa1, 0x01, 0x0118},
1791 {0xa1, 0x01, 0x0180},
1792 {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
1793 {0xa0, 0x1d, ZC3XX_R116_RGAIN},
1794 {0xa0, 0x40, ZC3XX_R117_GGAIN},
1795 {0xa0, 0x85, ZC3XX_R118_BGAIN},
1796 {0xa1, 0x01, 0x0116},
1797 {0xa1, 0x01, 0x0118},
1798/* {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, */
1799 {0xa0, 0x00, 0x0007},
1800 {}
1801};
1802
1803static const struct usb_action hdcs2020xx_InitialScale[] = {
1804 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
1805 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
1806 {0xa0, 0x0e, ZC3XX_R010_CMOSSENSORSELECT},
1807 {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT},
1808 {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
1809 {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
1810 {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
1811 {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW},
1812 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
1813 {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC},
1814 {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
1815 {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE},
1816 {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW},
1817 {0xa0, 0x03, ZC3XX_R09A_WINXSTARTLOW},
1818 {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW},
1819 {0xa0, 0x03, ZC3XX_R11C_FIRSTXLOW},
1820 {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH},
1821 {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW},
1822 {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH},
1823 {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW},
1824 {0xaa, 0x02, 0x0002},
1825 {0xaa, 0x07, 0x0006},
1826 {0xaa, 0x08, 0x0002},
1827 {0xaa, 0x09, 0x0006},
1828 {0xaa, 0x0a, 0x0001},
1829 {0xaa, 0x0b, 0x0001},
1830 {0xaa, 0x0c, 0x0008},
1831 {0xaa, 0x0d, 0x0000},
1832 {0xaa, 0x10, 0x0000},
1833 {0xaa, 0x12, 0x0005},
1834 {0xaa, 0x13, 0x0063},
1835 {0xaa, 0x15, 0x0070},
1836 {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION},
1837 {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
1838 {0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
1839 {0xa0, 0x00, 0x01ad},
1840 {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
1841 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
1842 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
1843 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
1844 {0xa0, 0x70, ZC3XX_R18D_YTARGET},
1845 {0xa1, 0x01, 0x0002},
1846 {0xa1, 0x01, 0x0008},
1847 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */
1848 {0xa0, 0x04, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */
1849 {0xa1, 0x01, 0x01c8},
1850 {0xa1, 0x01, 0x01c9},
1851 {0xa1, 0x01, 0x01ca},
1852 {0xa0, 0x07, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */
1853 {0xa0, 0x11, ZC3XX_R120_GAMMA00}, /* gamma ~4*/
1854 {0xa0, 0x37, ZC3XX_R121_GAMMA01},
1855 {0xa0, 0x58, ZC3XX_R122_GAMMA02},
1856 {0xa0, 0x79, ZC3XX_R123_GAMMA03},
1857 {0xa0, 0x91, ZC3XX_R124_GAMMA04},
1858 {0xa0, 0xa6, ZC3XX_R125_GAMMA05},
1859 {0xa0, 0xb8, ZC3XX_R126_GAMMA06},
1860 {0xa0, 0xc7, ZC3XX_R127_GAMMA07},
1861 {0xa0, 0xd3, ZC3XX_R128_GAMMA08},
1862 {0xa0, 0xde, ZC3XX_R129_GAMMA09},
1863 {0xa0, 0xe6, ZC3XX_R12A_GAMMA0A},
1864 {0xa0, 0xed, ZC3XX_R12B_GAMMA0B},
1865 {0xa0, 0xf3, ZC3XX_R12C_GAMMA0C},
1866 {0xa0, 0xf8, ZC3XX_R12D_GAMMA0D},
1867 {0xa0, 0xfb, ZC3XX_R12E_GAMMA0E},
1868 {0xa0, 0xff, ZC3XX_R12F_GAMMA0F},
1869 {0xa0, 0x26, ZC3XX_R130_GAMMA10},
1870 {0xa0, 0x23, ZC3XX_R131_GAMMA11},
1871 {0xa0, 0x20, ZC3XX_R132_GAMMA12},
1872 {0xa0, 0x1c, ZC3XX_R133_GAMMA13},
1873 {0xa0, 0x16, ZC3XX_R134_GAMMA14},
1874 {0xa0, 0x13, ZC3XX_R135_GAMMA15},
1875 {0xa0, 0x10, ZC3XX_R136_GAMMA16},
1876 {0xa0, 0x0d, ZC3XX_R137_GAMMA17},
1877 {0xa0, 0x0b, ZC3XX_R138_GAMMA18},
1878 {0xa0, 0x09, ZC3XX_R139_GAMMA19},
1879 {0xa0, 0x07, ZC3XX_R13A_GAMMA1A},
1880 {0xa0, 0x06, ZC3XX_R13B_GAMMA1B},
1881 {0xa0, 0x05, ZC3XX_R13C_GAMMA1C},
1882 {0xa0, 0x04, ZC3XX_R13D_GAMMA1D},
1883 {0xa0, 0x03, ZC3XX_R13E_GAMMA1E},
1884 {0xa0, 0x02, ZC3XX_R13F_GAMMA1F},
1885 {0xa0, 0x60, ZC3XX_R10A_RGB00}, /* matrix */
1886 {0xa0, 0xff, ZC3XX_R10B_RGB01},
1887 {0xa0, 0xff, ZC3XX_R10C_RGB02},
1888 {0xa0, 0xff, ZC3XX_R10D_RGB10},
1889 {0xa0, 0x60, ZC3XX_R10E_RGB11},
1890 {0xa0, 0xff, ZC3XX_R10F_RGB12},
1891 {0xa0, 0xff, ZC3XX_R110_RGB20},
1892 {0xa0, 0xff, ZC3XX_R111_RGB21},
1893 {0xa0, 0x60, ZC3XX_R112_RGB22},
1894
1895 {0xa1, 0x01, 0x0180},
1896 {0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
1897 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
1898 {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID},
1899 {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW},
1900 {0xaa, 0x20, 0x0002},
1901 {0xaa, 0x21, 0x001b},
1902 {0xaa, 0x03, 0x0044},
1903 {0xaa, 0x04, 0x0008},
1904 {0xaa, 0x05, 0x001b},
1905 {0xaa, 0x0e, 0x0001},
1906 {0xaa, 0x0f, 0x0000},
1907 {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF},
1908 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
1909 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
1910 {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID},
1911 {0xa0, 0x1b, ZC3XX_R192_EXPOSURELIMITLOW},
1912 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH},
1913 {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID},
1914 {0xa0, 0x4d, ZC3XX_R197_ANTIFLICKERLOW},
1915 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE},
1916 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE},
1917 {0xa0, 0x44, ZC3XX_R01D_HSYNC_0},
1918 {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1},
1919 {0xa0, 0xad, ZC3XX_R01F_HSYNC_2},
1920 {0xa0, 0xeb, ZC3XX_R020_HSYNC_3},
1921 {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID},
1922 {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW},
1923 {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE},
1924 {0xa1, 0x01, 0x0195},
1925 {0xa1, 0x01, 0x0196},
1926 {0xa1, 0x01, 0x0197},
1927 {0xa0, 0x1b, ZC3XX_R192_EXPOSURELIMITLOW},
1928 {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID},
1929 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
1930 {0xa0, 0x1d, ZC3XX_R116_RGAIN},
1931 {0xa0, 0x40, ZC3XX_R117_GGAIN},
1932 {0xa0, 0x99, ZC3XX_R118_BGAIN},
1933 {0xa1, 0x01, 0x0116},
1934 {0xa1, 0x01, 0x0118},
1935 {0xa1, 0x01, 0x0180},
1936 {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
1937 {0xa0, 0x1d, ZC3XX_R116_RGAIN},
1938 {0xa0, 0x40, ZC3XX_R117_GGAIN},
1939 {0xa0, 0x99, ZC3XX_R118_BGAIN},
1940/* {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, */
1941 {0xa0, 0x00, 0x0007},
1942/* {0xa0, 0x18, 0x00fe}, */
1943 {}
1944};
1945static const struct usb_action hdcs2020xb_Initial[] = { 1656static const struct usb_action hdcs2020xb_Initial[] = {
1946 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, 1657 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
1947 {0xa0, 0x11, ZC3XX_R002_CLOCKSELECT}, 1658 {0xa0, 0x11, ZC3XX_R002_CLOCKSELECT},
@@ -2310,67 +2021,6 @@ static const struct usb_action hv7131bxx_Initial[] = { /* 320x240 */
2310 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, 2021 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
2311 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, 2022 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
2312 {0xaa, 0x02, 0x0090}, /* 00,02,80,aa */ 2023 {0xaa, 0x02, 0x0090}, /* 00,02,80,aa */
2313 {0xa1, 0x01, 0x0002},
2314 {0xa0, 0x00, ZC3XX_R092_I2CADDRESSSELECT},
2315 {0xa0, 0x02, ZC3XX_R090_I2CCOMMAND},
2316 {0xa1, 0x01, 0x0091},
2317 {0xa1, 0x01, 0x0095},
2318 {0xa1, 0x01, 0x0096},
2319
2320 {0xa1, 0x01, 0x0008},
2321 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */
2322 {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */
2323 {0xa1, 0x01, 0x01c8},
2324 {0xa1, 0x01, 0x01c9},
2325 {0xa1, 0x01, 0x01ca},
2326 {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */
2327
2328 {0xa0, 0x50, ZC3XX_R10A_RGB00}, /* matrix */
2329 {0xa0, 0xf8, ZC3XX_R10B_RGB01},
2330 {0xa0, 0xf8, ZC3XX_R10C_RGB02},
2331 {0xa0, 0xf8, ZC3XX_R10D_RGB10},
2332 {0xa0, 0x50, ZC3XX_R10E_RGB11},
2333 {0xa0, 0xf8, ZC3XX_R10F_RGB12},
2334 {0xa0, 0xf8, ZC3XX_R110_RGB20},
2335 {0xa0, 0xf8, ZC3XX_R111_RGB21},
2336 {0xa0, 0x50, ZC3XX_R112_RGB22},
2337 {0xa1, 0x01, 0x0180},
2338 {0xa0, 0x10, ZC3XX_R180_AUTOCORRECTENABLE},
2339 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
2340 {0xaa, 0x25, 0x0007},
2341 {0xaa, 0x26, 0x00a1},
2342 {0xaa, 0x27, 0x0020},
2343 {0xaa, 0x20, 0x0000},
2344 {0xaa, 0x21, 0x00a0},
2345 {0xaa, 0x22, 0x0016},
2346 {0xaa, 0x23, 0x0040},
2347
2348 {0xa0, 0x10, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 2F */
2349 {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, /* 4d */
2350 {0xa0, 0x60, ZC3XX_R192_EXPOSURELIMITLOW},
2351 {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH},
2352 {0xa0, 0x86, ZC3XX_R196_ANTIFLICKERMID},
2353 {0xa0, 0xa0, ZC3XX_R197_ANTIFLICKERLOW},
2354 {0xa0, 0x07, ZC3XX_R18C_AEFREEZE},
2355 {0xa0, 0x0f, ZC3XX_R18F_AEUNFREEZE},
2356 {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF},
2357 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP},
2358 {0xa0, 0x00, ZC3XX_R01D_HSYNC_0},
2359 {0xa0, 0xa0, ZC3XX_R01E_HSYNC_1},
2360 {0xa0, 0x16, ZC3XX_R01F_HSYNC_2},
2361 {0xa0, 0x40, ZC3XX_R020_HSYNC_3},
2362 {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN},
2363 {0xa1, 0x01, 0x001d},
2364 {0xa1, 0x01, 0x001e},
2365 {0xa1, 0x01, 0x001f},
2366 {0xa1, 0x01, 0x0020},
2367 {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE},
2368 {0xa1, 0x01, 0x0180},
2369 {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE},
2370 {0xa0, 0x40, ZC3XX_R116_RGAIN},
2371 {0xa0, 0x40, ZC3XX_R117_GGAIN},
2372 {0xa0, 0x40, ZC3XX_R118_BGAIN},
2373/* {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, */
2374 {} 2024 {}
2375}; 2025};
2376 2026
@@ -2418,65 +2068,156 @@ static const struct usb_action hv7131bxx_InitialScale[] = { /* 640x480*/
2418 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, 2068 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
2419 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, 2069 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
2420 {0xaa, 0x02, 0x0090}, /* {0xaa, 0x02, 0x0080}, */ 2070 {0xaa, 0x02, 0x0090}, /* {0xaa, 0x02, 0x0080}, */
2421 {0xa1, 0x01, 0x0002}, 2071 {}
2422 {0xa0, 0x00, ZC3XX_R092_I2CADDRESSSELECT}, 2072};
2423 {0xa0, 0x02, ZC3XX_R090_I2CCOMMAND}, 2073static const struct usb_action hv7131b_50HZ[] = { /* 640x480*/
2424 {0xa1, 0x01, 0x0091}, 2074 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
2425 {0xa1, 0x01, 0x0095}, 2075 {0xaa, 0x25, 0x0007}, /* 00,25,07,aa */
2426 {0xa1, 0x01, 0x0096}, 2076 {0xaa, 0x26, 0x0053}, /* 00,26,53,aa */
2427 {0xa1, 0x01, 0x0008}, 2077 {0xaa, 0x27, 0x0000}, /* 00,27,00,aa */
2428 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* clock ? */ 2078 {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */
2429 {0xa0, 0x08, ZC3XX_R1C6_SHARPNESS00}, /* sharpness+ */ 2079 {0xaa, 0x21, 0x0050}, /* 00,21,50,aa */
2430 {0xa1, 0x01, 0x01c8}, 2080 {0xaa, 0x22, 0x001b}, /* 00,22,1b,aa */
2431 {0xa1, 0x01, 0x01c9}, 2081 {0xaa, 0x23, 0x00fc}, /* 00,23,fc,aa */
2432 {0xa1, 0x01, 0x01ca}, 2082 {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */
2433 {0xa0, 0x0f, ZC3XX_R1CB_SHARPNESS05}, /* sharpness- */ 2083 {0xa0, 0x9b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,9b,cc */
2434 2084 {0xa0, 0x80, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,80,cc */
2435 {0xa0, 0x50, ZC3XX_R10A_RGB00}, /* matrix */ 2085 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
2436 {0xa0, 0xf8, ZC3XX_R10B_RGB01}, 2086 {0xa0, 0xea, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,ea,cc */
2437 {0xa0, 0xf8, ZC3XX_R10C_RGB02}, 2087 {0xa0, 0x60, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,60,cc */
2438 {0xa0, 0xf8, ZC3XX_R10D_RGB10}, 2088 {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0c,cc */
2439 {0xa0, 0x50, ZC3XX_R10E_RGB11}, 2089 {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,18,cc */
2440 {0xa0, 0xf8, ZC3XX_R10F_RGB12}, 2090 {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,18,cc */
2441 {0xa0, 0xf8, ZC3XX_R110_RGB20}, 2091 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */
2442 {0xa0, 0xf8, ZC3XX_R111_RGB21}, 2092 {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */
2443 {0xa0, 0x50, ZC3XX_R112_RGB22}, 2093 {0xa0, 0x50, ZC3XX_R01E_HSYNC_1}, /* 00,1e,50,cc */
2444 {0xa1, 0x01, 0x0180}, 2094 {0xa0, 0x1b, ZC3XX_R01F_HSYNC_2}, /* 00,1f,1b,cc */
2445 {0xa0, 0x10, ZC3XX_R180_AUTOCORRECTENABLE}, 2095 {0xa0, 0xfc, ZC3XX_R020_HSYNC_3}, /* 00,20,fc,cc */
2446 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, 2096 {}
2447 {0xaa, 0x25, 0x0007}, 2097};
2448 {0xaa, 0x26, 0x00a1}, 2098static const struct usb_action hv7131b_50HZScale[] = { /* 320x240 */
2449 {0xaa, 0x27, 0x0020}, 2099 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
2450 {0xaa, 0x20, 0x0000}, 2100 {0xaa, 0x25, 0x0007}, /* 00,25,07,aa */
2451 {0xaa, 0x21, 0x0040}, 2101 {0xaa, 0x26, 0x0053}, /* 00,26,53,aa */
2452 {0xaa, 0x22, 0x0013}, 2102 {0xaa, 0x27, 0x0000}, /* 00,27,00,aa */
2453 {0xaa, 0x23, 0x004c}, 2103 {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */
2454 {0xa0, 0x10, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 2f */ 2104 {0xaa, 0x21, 0x0050}, /* 00,21,50,aa */
2455 {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, /* 4d */ 2105 {0xaa, 0x22, 0x0012}, /* 00,22,12,aa */
2456 {0xa0, 0x60, ZC3XX_R192_EXPOSURELIMITLOW}, /* 60 */ 2106 {0xaa, 0x23, 0x0080}, /* 00,23,80,aa */
2457 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, 2107 {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */
2458 {0xa0, 0xc3, ZC3XX_R196_ANTIFLICKERMID}, 2108 {0xa0, 0x9b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,9b,cc */
2459 {0xa0, 0x50, ZC3XX_R197_ANTIFLICKERLOW}, 2109 {0xa0, 0x80, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,80,cc */
2460 {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, 2110 {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,01,cc */
2461 {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, 2111 {0xa0, 0xd4, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,d4,cc */
2462 {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, 2112 {0xa0, 0xc0, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,c0,cc */
2463 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, 2113 {0xa0, 0x07, ZC3XX_R18C_AEFREEZE}, /* 01,8c,07,cc */
2464 {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, 2114 {0xa0, 0x0f, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,0f,cc */
2465 {0xa0, 0x40, ZC3XX_R01E_HSYNC_1}, 2115 {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,18,cc */
2466 {0xa0, 0x13, ZC3XX_R01F_HSYNC_2}, 2116 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */
2467 {0xa0, 0x4c, ZC3XX_R020_HSYNC_3}, 2117 {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */
2468 {0xa0, 0x60, ZC3XX_R11D_GLOBALGAIN}, 2118 {0xa0, 0x50, ZC3XX_R01E_HSYNC_1}, /* 00,1e,50,cc */
2469 {0xa1, 0x01, 0x001d}, 2119 {0xa0, 0x12, ZC3XX_R01F_HSYNC_2}, /* 00,1f,12,cc */
2470 {0xa1, 0x01, 0x001e}, 2120 {0xa0, 0x80, ZC3XX_R020_HSYNC_3}, /* 00,20,80,cc */
2471 {0xa1, 0x01, 0x001f}, 2121 {}
2472 {0xa1, 0x01, 0x0020}, 2122};
2473 {0xa0, 0x40, ZC3XX_R180_AUTOCORRECTENABLE}, 2123static const struct usb_action hv7131b_60HZ[] = { /* 640x480*/
2474 {0xa1, 0x01, 0x0180}, 2124 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
2475 {0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, 2125 {0xaa, 0x25, 0x0007}, /* 00,25,07,aa */
2476 {0xa0, 0x40, ZC3XX_R116_RGAIN}, 2126 {0xaa, 0x26, 0x00a1}, /* 00,26,a1,aa */
2477 {0xa0, 0x40, ZC3XX_R117_GGAIN}, 2127 {0xaa, 0x27, 0x0020}, /* 00,27,20,aa */
2478 {0xa0, 0x40, ZC3XX_R118_BGAIN}, 2128 {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */
2479/* {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, */ 2129 {0xaa, 0x21, 0x0040}, /* 00,21,40,aa */
2130 {0xaa, 0x22, 0x0013}, /* 00,22,13,aa */
2131 {0xaa, 0x23, 0x004c}, /* 00,23,4c,aa */
2132 {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */
2133 {0xa0, 0x4d, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,4d,cc */
2134 {0xa0, 0x60, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,60,cc */
2135 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
2136 {0xa0, 0xc3, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,c3,cc */
2137 {0xa0, 0x50, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,50,cc */
2138 {0xa0, 0x0c, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0c,cc */
2139 {0xa0, 0x18, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,18,cc */
2140 {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,18,cc */
2141 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */
2142 {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */
2143 {0xa0, 0x40, ZC3XX_R01E_HSYNC_1}, /* 00,1e,40,cc */
2144 {0xa0, 0x13, ZC3XX_R01F_HSYNC_2}, /* 00,1f,13,cc */
2145 {0xa0, 0x4c, ZC3XX_R020_HSYNC_3}, /* 00,20,4c,cc */
2146 {}
2147};
2148static const struct usb_action hv7131b_60HZScale[] = { /* 320x240 */
2149 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
2150 {0xaa, 0x25, 0x0007}, /* 00,25,07,aa */
2151 {0xaa, 0x26, 0x00a1}, /* 00,26,a1,aa */
2152 {0xaa, 0x27, 0x0020}, /* 00,27,20,aa */
2153 {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */
2154 {0xaa, 0x21, 0x00a0}, /* 00,21,a0,aa */
2155 {0xaa, 0x22, 0x0016}, /* 00,22,16,aa */
2156 {0xaa, 0x23, 0x0040}, /* 00,23,40,aa */
2157 {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */
2158 {0xa0, 0x4d, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,4d,cc */
2159 {0xa0, 0x60, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,60,cc */
2160 {0xa0, 0x01, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,01,cc */
2161 {0xa0, 0x86, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,86,cc */
2162 {0xa0, 0xa0, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,a0,cc */
2163 {0xa0, 0x07, ZC3XX_R18C_AEFREEZE}, /* 01,8c,07,cc */
2164 {0xa0, 0x0f, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,0f,cc */
2165 {0xa0, 0x18, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,18,cc */
2166 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */
2167 {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */
2168 {0xa0, 0xa0, ZC3XX_R01E_HSYNC_1}, /* 00,1e,a0,cc */
2169 {0xa0, 0x16, ZC3XX_R01F_HSYNC_2}, /* 00,1f,16,cc */
2170 {0xa0, 0x40, ZC3XX_R020_HSYNC_3}, /* 00,20,40,cc */
2171 {}
2172};
2173static const struct usb_action hv7131b_NoFliker[] = { /* 640x480*/
2174 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
2175 {0xaa, 0x25, 0x0003}, /* 00,25,03,aa */
2176 {0xaa, 0x26, 0x0000}, /* 00,26,00,aa */
2177 {0xaa, 0x27, 0x0000}, /* 00,27,00,aa */
2178 {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */
2179 {0xaa, 0x21, 0x0010}, /* 00,21,10,aa */
2180 {0xaa, 0x22, 0x0000}, /* 00,22,00,aa */
2181 {0xaa, 0x23, 0x0003}, /* 00,23,03,aa */
2182 {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */
2183 {0xa0, 0xf8, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,f8,cc */
2184 {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,00,cc */
2185 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
2186 {0xa0, 0x02, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,02,cc */
2187 {0xa0, 0x00, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,00,cc */
2188 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */
2189 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */
2190 {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */
2191 {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */
2192 {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */
2193 {0xa0, 0x10, ZC3XX_R01E_HSYNC_1}, /* 00,1e,10,cc */
2194 {0xa0, 0x00, ZC3XX_R01F_HSYNC_2}, /* 00,1f,00,cc */
2195 {0xa0, 0x03, ZC3XX_R020_HSYNC_3}, /* 00,20,03,cc */
2196 {}
2197};
2198static const struct usb_action hv7131b_NoFlikerScale[] = { /* 320x240 */
2199 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
2200 {0xaa, 0x25, 0x0003}, /* 00,25,03,aa */
2201 {0xaa, 0x26, 0x0000}, /* 00,26,00,aa */
2202 {0xaa, 0x27, 0x0000}, /* 00,27,00,aa */
2203 {0xaa, 0x20, 0x0000}, /* 00,20,00,aa */
2204 {0xaa, 0x21, 0x00a0}, /* 00,21,a0,aa */
2205 {0xaa, 0x22, 0x0016}, /* 00,22,16,aa */
2206 {0xaa, 0x23, 0x0040}, /* 00,23,40,aa */
2207 {0xa0, 0x2f, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,2f,cc */
2208 {0xa0, 0xf8, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,f8,cc */
2209 {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,00,cc */
2210 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
2211 {0xa0, 0x02, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,02,cc */
2212 {0xa0, 0x00, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,00,cc */
2213 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */
2214 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */
2215 {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */
2216 {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */
2217 {0xa0, 0x00, ZC3XX_R01D_HSYNC_0}, /* 00,1d,00,cc */
2218 {0xa0, 0xa0, ZC3XX_R01E_HSYNC_1}, /* 00,1e,a0,cc */
2219 {0xa0, 0x16, ZC3XX_R01F_HSYNC_2}, /* 00,1f,16,cc */
2220 {0xa0, 0x40, ZC3XX_R020_HSYNC_3}, /* 00,20,40,cc */
2480 {} 2221 {}
2481}; 2222};
2482 2223
@@ -4389,6 +4130,270 @@ static const struct usb_action pas106b_NoFliker[] = {
4389 {} 4130 {}
4390}; 4131};
4391 4132
4133/* from usbvm31b.inf */
4134static const struct usb_action pas202b_Initial[] = { /* 640x480 */
4135 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */
4136 {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */
4137 {0xa0, 0x0e, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,0e,cc */
4138 {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* 00,02,00,cc */
4139 {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */
4140 {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */
4141 {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */
4142 {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,e0,cc */
4143 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */
4144 {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */
4145 {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */
4146 {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, /* 00,8d,08,cc */
4147 {0xa0, 0x00, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,00,cc */
4148 {0xa0, 0x03, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,03,cc */
4149 {0xa0, 0x00, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,00,cc */
4150 {0xa0, 0x03, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,03,cc */
4151 {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, /* 00,9b,01,cc */
4152 {0xa0, 0xe6, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,e6,cc */
4153 {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, /* 00,9d,02,cc */
4154 {0xa0, 0x86, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,86,cc */
4155 {0xaa, 0x02, 0x0002}, /* 00,02,04,aa --> 02 */
4156 {0xaa, 0x07, 0x0006}, /* 00,07,06,aa */
4157 {0xaa, 0x08, 0x0002}, /* 00,08,02,aa */
4158 {0xaa, 0x09, 0x0006}, /* 00,09,06,aa */
4159 {0xaa, 0x0a, 0x0001}, /* 00,0a,01,aa */
4160 {0xaa, 0x0b, 0x0001}, /* 00,0b,01,aa */
4161 {0xaa, 0x0c, 0x0008}, /* 00,0c,08,aa */
4162 {0xaa, 0x0d, 0x0000}, /* 00,0d,00,aa */
4163 {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */
4164 {0xaa, 0x12, 0x0005}, /* 00,12,05,aa */
4165 {0xaa, 0x13, 0x0063}, /* 00,13,63,aa */
4166 {0xaa, 0x15, 0x0070}, /* 00,15,70,aa */
4167 {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,b7,cc */
4168 {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */
4169 {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */
4170 {0xa0, 0x00, 0x01ad}, /* 01,ad,00,cc */
4171 {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */
4172 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */
4173 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */
4174 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */
4175 {0xa0, 0x70, ZC3XX_R18D_YTARGET}, /* 01,8d,70,cc */
4176 {}
4177};
4178static const struct usb_action pas202b_InitialScale[] = { /* 320x240 */
4179 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */
4180 {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */
4181 {0xa0, 0x0e, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,0e,cc */
4182 {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */
4183 {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */
4184 {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */
4185 {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */
4186 {0xa0, 0xd0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,d0,cc */
4187 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */
4188 {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */
4189 {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */
4190 {0xa0, 0x08, ZC3XX_R08D_COMPABILITYMODE}, /* 00,8d,08,cc */
4191 {0xa0, 0x08, ZC3XX_R098_WINYSTARTLOW}, /* 00,98,08,cc */
4192 {0xa0, 0x02, ZC3XX_R09A_WINXSTARTLOW}, /* 00,9a,02,cc */
4193 {0xa0, 0x08, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,08,cc */
4194 {0xa0, 0x02, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,02,cc */
4195 {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, /* 00,9b,01,cc */
4196 {0xa0, 0xd8, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,d8,cc */
4197 {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, /* 00,9d,02,cc */
4198 {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,88,cc */
4199 {0xaa, 0x02, 0x0002}, /* 00,02,02,aa */
4200 {0xaa, 0x07, 0x0006}, /* 00,07,06,aa */
4201 {0xaa, 0x08, 0x0002}, /* 00,08,02,aa */
4202 {0xaa, 0x09, 0x0006}, /* 00,09,06,aa */
4203 {0xaa, 0x0a, 0x0001}, /* 00,0a,01,aa */
4204 {0xaa, 0x0b, 0x0001}, /* 00,0b,01,aa */
4205 {0xaa, 0x0c, 0x0008}, /* 00,0c,08,aa */
4206 {0xaa, 0x0d, 0x0000}, /* 00,0d,00,aa */
4207 {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */
4208 {0xaa, 0x12, 0x0005}, /* 00,12,05,aa */
4209 {0xaa, 0x13, 0x0063}, /* 00,13,63,aa */
4210 {0xaa, 0x15, 0x0070}, /* 00,15,70,aa */
4211 {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,37,cc */
4212 {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */
4213 {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */
4214 {0xa0, 0x00, 0x01ad}, /* 01,ad,00,cc */
4215 {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */
4216 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc */
4217 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */
4218 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */
4219 {0xa0, 0x70, ZC3XX_R18D_YTARGET}, /* 01,8d,70,cc */
4220 {}
4221};
4222static const struct usb_action pas202b_50HZ[] = {
4223 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
4224 {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */
4225 {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */
4226 {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */
4227 {0xaa, 0x21, 0x0068}, /* 00,21,68,aa */
4228 {0xaa, 0x03, 0x0044}, /* 00,03,44,aa */
4229 {0xaa, 0x04, 0x0009}, /* 00,04,09,aa */
4230 {0xaa, 0x05, 0x0028}, /* 00,05,28,aa */
4231 {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */
4232 {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */
4233 {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,14,cc */
4234 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */
4235 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */
4236 {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,07,cc */
4237 {0xa0, 0xd2, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,d2,cc */
4238 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
4239 {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */
4240 {0xa0, 0x4d, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,4d,cc */
4241 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */
4242 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */
4243 {0xa0, 0x44, ZC3XX_R01D_HSYNC_0}, /* 00,1d,44,cc */
4244 {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */
4245 {0xa0, 0xad, ZC3XX_R01F_HSYNC_2}, /* 00,1f,ad,cc */
4246 {0xa0, 0xeb, ZC3XX_R020_HSYNC_3}, /* 00,20,eb,cc */
4247 {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */
4248 {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */
4249 {}
4250};
4251static const struct usb_action pas202b_50HZScale[] = {
4252 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
4253 {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */
4254 {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */
4255 {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */
4256 {0xaa, 0x21, 0x006c}, /* 00,21,6c,aa */
4257 {0xaa, 0x03, 0x0041}, /* 00,03,41,aa */
4258 {0xaa, 0x04, 0x0009}, /* 00,04,09,aa */
4259 {0xaa, 0x05, 0x002c}, /* 00,05,2c,aa */
4260 {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */
4261 {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */
4262 {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,14,cc */
4263 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */
4264 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */
4265 {0xa0, 0x0f, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0f,cc */
4266 {0xa0, 0xbe, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,be,cc */
4267 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
4268 {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */
4269 {0xa0, 0x9b, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,9b,cc */
4270 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */
4271 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */
4272 {0xa0, 0x41, ZC3XX_R01D_HSYNC_0}, /* 00,1d,41,cc */
4273 {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */
4274 {0xa0, 0xad, ZC3XX_R01F_HSYNC_2}, /* 00,1f,ad,cc */
4275 {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */
4276 {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */
4277 {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */
4278 {}
4279};
4280static const struct usb_action pas202b_60HZ[] = {
4281 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
4282 {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */
4283 {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */
4284 {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */
4285 {0xaa, 0x21, 0x0000}, /* 00,21,00,aa */
4286 {0xaa, 0x03, 0x0045}, /* 00,03,45,aa */
4287 {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */
4288 {0xaa, 0x05, 0x0000}, /* 00,05,00,aa */
4289 {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */
4290 {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */
4291 {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,14,cc */
4292 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */
4293 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */
4294 {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,07,cc */
4295 {0xa0, 0xc0, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,c0,cc */
4296 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
4297 {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */
4298 {0xa0, 0x40, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,40,cc */
4299 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */
4300 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */
4301 {0xa0, 0x45, ZC3XX_R01D_HSYNC_0}, /* 00,1d,45,cc */
4302 {0xa0, 0x8e, ZC3XX_R01E_HSYNC_1}, /* 00,1e,8e,cc */
4303 {0xa0, 0xc1, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c1,cc */
4304 {0xa0, 0xf5, ZC3XX_R020_HSYNC_3}, /* 00,20,f5,cc */
4305 {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */
4306 {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */
4307 {}
4308};
4309static const struct usb_action pas202b_60HZScale[] = {
4310 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
4311 {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */
4312 {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */
4313 {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */
4314 {0xaa, 0x21, 0x0004}, /* 00,21,04,aa */
4315 {0xaa, 0x03, 0x0042}, /* 00,03,42,aa */
4316 {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */
4317 {0xaa, 0x05, 0x0004}, /* 00,05,04,aa */
4318 {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */
4319 {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */
4320 {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,14,cc */
4321 {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */
4322 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */
4323 {0xa0, 0x0f, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0f,cc */
4324 {0xa0, 0x9f, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,9f,cc */
4325 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
4326 {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */
4327 {0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,81,cc */
4328 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */
4329 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */
4330 {0xa0, 0x42, ZC3XX_R01D_HSYNC_0}, /* 00,1d,42,cc */
4331 {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */
4332 {0xa0, 0xaf, ZC3XX_R01F_HSYNC_2}, /* 00,1f,af,cc */
4333 {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */
4334 {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */
4335 {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */
4336 {}
4337};
4338static const struct usb_action pas202b_NoFliker[] = {
4339 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
4340 {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */
4341 {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */
4342 {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */
4343 {0xaa, 0x21, 0x0020}, /* 00,21,20,aa */
4344 {0xaa, 0x03, 0x0040}, /* 00,03,40,aa */
4345 {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */
4346 {0xaa, 0x05, 0x0020}, /* 00,05,20,aa */
4347 {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */
4348 {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */
4349 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */
4350 {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,07,cc */
4351 {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,f0,cc */
4352 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
4353 {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */
4354 {0xa0, 0x02, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,02,cc */
4355 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */
4356 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */
4357 {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */
4358 {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */
4359 {0xa0, 0x40, ZC3XX_R01D_HSYNC_0}, /* 00,1d,40,cc */
4360 {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, /* 00,1e,60,cc */
4361 {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, /* 00,1f,90,cc */
4362 {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */
4363 {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */
4364 {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */
4365 {}
4366};
4367static const struct usb_action pas202b_NoFlikerScale[] = {
4368 {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */
4369 {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */
4370 {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */
4371 {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */
4372 {0xaa, 0x21, 0x0010}, /* 00,21,10,aa */
4373 {0xaa, 0x03, 0x0040}, /* 00,03,40,aa */
4374 {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */
4375 {0xaa, 0x05, 0x0010}, /* 00,05,10,aa */
4376 {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */
4377 {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */
4378 {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */
4379 {0xa0, 0x0f, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0f,cc */
4380 {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,f0,cc */
4381 {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */
4382 {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */
4383 {0xa0, 0x02, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,02,cc */
4384 {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */
4385 {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */
4386 {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */
4387 {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */
4388 {0xa0, 0x40, ZC3XX_R01D_HSYNC_0}, /* 00,1d,40,cc */
4389 {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, /* 00,1e,60,cc */
4390 {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, /* 00,1f,90,cc */
4391 {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */
4392 {0xa0, 0x0f, ZC3XX_R087_EXPTIMEMID}, /* 00,87,0f,cc */
4393 {0xa0, 0x0e, ZC3XX_R088_EXPTIMELOW}, /* 00,88,0e,cc */
4394 {}
4395};
4396
4392static const struct usb_action pb03303x_Initial[] = { 4397static const struct usb_action pb03303x_Initial[] = {
4393 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, 4398 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
4394 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, 4399 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
@@ -5725,7 +5730,7 @@ static const struct usb_action tas5130cxx_Initial[] = {
5725 {} 5730 {}
5726}; 5731};
5727static const struct usb_action tas5130cxx_InitialScale[] = { 5732static const struct usb_action tas5130cxx_InitialScale[] = {
5728 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, 5733/*?? {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, */
5729 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, 5734 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
5730 {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT}, 5735 {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT},
5731 5736
@@ -6049,7 +6054,7 @@ static const struct usb_action tas5130c_vf0250_InitialScale[] = {
6049 {0xaa, 0x1b, 0x0000}, /* 00,1b,00,aa, */ 6054 {0xaa, 0x1b, 0x0000}, /* 00,1b,00,aa, */
6050 {0xaa, 0x13, 0x0002}, /* 00,13,02,aa, */ 6055 {0xaa, 0x13, 0x0002}, /* 00,13,02,aa, */
6051 {0xaa, 0x15, 0x0004}, /* 00,15,04,aa */ 6056 {0xaa, 0x15, 0x0004}, /* 00,15,04,aa */
6052 {0xaa, 0x01, 0x0000}, 6057/*?? {0xaa, 0x01, 0x0000}, */
6053 {0xaa, 0x01, 0x0000}, 6058 {0xaa, 0x01, 0x0000},
6054 {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa, */ 6059 {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa, */
6055 {0xaa, 0x1c, 0x0017}, /* 00,1c,17,aa, */ 6060 {0xaa, 0x1c, 0x0017}, /* 00,1c,17,aa, */
@@ -6065,8 +6070,8 @@ static const struct usb_action tas5130c_vf0250_InitialScale[] = {
6065 {0xaa, 0x0f, 0x00a0}, /* 00,0f,a0,aa, */ 6070 {0xaa, 0x0f, 0x00a0}, /* 00,0f,a0,aa, */
6066 {0xaa, 0x10, 0x0000}, /* 00,10,00,aa, */ 6071 {0xaa, 0x10, 0x0000}, /* 00,10,00,aa, */
6067 {0xaa, 0x11, 0x00a0}, /* 00,11,a0,aa, */ 6072 {0xaa, 0x11, 0x00a0}, /* 00,11,a0,aa, */
6068 {0xa0, 0x00, 0x0039}, 6073/*?? {0xa0, 0x00, 0x0039},
6069 {0xa1, 0x01, 0x0037}, 6074 {0xa1, 0x01, 0x0037}, */
6070 {0xaa, 0x16, 0x0001}, /* 00,16,01,aa, */ 6075 {0xaa, 0x16, 0x0001}, /* 00,16,01,aa, */
6071 {0xaa, 0x17, 0x00e8}, /* 00,17,e6,aa (e6 -> e8) */ 6076 {0xaa, 0x17, 0x00e8}, /* 00,17,e6,aa (e6 -> e8) */
6072 {0xaa, 0x18, 0x0002}, /* 00,18,02,aa, */ 6077 {0xaa, 0x18, 0x0002}, /* 00,18,02,aa, */
@@ -6303,7 +6308,7 @@ static __u8 i2c_write(struct gspca_dev *gspca_dev,
6303 reg_w_i(gspca_dev->dev, valL, 0x93); 6308 reg_w_i(gspca_dev->dev, valL, 0x93);
6304 reg_w_i(gspca_dev->dev, valH, 0x94); 6309 reg_w_i(gspca_dev->dev, valH, 0x94);
6305 reg_w_i(gspca_dev->dev, 0x01, 0x90); /* <- write command */ 6310 reg_w_i(gspca_dev->dev, 0x01, 0x90); /* <- write command */
6306 msleep(5); 6311 msleep(15);
6307 retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */ 6312 retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */
6308 PDEBUG(D_USBO, "i2c w [%02x] = %02x%02x (%02x)", 6313 PDEBUG(D_USBO, "i2c w [%02x] = %02x%02x (%02x)",
6309 reg, valH, valL, retbyte); 6314 reg, valH, valL, retbyte);
@@ -6346,30 +6351,35 @@ static void setmatrix(struct gspca_dev *gspca_dev)
6346 {0x50, 0xf8, 0xf8, 0xf8, 0x50, 0xf8, 0xf8, 0xf8, 0x50}; 6351 {0x50, 0xf8, 0xf8, 0xf8, 0x50, 0xf8, 0xf8, 0xf8, 0x50};
6347 static const __u8 ov7620_matrix[9] = 6352 static const __u8 ov7620_matrix[9] =
6348 {0x58, 0xf4, 0xf4, 0xf4, 0x58, 0xf4, 0xf4, 0xf4, 0x58}; 6353 {0x58, 0xf4, 0xf4, 0xf4, 0x58, 0xf4, 0xf4, 0xf4, 0x58};
6354 static const __u8 pas202b_matrix[9] =
6355 {0x4c, 0xf5, 0xff, 0xf9, 0x51, 0xf5, 0xfb, 0xed, 0x5f};
6349 static const __u8 po2030_matrix[9] = 6356 static const __u8 po2030_matrix[9] =
6350 {0x60, 0xf0, 0xf0, 0xf0, 0x60, 0xf0, 0xf0, 0xf0, 0x60}; 6357 {0x60, 0xf0, 0xf0, 0xf0, 0x60, 0xf0, 0xf0, 0xf0, 0x60};
6351 static const __u8 vf0250_matrix[9] = 6358 static const __u8 vf0250_matrix[9] =
6352 {0x7b, 0xea, 0xea, 0xea, 0x7b, 0xea, 0xea, 0xea, 0x7b}; 6359 {0x7b, 0xea, 0xea, 0xea, 0x7b, 0xea, 0xea, 0xea, 0x7b};
6360 static const __u8 *matrix_tb[SENSOR_MAX] = {
6361 NULL, /* SENSOR_CS2102 0 */
6362 NULL, /* SENSOR_CS2102K 1 */
6363 gc0305_matrix, /* SENSOR_GC0305 2 */
6364 NULL, /* SENSOR_HDCS2020b 3 */
6365 NULL, /* SENSOR_HV7131B 4 */
6366 NULL, /* SENSOR_HV7131C 5 */
6367 NULL, /* SENSOR_ICM105A 6 */
6368 NULL, /* SENSOR_MC501CB 7 */
6369 ov7620_matrix, /* SENSOR_OV7620 8 */
6370 NULL, /* SENSOR_OV7630C 9 */
6371 NULL, /* SENSOR_PAS106 10 */
6372 pas202b_matrix, /* SENSOR_PAS202B 11 */
6373 NULL, /* SENSOR_PB0330 12 */
6374 po2030_matrix, /* SENSOR_PO2030 13 */
6375 NULL, /* SENSOR_TAS5130CK 14 */
6376 NULL, /* SENSOR_TAS5130CXX 15 */
6377 vf0250_matrix, /* SENSOR_TAS5130C_VF0250 16 */
6378 };
6353 6379
6354 switch (sd->sensor) { 6380 matrix = matrix_tb[sd->sensor];
6355 case SENSOR_GC0305: 6381 if (matrix == NULL)
6356 matrix = gc0305_matrix; 6382 return; /* matrix already loaded */
6357 break;
6358 case SENSOR_MC501CB:
6359 return; /* no matrix? */
6360 case SENSOR_OV7620:
6361/* case SENSOR_OV7648: */
6362 matrix = ov7620_matrix;
6363 break;
6364 case SENSOR_PO2030:
6365 matrix = po2030_matrix;
6366 break;
6367 case SENSOR_TAS5130C_VF0250:
6368 matrix = vf0250_matrix;
6369 break;
6370 default: /* matrix already loaded */
6371 return;
6372 }
6373 for (i = 0; i < ARRAY_SIZE(ov7620_matrix); i++) 6383 for (i = 0; i < ARRAY_SIZE(ov7620_matrix); i++)
6374 reg_w(gspca_dev->dev, matrix[i], 0x010a + i); 6384 reg_w(gspca_dev->dev, matrix[i], 0x010a + i);
6375} 6385}
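
The setmatrix() hunk above replaces a per-sensor switch with a sensor-indexed lookup table in which a NULL entry means the colour matrix is already loaded and nothing is written. A minimal, self-contained sketch of that pattern follows (plain C, not driver code; the function and table names are placeholders — only the 0x010a register base and the gc0305 coefficients are taken from the diff):

    #include <stdio.h>

    #define SENSOR_MAX 3

    /* gc0305 coefficients copied from the table in the hunk above */
    static const unsigned char demo_matrix[9] =
            {0x50, 0xf8, 0xf8, 0xf8, 0x50, 0xf8, 0xf8, 0xf8, 0x50};

    static const unsigned char *matrix_tb[SENSOR_MAX] = {
            NULL,           /* sensor 0: matrix already loaded */
            demo_matrix,    /* sensor 1 */
            NULL,           /* sensor 2 */
    };

    static void setmatrix_demo(int sensor)
    {
            const unsigned char *matrix = matrix_tb[sensor];
            int i;

            if (matrix == NULL)
                    return;                 /* nothing to write */
            for (i = 0; i < 9; i++)         /* registers 0x010a..0x0112 */
                    printf("reg_w(0x%02x, 0x%04x)\n", matrix[i], 0x010a + i);
    }

    int main(void)
    {
            setmatrix_demo(1);      /* writes the nine coefficients */
            setmatrix_demo(0);      /* NULL entry: returns immediately */
            return 0;
    }
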
@@ -6585,42 +6595,42 @@ static int setlightfreq(struct gspca_dev *gspca_dev)
6585 {gc0305_NoFliker, gc0305_NoFliker, 6595 {gc0305_NoFliker, gc0305_NoFliker,
6586 gc0305_50HZ, gc0305_50HZ, 6596 gc0305_50HZ, gc0305_50HZ,
6587 gc0305_60HZ, gc0305_60HZ}, 6597 gc0305_60HZ, gc0305_60HZ},
6588/* SENSOR_HDCS2020 3 */ 6598/* SENSOR_HDCS2020b 3 */
6589 {NULL, NULL,
6590 NULL, NULL,
6591 NULL, NULL},
6592/* SENSOR_HDCS2020b 4 */
6593 {hdcs2020b_NoFliker, hdcs2020b_NoFliker, 6599 {hdcs2020b_NoFliker, hdcs2020b_NoFliker,
6594 hdcs2020b_50HZ, hdcs2020b_50HZ, 6600 hdcs2020b_50HZ, hdcs2020b_50HZ,
6595 hdcs2020b_60HZ, hdcs2020b_60HZ}, 6601 hdcs2020b_60HZ, hdcs2020b_60HZ},
6596/* SENSOR_HV7131B 5 */ 6602/* SENSOR_HV7131B 4 */
6603 {hv7131b_NoFlikerScale, hv7131b_NoFliker,
6604 hv7131b_50HZScale, hv7131b_50HZ,
6605 hv7131b_60HZScale, hv7131b_60HZ},
6606/* SENSOR_HV7131C 5 */
6597 {NULL, NULL, 6607 {NULL, NULL,
6598 NULL, NULL, 6608 NULL, NULL,
6599 NULL, NULL}, 6609 NULL, NULL},
6600/* SENSOR_HV7131C 6 */ 6610/* SENSOR_ICM105A 6 */
6601 {NULL, NULL,
6602 NULL, NULL,
6603 NULL, NULL},
6604/* SENSOR_ICM105A 7 */
6605 {icm105a_NoFliker, icm105a_NoFlikerScale, 6611 {icm105a_NoFliker, icm105a_NoFlikerScale,
6606 icm105a_50HZ, icm105a_50HZScale, 6612 icm105a_50HZ, icm105a_50HZScale,
6607 icm105a_60HZ, icm105a_60HZScale}, 6613 icm105a_60HZ, icm105a_60HZScale},
6608/* SENSOR_MC501CB 8 */ 6614/* SENSOR_MC501CB 7 */
6609 {MC501CB_NoFliker, MC501CB_NoFlikerScale, 6615 {MC501CB_NoFliker, MC501CB_NoFlikerScale,
6610 MC501CB_50HZ, MC501CB_50HZScale, 6616 MC501CB_50HZ, MC501CB_50HZScale,
6611 MC501CB_60HZ, MC501CB_60HZScale}, 6617 MC501CB_60HZ, MC501CB_60HZScale},
6612/* SENSOR_OV7620 9 */ 6618/* SENSOR_OV7620 8 */
6613 {OV7620_NoFliker, OV7620_NoFliker, 6619 {OV7620_NoFliker, OV7620_NoFliker,
6614 OV7620_50HZ, OV7620_50HZ, 6620 OV7620_50HZ, OV7620_50HZ,
6615 OV7620_60HZ, OV7620_60HZ}, 6621 OV7620_60HZ, OV7620_60HZ},
6616/* SENSOR_OV7630C 10 */ 6622/* SENSOR_OV7630C 9 */
6617 {NULL, NULL, 6623 {NULL, NULL,
6618 NULL, NULL, 6624 NULL, NULL,
6619 NULL, NULL}, 6625 NULL, NULL},
6620/* SENSOR_PAS106 11 */ 6626/* SENSOR_PAS106 10 */
6621 {pas106b_NoFliker, pas106b_NoFliker, 6627 {pas106b_NoFliker, pas106b_NoFliker,
6622 pas106b_50HZ, pas106b_50HZ, 6628 pas106b_50HZ, pas106b_50HZ,
6623 pas106b_60HZ, pas106b_60HZ}, 6629 pas106b_60HZ, pas106b_60HZ},
6630/* SENSOR_PAS202B 11 */
6631 {pas202b_NoFlikerScale, pas202b_NoFliker,
6632 pas202b_50HZScale, pas202b_50HZ,
6633 pas202b_60HZScale, pas202b_60HZ},
6624/* SENSOR_PB0330 12 */ 6634/* SENSOR_PB0330 12 */
6625 {pb0330_NoFliker, pb0330_NoFlikerScale, 6635 {pb0330_NoFliker, pb0330_NoFlikerScale,
6626 pb0330_50HZ, pb0330_50HZScale, 6636 pb0330_50HZ, pb0330_50HZScale,
@@ -7002,15 +7012,15 @@ static int sd_config(struct gspca_dev *gspca_dev,
7002 5, /* SENSOR_CS2102 0 */ 7012 5, /* SENSOR_CS2102 0 */
7003 5, /* SENSOR_CS2102K 1 */ 7013 5, /* SENSOR_CS2102K 1 */
7004 4, /* SENSOR_GC0305 2 */ 7014 4, /* SENSOR_GC0305 2 */
7005 4, /* SENSOR_HDCS2020 3 */ 7015 4, /* SENSOR_HDCS2020b 3 */
7006 4, /* SENSOR_HDCS2020b 4 */ 7016 4, /* SENSOR_HV7131B 4 */
7007 4, /* SENSOR_HV7131B 5 */ 7017 4, /* SENSOR_HV7131C 5 */
7008 4, /* SENSOR_HV7131C 6 */ 7018 4, /* SENSOR_ICM105A 6 */
7009 4, /* SENSOR_ICM105A 7 */ 7019 4, /* SENSOR_MC501CB 7 */
7010 4, /* SENSOR_MC501CB 8 */ 7020 3, /* SENSOR_OV7620 8 */
7011 3, /* SENSOR_OV7620 9 */ 7021 4, /* SENSOR_OV7630C 9 */
7012 4, /* SENSOR_OV7630C 10 */ 7022 4, /* SENSOR_PAS106 10 */
7013 4, /* SENSOR_PAS106 11 */ 7023 4, /* SENSOR_PAS202B 11 */
7014 4, /* SENSOR_PB0330 12 */ 7024 4, /* SENSOR_PB0330 12 */
7015 4, /* SENSOR_PO2030 13 */ 7025 4, /* SENSOR_PO2030 13 */
7016 4, /* SENSOR_TAS5130CK 14 */ 7026 4, /* SENSOR_TAS5130CK 14 */
@@ -7066,8 +7076,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
7066 sd->sensor = SENSOR_ICM105A; 7076 sd->sensor = SENSOR_ICM105A;
7067 break; 7077 break;
7068 case 0x0e: 7078 case 0x0e:
7069 PDEBUG(D_PROBE, "Find Sensor HDCS2020"); 7079 PDEBUG(D_PROBE, "Find Sensor PAS202B");
7070 sd->sensor = SENSOR_HDCS2020; 7080 sd->sensor = SENSOR_PAS202B;
7071 sd->sharpness = 1; 7081 sd->sharpness = 1;
7072 break; 7082 break;
7073 case 0x0f: 7083 case 0x0f:
@@ -7153,7 +7163,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
7153 sd->gamma = gamma[(int) sd->sensor]; 7163 sd->gamma = gamma[(int) sd->sensor];
7154 sd->autogain = sd_ctrls[SD_AUTOGAIN].qctrl.default_value; 7164 sd->autogain = sd_ctrls[SD_AUTOGAIN].qctrl.default_value;
7155 sd->lightfreq = sd_ctrls[SD_FREQ].qctrl.default_value; 7165 sd->lightfreq = sd_ctrls[SD_FREQ].qctrl.default_value;
7156 sd->sharpness = sd_ctrls[SD_SHARPNESS].qctrl.default_value;
7157 7166
7158 switch (sd->sensor) { 7167 switch (sd->sensor) {
7159 case SENSOR_GC0305: 7168 case SENSOR_GC0305:
@@ -7161,7 +7170,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
7161 case SENSOR_PO2030: 7170 case SENSOR_PO2030:
7162 gspca_dev->ctrl_dis = (1 << BRIGHTNESS_IDX); 7171 gspca_dev->ctrl_dis = (1 << BRIGHTNESS_IDX);
7163 break; 7172 break;
7164 case SENSOR_HDCS2020:
7165 case SENSOR_HV7131B: 7173 case SENSOR_HV7131B:
7166 case SENSOR_HV7131C: 7174 case SENSOR_HV7131C:
7167 case SENSOR_OV7630C: 7175 case SENSOR_OV7630C:
@@ -7191,15 +7199,15 @@ static int sd_start(struct gspca_dev *gspca_dev)
7191 {cs2102_InitialScale, cs2102_Initial}, /* 0 */ 7199 {cs2102_InitialScale, cs2102_Initial}, /* 0 */
7192 {cs2102K_InitialScale, cs2102K_Initial}, /* 1 */ 7200 {cs2102K_InitialScale, cs2102K_Initial}, /* 1 */
7193 {gc0305_Initial, gc0305_InitialScale}, /* 2 */ 7201 {gc0305_Initial, gc0305_InitialScale}, /* 2 */
7194 {hdcs2020xx_InitialScale, hdcs2020xx_Initial}, /* 3 */ 7202 {hdcs2020xb_InitialScale, hdcs2020xb_Initial}, /* 3 */
7195 {hdcs2020xb_InitialScale, hdcs2020xb_Initial}, /* 4 */ 7203 {hv7131bxx_InitialScale, hv7131bxx_Initial}, /* 4 */
7196 {hv7131bxx_InitialScale, hv7131bxx_Initial}, /* 5 */ 7204 {hv7131cxx_InitialScale, hv7131cxx_Initial}, /* 5 */
7197 {hv7131cxx_InitialScale, hv7131cxx_Initial}, /* 6 */ 7205 {icm105axx_InitialScale, icm105axx_Initial}, /* 6 */
7198 {icm105axx_InitialScale, icm105axx_Initial}, /* 7 */ 7206 {MC501CB_InitialScale, MC501CB_Initial}, /* 7 */
7199 {MC501CB_InitialScale, MC501CB_Initial}, /* 9 */ 7207 {OV7620_mode0, OV7620_mode1}, /* 8 */
7200 {OV7620_mode0, OV7620_mode1}, /* 9 */ 7208 {ov7630c_InitialScale, ov7630c_Initial}, /* 9 */
7201 {ov7630c_InitialScale, ov7630c_Initial}, /* 10 */ 7209 {pas106b_InitialScale, pas106b_Initial}, /* 10 */
7202 {pas106b_InitialScale, pas106b_Initial}, /* 11 */ 7210 {pas202b_Initial, pas202b_InitialScale}, /* 11 */
7203 {pb0330xx_InitialScale, pb0330xx_Initial}, /* 12 */ 7211 {pb0330xx_InitialScale, pb0330xx_Initial}, /* 12 */
7204/* or {pb03303x_InitialScale, pb03303x_Initial}, */ 7212/* or {pb03303x_InitialScale, pb03303x_Initial}, */
7205 {PO2030_mode0, PO2030_mode1}, /* 13 */ 7213 {PO2030_mode0, PO2030_mode1}, /* 13 */
@@ -7256,6 +7264,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
7256 reg_r(gspca_dev, 0x0008); 7264 reg_r(gspca_dev, 0x0008);
7257 reg_w(dev, 0x00, 0x0008); 7265 reg_w(dev, 0x00, 0x0008);
7258 break; 7266 break;
7267 case SENSOR_PAS202B:
7259 case SENSOR_GC0305: 7268 case SENSOR_GC0305:
7260 reg_r(gspca_dev, 0x0008); 7269 reg_r(gspca_dev, 0x0008);
7261 /* fall thru */ 7270 /* fall thru */
@@ -7269,7 +7278,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
7269 switch (sd->sensor) { 7278 switch (sd->sensor) {
7270 case SENSOR_CS2102: /* gamma set in xxx_Initial */ 7279 case SENSOR_CS2102: /* gamma set in xxx_Initial */
7271 case SENSOR_CS2102K: 7280 case SENSOR_CS2102K:
7272 case SENSOR_HDCS2020:
7273 case SENSOR_HDCS2020b: 7281 case SENSOR_HDCS2020b:
7274 case SENSOR_PB0330: /* pb with chip_revision - see above */ 7282 case SENSOR_PB0330: /* pb with chip_revision - see above */
7275 case SENSOR_OV7630C: 7283 case SENSOR_OV7630C:
@@ -7282,6 +7290,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
7282 setmatrix(gspca_dev); /* one more time? */ 7290 setmatrix(gspca_dev); /* one more time? */
7283 switch (sd->sensor) { 7291 switch (sd->sensor) {
7284 case SENSOR_OV7620: 7292 case SENSOR_OV7620:
7293 case SENSOR_PAS202B:
7285 reg_r(gspca_dev, 0x0180); /* from win */ 7294 reg_r(gspca_dev, 0x0180); /* from win */
7286 reg_w(dev, 0x00, 0x0180); 7295 reg_w(dev, 0x00, 0x0180);
7287 break; 7296 break;
@@ -7293,37 +7302,29 @@ static int sd_start(struct gspca_dev *gspca_dev)
7293 7302
7294 switch (sd->sensor) { 7303 switch (sd->sensor) {
7295 case SENSOR_GC0305: 7304 case SENSOR_GC0305:
7296 case SENSOR_OV7620:
7297 reg_w(dev, 0x09, 0x01ad); /* (from win traces) */ 7305 reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
7298 reg_w(dev, 0x15, 0x01ae); 7306 reg_w(dev, 0x15, 0x01ae);
7299 sd->autogain = 0; 7307 /* fall thru */
7300 break; 7308 case SENSOR_PAS202B:
7301 case SENSOR_PO2030: 7309 case SENSOR_PO2030:
7302 reg_w(dev, 0x40, 0x0117); /* (from win traces) */ 7310/* reg_w(dev, 0x40, ZC3XX_R117_GGAIN); * (from win traces) */
7303 reg_r(gspca_dev, 0x0180); 7311 reg_r(gspca_dev, 0x0180);
7304 break; 7312 break;
7305 }
7306
7307 setautogain(gspca_dev);
7308 switch (sd->sensor) {
7309 case SENSOR_GC0305:
7310/* setlightfreq(gspca_dev); ?? (end: 80 -> [18d]) */
7311 reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
7312 reg_w(dev, 0x15, 0x01ae);
7313 reg_w(dev, 0x40, 0x0180);
7314 reg_w(dev, 0x40, 0x0117);
7315 reg_r(gspca_dev, 0x0180);
7316 sd->autogain = 1;
7317 setautogain(gspca_dev);
7318 break;
7319 case SENSOR_OV7620: 7313 case SENSOR_OV7620:
7314 reg_w(dev, 0x09, 0x01ad);
7315 reg_w(dev, 0x15, 0x01ae);
7320 i2c_read(gspca_dev, 0x13); /*fixme: returns 0xa3 */ 7316 i2c_read(gspca_dev, 0x13); /*fixme: returns 0xa3 */
7321 i2c_write(gspca_dev, 0x13, 0xa3, 0x00); 7317 i2c_write(gspca_dev, 0x13, 0xa3, 0x00);
7322 /*fixme: returned value to send? */ 7318 /*fixme: returned value to send? */
7323 reg_w(dev, 0x40, 0x0117); /* (from win traces) */ 7319 reg_w(dev, 0x40, 0x0117);
7324 reg_r(gspca_dev, 0x0180); 7320 reg_r(gspca_dev, 0x0180);
7325 setautogain(gspca_dev); 7321 break;
7326 msleep(500); 7322 }
7323
7324 setautogain(gspca_dev);
7325 switch (sd->sensor) {
7326 case SENSOR_PAS202B:
7327 reg_w(dev, 0x00, 0x0007); /* (from win traces) */
7327 break; 7328 break;
7328 case SENSOR_PO2030: 7329 case SENSOR_PO2030:
7329 msleep(500); 7330 msleep(500);
@@ -7333,6 +7334,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
7333 reg_w(dev, 0x02, 0x0008); 7334 reg_w(dev, 0x02, 0x0008);
7334 break; 7335 break;
7335 } 7336 }
7337 if (sd->sensor == SENSOR_PAS202B)
7338 reg_w(dev, 0x02, ZC3XX_R008_CLOCKSETTING);
7336 return 0; 7339 return 0;
7337} 7340}
7338 7341
@@ -7530,6 +7533,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
7530 {USB_DEVICE(0x0458, 0x700c)}, 7533 {USB_DEVICE(0x0458, 0x700c)},
7531 {USB_DEVICE(0x0458, 0x700f)}, 7534 {USB_DEVICE(0x0458, 0x700f)},
7532 {USB_DEVICE(0x0461, 0x0a00)}, 7535 {USB_DEVICE(0x0461, 0x0a00)},
7536 {USB_DEVICE(0x046d, 0x089d), .driver_info = SENSOR_MC501CB},
7533 {USB_DEVICE(0x046d, 0x08a0)}, 7537 {USB_DEVICE(0x046d, 0x08a0)},
7534 {USB_DEVICE(0x046d, 0x08a1)}, 7538 {USB_DEVICE(0x046d, 0x08a1)},
7535 {USB_DEVICE(0x046d, 0x08a2)}, 7539 {USB_DEVICE(0x046d, 0x08a2)},
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index efe849981ab7..d4658c56eddc 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -385,10 +385,10 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
385 goto err_out_detach; 385 goto err_out_detach;
386 } 386 }
387 387
388 /* Phys addr can only be set after attaching (for ir->c.dev.bus_id) */ 388 /* Phys addr can only be set after attaching (for ir->c.dev) */
389 snprintf(ir->phys, sizeof(ir->phys), "%s/%s/ir0", 389 snprintf(ir->phys, sizeof(ir->phys), "%s/%s/ir0",
390 ir->c.adapter->dev.bus_id, 390 dev_name(&ir->c.adapter->dev),
391 ir->c.dev.bus_id); 391 dev_name(&ir->c.dev));
392 392
393 /* init + register input device */ 393 /* init + register input device */
394 ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes); 394 ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes);
diff --git a/drivers/media/video/ivtv/ivtv-cards.c b/drivers/media/video/ivtv/ivtv-cards.c
index 4e05f91a9100..2883c8780760 100644
--- a/drivers/media/video/ivtv/ivtv-cards.c
+++ b/drivers/media/video/ivtv/ivtv-cards.c
@@ -877,20 +877,28 @@ static const struct ivtv_card_pci_info ivtv_pci_pg600v2[] = {
877static const struct ivtv_card ivtv_card_pg600v2 = { 877static const struct ivtv_card ivtv_card_pg600v2 = {
878 .type = IVTV_CARD_PG600V2, 878 .type = IVTV_CARD_PG600V2,
879 .name = "Yuan PG600-2, GotView PCI DVD Lite", 879 .name = "Yuan PG600-2, GotView PCI DVD Lite",
880 .comment = "only Composite and S-Video inputs are supported, not the tuner\n",
881 .v4l2_capabilities = IVTV_CAP_ENCODER, 880 .v4l2_capabilities = IVTV_CAP_ENCODER,
882 .hw_video = IVTV_HW_CX25840, 881 .hw_video = IVTV_HW_CX25840,
883 .hw_audio = IVTV_HW_CX25840, 882 .hw_audio = IVTV_HW_CX25840,
884 .hw_audio_ctrl = IVTV_HW_CX25840, 883 .hw_audio_ctrl = IVTV_HW_CX25840,
885 .hw_all = IVTV_HW_CX25840, 884 .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
 885	/* XC2028 support apparently works for the Yuan; it's still
 886	   uncertain whether it also works with the GotView. */
886 .video_inputs = { 887 .video_inputs = {
887 { IVTV_CARD_INPUT_SVIDEO1, 0, 888 { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
889 { IVTV_CARD_INPUT_SVIDEO1, 1,
888 CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 }, 890 CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
889 { IVTV_CARD_INPUT_COMPOSITE1, 0, CX25840_COMPOSITE1 }, 891 { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
890 }, 892 },
891 .audio_inputs = { 893 .audio_inputs = {
894 { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
892 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL }, 895 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
893 }, 896 },
897 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
898 .xceive_pin = 12,
899 .tuners = {
900 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
901 },
894 .pci_list = ivtv_pci_pg600v2, 902 .pci_list = ivtv_pci_pg600v2,
895 .i2c = &ivtv_i2c_std, 903 .i2c = &ivtv_i2c_std,
896}; 904};
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
index 48e103be7183..62aa06f5d168 100644
--- a/drivers/media/video/ivtv/ivtv-controls.c
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -63,7 +63,7 @@ int ivtv_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl)
63 case V4L2_CID_HUE: 63 case V4L2_CID_HUE:
64 case V4L2_CID_SATURATION: 64 case V4L2_CID_SATURATION:
65 case V4L2_CID_CONTRAST: 65 case V4L2_CID_CONTRAST:
66 if (itv->video_dec_func(itv, VIDIOC_QUERYCTRL, qctrl)) 66 if (v4l2_subdev_call(itv->sd_video, core, queryctrl, qctrl))
67 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; 67 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
68 return 0; 68 return 0;
69 69
@@ -73,7 +73,7 @@ int ivtv_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl)
73 case V4L2_CID_AUDIO_BASS: 73 case V4L2_CID_AUDIO_BASS:
74 case V4L2_CID_AUDIO_TREBLE: 74 case V4L2_CID_AUDIO_TREBLE:
75 case V4L2_CID_AUDIO_LOUDNESS: 75 case V4L2_CID_AUDIO_LOUDNESS:
76 if (ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_QUERYCTRL, qctrl)) 76 if (v4l2_subdev_call(itv->sd_audio, core, queryctrl, qctrl))
77 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; 77 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
78 return 0; 78 return 0;
79 79
@@ -122,7 +122,7 @@ static int ivtv_s_ctrl(struct ivtv *itv, struct v4l2_control *vctrl)
122 case V4L2_CID_HUE: 122 case V4L2_CID_HUE:
123 case V4L2_CID_SATURATION: 123 case V4L2_CID_SATURATION:
124 case V4L2_CID_CONTRAST: 124 case V4L2_CID_CONTRAST:
125 return itv->video_dec_func(itv, VIDIOC_S_CTRL, vctrl); 125 return v4l2_subdev_call(itv->sd_video, core, s_ctrl, vctrl);
126 126
127 case V4L2_CID_AUDIO_VOLUME: 127 case V4L2_CID_AUDIO_VOLUME:
128 case V4L2_CID_AUDIO_MUTE: 128 case V4L2_CID_AUDIO_MUTE:
@@ -130,7 +130,7 @@ static int ivtv_s_ctrl(struct ivtv *itv, struct v4l2_control *vctrl)
130 case V4L2_CID_AUDIO_BASS: 130 case V4L2_CID_AUDIO_BASS:
131 case V4L2_CID_AUDIO_TREBLE: 131 case V4L2_CID_AUDIO_TREBLE:
132 case V4L2_CID_AUDIO_LOUDNESS: 132 case V4L2_CID_AUDIO_LOUDNESS:
133 return ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_S_CTRL, vctrl); 133 return v4l2_subdev_call(itv->sd_audio, core, s_ctrl, vctrl);
134 134
135 default: 135 default:
136 IVTV_DEBUG_IOCTL("invalid control 0x%x\n", vctrl->id); 136 IVTV_DEBUG_IOCTL("invalid control 0x%x\n", vctrl->id);
@@ -147,7 +147,7 @@ static int ivtv_g_ctrl(struct ivtv *itv, struct v4l2_control *vctrl)
147 case V4L2_CID_HUE: 147 case V4L2_CID_HUE:
148 case V4L2_CID_SATURATION: 148 case V4L2_CID_SATURATION:
149 case V4L2_CID_CONTRAST: 149 case V4L2_CID_CONTRAST:
150 return itv->video_dec_func(itv, VIDIOC_G_CTRL, vctrl); 150 return v4l2_subdev_call(itv->sd_video, core, g_ctrl, vctrl);
151 151
152 case V4L2_CID_AUDIO_VOLUME: 152 case V4L2_CID_AUDIO_VOLUME:
153 case V4L2_CID_AUDIO_MUTE: 153 case V4L2_CID_AUDIO_MUTE:
@@ -155,7 +155,7 @@ static int ivtv_g_ctrl(struct ivtv *itv, struct v4l2_control *vctrl)
155 case V4L2_CID_AUDIO_BASS: 155 case V4L2_CID_AUDIO_BASS:
156 case V4L2_CID_AUDIO_TREBLE: 156 case V4L2_CID_AUDIO_TREBLE:
157 case V4L2_CID_AUDIO_LOUDNESS: 157 case V4L2_CID_AUDIO_LOUDNESS:
158 return ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_G_CTRL, vctrl); 158 return v4l2_subdev_call(itv->sd_audio, core, g_ctrl, vctrl);
159 default: 159 default:
160 IVTV_DEBUG_IOCTL("invalid control 0x%x\n", vctrl->id); 160 IVTV_DEBUG_IOCTL("invalid control 0x%x\n", vctrl->id);
161 return -EINVAL; 161 return -EINVAL;
@@ -268,7 +268,7 @@ int ivtv_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
268 fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 268 fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
269 fmt.fmt.pix.width = itv->params.width / (is_mpeg1 ? 2 : 1); 269 fmt.fmt.pix.width = itv->params.width / (is_mpeg1 ? 2 : 1);
270 fmt.fmt.pix.height = itv->params.height; 270 fmt.fmt.pix.height = itv->params.height;
271 itv->video_dec_func(itv, VIDIOC_S_FMT, &fmt); 271 v4l2_subdev_call(itv->sd_video, video, s_fmt, &fmt);
272 } 272 }
273 err = cx2341x_update(itv, ivtv_api_func, &itv->params, &p); 273 err = cx2341x_update(itv, ivtv_api_func, &itv->params, &p);
274 if (!err && itv->params.stream_vbi_fmt != p.stream_vbi_fmt) 274 if (!err && itv->params.stream_vbi_fmt != p.stream_vbi_fmt)
@@ -279,7 +279,7 @@ int ivtv_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
279 /* The audio clock of the digitizer must match the codec sample 279 /* The audio clock of the digitizer must match the codec sample
280 rate otherwise you get some very strange effects. */ 280 rate otherwise you get some very strange effects. */
281 if (idx < sizeof(freqs)) 281 if (idx < sizeof(freqs))
282 ivtv_call_i2c_clients(itv, VIDIOC_INT_AUDIO_CLOCK_FREQ, &freqs[idx]); 282 ivtv_call_all(itv, audio, s_clock_freq, freqs[idx]);
283 return err; 283 return err;
284 } 284 }
285 return -EINVAL; 285 return -EINVAL;
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index b69cc1d55e5b..08b762951759 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -60,9 +60,6 @@
60#include <media/v4l2-chip-ident.h> 60#include <media/v4l2-chip-ident.h>
61#include "tuner-xc2028.h" 61#include "tuner-xc2028.h"
62 62
63/* var to keep track of the number of array elements in use */
64int ivtv_cards_active;
65
66/* If you have already X v4l cards, then set this to X. This way 63/* If you have already X v4l cards, then set this to X. This way
67 the device numbers stay matched. Example: you have a WinTV card 64 the device numbers stay matched. Example: you have a WinTV card
68 without radio and a PVR-350 with. Normally this would give a 65 without radio and a PVR-350 with. Normally this would give a
@@ -70,12 +67,6 @@ int ivtv_cards_active;
70 setting this to 1 you ensure that radio0 is now also radio1. */ 67 setting this to 1 you ensure that radio0 is now also radio1. */
71int ivtv_first_minor; 68int ivtv_first_minor;
72 69
73/* Master variable for all ivtv info */
74struct ivtv *ivtv_cards[IVTV_MAX_CARDS];
75
76/* Protects ivtv_cards_active */
77DEFINE_SPINLOCK(ivtv_cards_lock);
78
79/* add your revision and whatnot here */ 70/* add your revision and whatnot here */
80static struct pci_device_id ivtv_pci_tbl[] __devinitdata = { 71static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
81 {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15, 72 {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15,
@@ -87,6 +78,9 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
87 78
88MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl); 79MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
89 80
81/* ivtv instance counter */
82static atomic_t ivtv_instance = ATOMIC_INIT(0);
83
90/* Parameter declarations */ 84/* Parameter declarations */
91static int cardtype[IVTV_MAX_CARDS]; 85static int cardtype[IVTV_MAX_CARDS];
92static int tuner[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, 86static int tuner[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
@@ -599,9 +593,9 @@ static void ivtv_process_options(struct ivtv *itv)
599 itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_MPG] = dec_mpg_buffers * 1024; 593 itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_MPG] = dec_mpg_buffers * 1024;
600 itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_YUV] = dec_yuv_buffers * 1024; 594 itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_YUV] = dec_yuv_buffers * 1024;
601 itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_VBI] = dec_vbi_buffers; 595 itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_VBI] = dec_vbi_buffers;
602 itv->options.cardtype = cardtype[itv->num]; 596 itv->options.cardtype = cardtype[itv->instance];
603 itv->options.tuner = tuner[itv->num]; 597 itv->options.tuner = tuner[itv->instance];
604 itv->options.radio = radio[itv->num]; 598 itv->options.radio = radio[itv->instance];
605 itv->options.newi2c = newi2c; 599 itv->options.newi2c = newi2c;
606 if (tunertype < -1 || tunertype > 1) { 600 if (tunertype < -1 || tunertype > 1) {
607 IVTV_WARN("Invalid tunertype argument, will autodetect instead\n"); 601 IVTV_WARN("Invalid tunertype argument, will autodetect instead\n");
@@ -688,7 +682,7 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
688 spin_lock_init(&itv->lock); 682 spin_lock_init(&itv->lock);
689 spin_lock_init(&itv->dma_reg_lock); 683 spin_lock_init(&itv->dma_reg_lock);
690 684
691 itv->irq_work_queues = create_singlethread_workqueue(itv->name); 685 itv->irq_work_queues = create_singlethread_workqueue(itv->device.name);
692 if (itv->irq_work_queues == NULL) { 686 if (itv->irq_work_queues == NULL) {
693 IVTV_ERR("Could not create ivtv workqueue\n"); 687 IVTV_ERR("Could not create ivtv workqueue\n");
694 return -1; 688 return -1;
@@ -770,12 +764,6 @@ static void __devinit ivtv_init_struct2(struct ivtv *itv)
770 i = 0; 764 i = 0;
771 itv->active_input = i; 765 itv->active_input = i;
772 itv->audio_input = itv->card->video_inputs[i].audio_index; 766 itv->audio_input = itv->card->video_inputs[i].audio_index;
773 if (itv->card->hw_all & IVTV_HW_CX25840)
774 itv->video_dec_func = ivtv_cx25840;
775 else if (itv->card->hw_all & IVTV_HW_SAA717X)
776 itv->video_dec_func = ivtv_saa717x;
777 else
778 itv->video_dec_func = ivtv_saa7115;
779} 767}
780 768
781static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *dev, 769static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *dev,
@@ -788,21 +776,21 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *dev,
788 IVTV_DEBUG_INFO("Enabling pci device\n"); 776 IVTV_DEBUG_INFO("Enabling pci device\n");
789 777
790 if (pci_enable_device(dev)) { 778 if (pci_enable_device(dev)) {
791 IVTV_ERR("Can't enable device %d!\n", itv->num); 779 IVTV_ERR("Can't enable device!\n");
792 return -EIO; 780 return -EIO;
793 } 781 }
794 if (pci_set_dma_mask(dev, 0xffffffff)) { 782 if (pci_set_dma_mask(dev, 0xffffffff)) {
795 IVTV_ERR("No suitable DMA available on card %d.\n", itv->num); 783 IVTV_ERR("No suitable DMA available.\n");
796 return -EIO; 784 return -EIO;
797 } 785 }
798 if (!request_mem_region(itv->base_addr, IVTV_ENCODER_SIZE, "ivtv encoder")) { 786 if (!request_mem_region(itv->base_addr, IVTV_ENCODER_SIZE, "ivtv encoder")) {
799 IVTV_ERR("Cannot request encoder memory region on card %d.\n", itv->num); 787 IVTV_ERR("Cannot request encoder memory region.\n");
800 return -EIO; 788 return -EIO;
801 } 789 }
802 790
803 if (!request_mem_region(itv->base_addr + IVTV_REG_OFFSET, 791 if (!request_mem_region(itv->base_addr + IVTV_REG_OFFSET,
804 IVTV_REG_SIZE, "ivtv registers")) { 792 IVTV_REG_SIZE, "ivtv registers")) {
805 IVTV_ERR("Cannot request register memory region on card %d.\n", itv->num); 793 IVTV_ERR("Cannot request register memory region.\n");
806 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); 794 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
807 return -EIO; 795 return -EIO;
808 } 796 }
@@ -810,7 +798,7 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *dev,
810 if (itv->has_cx23415 && 798 if (itv->has_cx23415 &&
811 !request_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, 799 !request_mem_region(itv->base_addr + IVTV_DECODER_OFFSET,
812 IVTV_DECODER_SIZE, "ivtv decoder")) { 800 IVTV_DECODER_SIZE, "ivtv decoder")) {
813 IVTV_ERR("Cannot request decoder memory region on card %d.\n", itv->num); 801 IVTV_ERR("Cannot request decoder memory region.\n");
814 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); 802 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
815 release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); 803 release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
816 return -EIO; 804 return -EIO;
@@ -853,69 +841,11 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *dev,
853 return 0; 841 return 0;
854} 842}
855 843
856#ifdef MODULE
857static u32 ivtv_request_module(struct ivtv *itv, u32 hw,
858 const char *name, u32 id)
859{
860 if ((hw & id) == 0)
861 return hw;
862 if (request_module(name) != 0) {
863 IVTV_ERR("Failed to load module %s\n", name);
864 return hw & ~id;
865 }
866 IVTV_DEBUG_INFO("Loaded module %s\n", name);
867 return hw;
868}
869#endif
870
871static void ivtv_load_and_init_modules(struct ivtv *itv) 844static void ivtv_load_and_init_modules(struct ivtv *itv)
872{ 845{
873 u32 hw = itv->card->hw_all; 846 u32 hw = itv->card->hw_all;
874 unsigned i; 847 unsigned i;
875 848
876#ifdef MODULE
877 /* load modules */
878#ifdef CONFIG_MEDIA_TUNER_MODULE
879 hw = ivtv_request_module(itv, hw, "tuner", IVTV_HW_TUNER);
880#endif
881#ifdef CONFIG_VIDEO_CX25840_MODULE
882 hw = ivtv_request_module(itv, hw, "cx25840", IVTV_HW_CX25840);
883#endif
884#ifdef CONFIG_VIDEO_SAA711X_MODULE
885 hw = ivtv_request_module(itv, hw, "saa7115", IVTV_HW_SAA711X);
886#endif
887#ifdef CONFIG_VIDEO_SAA7127_MODULE
888 hw = ivtv_request_module(itv, hw, "saa7127", IVTV_HW_SAA7127);
889#endif
890#ifdef CONFIG_VIDEO_SAA717X_MODULE
891 hw = ivtv_request_module(itv, hw, "saa717x", IVTV_HW_SAA717X);
892#endif
893#ifdef CONFIG_VIDEO_UPD64031A_MODULE
894 hw = ivtv_request_module(itv, hw, "upd64031a", IVTV_HW_UPD64031A);
895#endif
896#ifdef CONFIG_VIDEO_UPD64083_MODULE
897 hw = ivtv_request_module(itv, hw, "upd64083", IVTV_HW_UPD6408X);
898#endif
899#ifdef CONFIG_VIDEO_MSP3400_MODULE
900 hw = ivtv_request_module(itv, hw, "msp3400", IVTV_HW_MSP34XX);
901#endif
902#ifdef CONFIG_VIDEO_VP27SMPX_MODULE
903 hw = ivtv_request_module(itv, hw, "vp27smpx", IVTV_HW_VP27SMPX);
904#endif
905#ifdef CONFIG_VIDEO_WM8775_MODULE
906 hw = ivtv_request_module(itv, hw, "wm8775", IVTV_HW_WM8775);
907#endif
908#ifdef CONFIG_VIDEO_WM8739_MODULE
909 hw = ivtv_request_module(itv, hw, "wm8739", IVTV_HW_WM8739);
910#endif
911#ifdef CONFIG_VIDEO_CS53L32A_MODULE
912 hw = ivtv_request_module(itv, hw, "cs53l32a", IVTV_HW_CS53L32A);
913#endif
914#ifdef CONFIG_VIDEO_M52790_MODULE
915 hw = ivtv_request_module(itv, hw, "m52790", IVTV_HW_M52790);
916#endif
917#endif
918
919 /* check which i2c devices are actually found */ 849 /* check which i2c devices are actually found */
920 for (i = 0; i < 32; i++) { 850 for (i = 0; i < 32; i++) {
921 u32 device = 1 << i; 851 u32 device = 1 << i;
@@ -927,11 +857,21 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
927 itv->hw_flags |= device; 857 itv->hw_flags |= device;
928 continue; 858 continue;
929 } 859 }
930 ivtv_i2c_register(itv, i); 860 if (ivtv_i2c_register(itv, i) == 0)
931 if (ivtv_i2c_hw_addr(itv, device) > 0)
932 itv->hw_flags |= device; 861 itv->hw_flags |= device;
933 } 862 }
934 863
864 if (itv->card->hw_all & IVTV_HW_CX25840)
865 itv->sd_video = ivtv_find_hw(itv, IVTV_HW_CX25840);
866 else if (itv->card->hw_all & IVTV_HW_SAA717X)
867 itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA717X);
868 else if (itv->card->hw_all & IVTV_HW_SAA7114)
869 itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA7114);
870 else
871 itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA7115);
872 itv->sd_audio = ivtv_find_hw(itv, itv->card->hw_audio_ctrl);
873 itv->sd_muxer = ivtv_find_hw(itv, itv->card->hw_muxer);
874
935 hw = itv->hw_flags; 875 hw = itv->hw_flags;
936 876
937 if (itv->card->type == IVTV_CARD_CX23416GYC) { 877 if (itv->card->type == IVTV_CARD_CX23416GYC) {
@@ -949,7 +889,7 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
949 /* The crystal frequency of GVMVPRX is 24.576MHz */ 889 /* The crystal frequency of GVMVPRX is 24.576MHz */
950 crystal_freq.freq = SAA7115_FREQ_24_576_MHZ; 890 crystal_freq.freq = SAA7115_FREQ_24_576_MHZ;
951 crystal_freq.flags = SAA7115_FREQ_FL_UCGC; 891 crystal_freq.flags = SAA7115_FREQ_FL_UCGC;
952 itv->video_dec_func(itv, VIDIOC_INT_S_CRYSTAL_FREQ, &crystal_freq); 892 v4l2_subdev_call(itv->sd_video, video, s_crystal_freq, &crystal_freq);
953 } 893 }
954 894
955 if (hw & IVTV_HW_CX25840) { 895 if (hw & IVTV_HW_CX25840) {
@@ -967,7 +907,7 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
967 /* determine the exact saa711x model */ 907 /* determine the exact saa711x model */
968 itv->hw_flags &= ~IVTV_HW_SAA711X; 908 itv->hw_flags &= ~IVTV_HW_SAA711X;
969 909
970 ivtv_saa7115(itv, VIDIOC_G_CHIP_IDENT, &v); 910 ivtv_call_hw(itv, IVTV_HW_SAA711X, core, g_chip_ident, &v);
971 if (v.ident == V4L2_IDENT_SAA7114) { 911 if (v.ident == V4L2_IDENT_SAA7114) {
972 itv->hw_flags |= IVTV_HW_SAA7114; 912 itv->hw_flags |= IVTV_HW_SAA7114;
973 /* VBI is not yet supported by the saa7114 driver. */ 913 /* VBI is not yet supported by the saa7114 driver. */
@@ -1001,28 +941,20 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1001 int vbi_buf_size; 941 int vbi_buf_size;
1002 struct ivtv *itv; 942 struct ivtv *itv;
1003 943
1004 spin_lock(&ivtv_cards_lock);
1005
1006 /* Make sure we've got a place for this card */
1007 if (ivtv_cards_active == IVTV_MAX_CARDS) {
1008 printk(KERN_ERR "ivtv: Maximum number of cards detected (%d)\n",
1009 ivtv_cards_active);
1010 spin_unlock(&ivtv_cards_lock);
1011 return -ENOMEM;
1012 }
1013
1014 itv = kzalloc(sizeof(struct ivtv), GFP_ATOMIC); 944 itv = kzalloc(sizeof(struct ivtv), GFP_ATOMIC);
1015 if (itv == NULL) { 945 if (itv == NULL)
1016 spin_unlock(&ivtv_cards_lock);
1017 return -ENOMEM; 946 return -ENOMEM;
1018 }
1019 ivtv_cards[ivtv_cards_active] = itv;
1020 itv->dev = dev; 947 itv->dev = dev;
1021 itv->num = ivtv_cards_active++; 948 itv->instance = atomic_inc_return(&ivtv_instance) - 1;
1022 snprintf(itv->name, sizeof(itv->name), "ivtv%d", itv->num);
1023 IVTV_INFO("Initializing card #%d\n", itv->num);
1024 949
1025 spin_unlock(&ivtv_cards_lock); 950 retval = v4l2_device_register(&dev->dev, &itv->device);
951 if (retval)
952 return retval;
953 /* "ivtv + PCI ID" is a bit of a mouthful, so use
954 "ivtv + instance" instead. */
955 snprintf(itv->device.name, sizeof(itv->device.name),
956 "ivtv%d", itv->instance);
957 IVTV_INFO("Initializing card %d\n", itv->instance);
1026 958
1027 ivtv_process_options(itv); 959 ivtv_process_options(itv);
1028 if (itv->options.cardtype == -1) { 960 if (itv->options.cardtype == -1) {
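
The probe path above drops the ivtv_cards_active counter (and the ivtv_cards_lock that protected it) in favour of a lock-free atomic_inc_return(&ivtv_instance) - 1. A userspace analogue using C11 atomics — an assumption for illustration only; the kernel uses its own atomic_t API, and atomic_fetch_add() returns the previous value, so no "- 1" is needed here:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int instance;     /* static storage: starts at 0 */

    static int new_instance(void)
    {
            /* returns 0, 1, 2, ... — one unique number per probed card */
            return atomic_fetch_add(&instance, 1);
    }

    int main(void)
    {
            printf("card %d\n", new_instance());
            printf("card %d\n", new_instance());
            return 0;
    }
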
@@ -1043,8 +975,6 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1043 else if (retval == -ENXIO) 975 else if (retval == -ENXIO)
1044 goto free_mem; 976 goto free_mem;
1045 } 977 }
1046 /* save itv in the pci struct for later use */
1047 pci_set_drvdata(dev, itv);
1048 978
1049 /* map io memory */ 979 /* map io memory */
1050 IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n", 980 IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
@@ -1086,7 +1016,9 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1086 goto free_io; 1016 goto free_io;
1087 } 1017 }
1088 1018
1089 ivtv_gpio_init(itv); 1019 retval = ivtv_gpio_init(itv);
1020 if (retval)
1021 goto free_io;
1090 1022
1091 /* active i2c */ 1023 /* active i2c */
1092 IVTV_DEBUG_INFO("activating i2c...\n"); 1024 IVTV_DEBUG_INFO("activating i2c...\n");
@@ -1095,8 +1027,6 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1095 goto free_io; 1027 goto free_io;
1096 } 1028 }
1097 1029
1098 IVTV_DEBUG_INFO("Active card count: %d.\n", ivtv_cards_active);
1099
1100 if (itv->card->hw_all & IVTV_HW_TVEEPROM) { 1030 if (itv->card->hw_all & IVTV_HW_TVEEPROM) {
1101 /* Based on the model number the cardtype may be changed. 1031 /* Based on the model number the cardtype may be changed.
1102 The PCI IDs are not always reliable. */ 1032 The PCI IDs are not always reliable. */
@@ -1191,7 +1121,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1191 setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */ 1121 setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */
1192 setup.tuner_callback = (setup.type == TUNER_XC2028) ? 1122 setup.tuner_callback = (setup.type == TUNER_XC2028) ?
1193 ivtv_reset_tuner_gpio : NULL; 1123 ivtv_reset_tuner_gpio : NULL;
1194 ivtv_call_i2c_clients(itv, TUNER_SET_TYPE_ADDR, &setup); 1124 ivtv_call_all(itv, tuner, s_type_addr, &setup);
1195 if (setup.type == TUNER_XC2028) { 1125 if (setup.type == TUNER_XC2028) {
1196 static struct xc2028_ctrl ctrl = { 1126 static struct xc2028_ctrl ctrl = {
1197 .fname = XC2028_DEFAULT_FIRMWARE, 1127 .fname = XC2028_DEFAULT_FIRMWARE,
@@ -1201,7 +1131,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1201 .tuner = itv->options.tuner, 1131 .tuner = itv->options.tuner,
1202 .priv = &ctrl, 1132 .priv = &ctrl,
1203 }; 1133 };
1204 ivtv_call_i2c_clients(itv, TUNER_SET_CONFIG, &cfg); 1134 ivtv_call_all(itv, tuner, s_config, &cfg);
1205 } 1135 }
1206 } 1136 }
1207 1137
@@ -1210,11 +1140,11 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1210 itv->tuner_std = itv->std; 1140 itv->tuner_std = itv->std;
1211 1141
1212 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 1142 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
1213 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std); 1143 ivtv_call_all(itv, video, s_std_output, itv->std);
1214 /* Turn off the output signal. The mpeg decoder is not yet 1144 /* Turn off the output signal. The mpeg decoder is not yet
1215 active so without this you would get a green image until the 1145 active so without this you would get a green image until the
1216 mpeg decoder becomes active. */ 1146 mpeg decoder becomes active. */
1217 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL); 1147 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0);
1218 } 1148 }
1219 1149
1220 /* clear interrupt mask, effectively disabling interrupts */ 1150 /* clear interrupt mask, effectively disabling interrupts */
@@ -1222,7 +1152,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1222 1152
1223 /* Register IRQ */ 1153 /* Register IRQ */
1224 retval = request_irq(itv->dev->irq, ivtv_irq_handler, 1154 retval = request_irq(itv->dev->irq, ivtv_irq_handler,
1225 IRQF_SHARED | IRQF_DISABLED, itv->name, (void *)itv); 1155 IRQF_SHARED | IRQF_DISABLED, itv->device.name, (void *)itv);
1226 if (retval) { 1156 if (retval) {
1227 IVTV_ERR("Failed to register irq %d\n", retval); 1157 IVTV_ERR("Failed to register irq %d\n", retval);
1228 goto free_i2c; 1158 goto free_i2c;
@@ -1238,7 +1168,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1238 IVTV_ERR("Error %d registering devices\n", retval); 1168 IVTV_ERR("Error %d registering devices\n", retval);
1239 goto free_streams; 1169 goto free_streams;
1240 } 1170 }
1241 IVTV_INFO("Initialized card #%d: %s\n", itv->num, itv->card_name); 1171 IVTV_INFO("Initialized card: %s\n", itv->card_name);
1242 return 0; 1172 return 0;
1243 1173
1244free_streams: 1174free_streams:
@@ -1261,10 +1191,8 @@ err:
1261 retval = -ENODEV; 1191 retval = -ENODEV;
1262 IVTV_ERR("Error %d on initialization\n", retval); 1192 IVTV_ERR("Error %d on initialization\n", retval);
1263 1193
1264 spin_lock(&ivtv_cards_lock); 1194 v4l2_device_unregister(&itv->device);
1265 kfree(ivtv_cards[ivtv_cards_active]); 1195 kfree(itv);
1266 ivtv_cards[ivtv_cards_active] = NULL;
1267 spin_unlock(&ivtv_cards_lock);
1268 return retval; 1196 return retval;
1269} 1197}
1270 1198
@@ -1304,10 +1232,11 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1304 if (itv->card->hw_all & IVTV_HW_CX25840) { 1232 if (itv->card->hw_all & IVTV_HW_CX25840) {
1305 struct v4l2_control ctrl; 1233 struct v4l2_control ctrl;
1306 1234
1235 v4l2_subdev_call(itv->sd_video, core, init, 0);
1307 /* CX25840_CID_ENABLE_PVR150_WORKAROUND */ 1236 /* CX25840_CID_ENABLE_PVR150_WORKAROUND */
1308 ctrl.id = V4L2_CID_PRIVATE_BASE; 1237 ctrl.id = V4L2_CID_PRIVATE_BASE;
1309 ctrl.value = itv->pvr150_workaround; 1238 ctrl.value = itv->pvr150_workaround;
1310 itv->video_dec_func(itv, VIDIOC_S_CTRL, &ctrl); 1239 v4l2_subdev_call(itv->sd_video, core, s_ctrl, &ctrl);
1311 } 1240 }
1312 1241
1313 vf.tuner = 0; 1242 vf.tuner = 0;
@@ -1337,7 +1266,7 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1337 /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes 1266 /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
1338 the mpeg decoder so now the saa7127 receives a proper 1267 the mpeg decoder so now the saa7127 receives a proper
1339 signal. */ 1268 signal. */
1340 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL); 1269 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
1341 ivtv_init_mpeg_decoder(itv); 1270 ivtv_init_mpeg_decoder(itv);
1342 } 1271 }
1343 ivtv_s_std(NULL, &fh, &itv->tuner_std); 1272 ivtv_s_std(NULL, &fh, &itv->tuner_std);
@@ -1362,9 +1291,11 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1362 1291
1363static void ivtv_remove(struct pci_dev *pci_dev) 1292static void ivtv_remove(struct pci_dev *pci_dev)
1364{ 1293{
1365 struct ivtv *itv = pci_get_drvdata(pci_dev); 1294 struct v4l2_device *dev = dev_get_drvdata(&pci_dev->dev);
1295 struct ivtv *itv = to_ivtv(dev);
1296 int i;
1366 1297
1367 IVTV_DEBUG_INFO("Removing Card #%d\n", itv->num); 1298 IVTV_DEBUG_INFO("Removing card\n");
1368 1299
1369 if (test_bit(IVTV_F_I_INITED, &itv->i_flags)) { 1300 if (test_bit(IVTV_F_I_INITED, &itv->i_flags)) {
1370 /* Stop all captures */ 1301 /* Stop all captures */
@@ -1377,7 +1308,7 @@ static void ivtv_remove(struct pci_dev *pci_dev)
1377 1308
1378 /* Turn off the TV-out */ 1309 /* Turn off the TV-out */
1379 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) 1310 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
1380 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL); 1311 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0);
1381 if (atomic_read(&itv->decoding) > 0) { 1312 if (atomic_read(&itv->decoding) > 0) {
1382 int type; 1313 int type;
1383 1314
@@ -1402,6 +1333,8 @@ static void ivtv_remove(struct pci_dev *pci_dev)
1402 ivtv_streams_cleanup(itv, 1); 1333 ivtv_streams_cleanup(itv, 1);
1403 ivtv_udma_free(itv); 1334 ivtv_udma_free(itv);
1404 1335
1336 v4l2_device_unregister(&itv->device);
1337
1405 exit_ivtv_i2c(itv); 1338 exit_ivtv_i2c(itv);
1406 1339
1407 free_irq(itv->dev->irq, (void *)itv); 1340 free_irq(itv->dev->irq, (void *)itv);
@@ -1413,8 +1346,11 @@ static void ivtv_remove(struct pci_dev *pci_dev)
1413 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); 1346 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
1414 1347
1415 pci_disable_device(itv->dev); 1348 pci_disable_device(itv->dev);
1349 for (i = 0; i < IVTV_VBI_FRAMES; i++)
1350 kfree(itv->vbi.sliced_mpeg_data[i]);
1416 1351
1417 IVTV_INFO("Removed %s, card #%d\n", itv->card_name, itv->num); 1352 printk(KERN_INFO "ivtv: Removed %s\n", itv->card_name);
1353 kfree(itv);
1418} 1354}
1419 1355
1420/* define a pci_driver for card detection */ 1356/* define a pci_driver for card detection */
@@ -1427,54 +1363,36 @@ static struct pci_driver ivtv_pci_driver = {
1427 1363
1428static int module_start(void) 1364static int module_start(void)
1429{ 1365{
1430 printk(KERN_INFO "ivtv: Start initialization, version %s\n", IVTV_VERSION); 1366 printk(KERN_INFO "ivtv: Start initialization, version %s\n", IVTV_VERSION);
1431
1432 memset(ivtv_cards, 0, sizeof(ivtv_cards));
1433 1367
1434 /* Validate parameters */ 1368 /* Validate parameters */
1435 if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) { 1369 if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) {
1436 printk(KERN_ERR "ivtv: Exiting, ivtv_first_minor must be between 0 and %d\n", 1370 printk(KERN_ERR "ivtv: Exiting, ivtv_first_minor must be between 0 and %d\n",
1437 IVTV_MAX_CARDS - 1); 1371 IVTV_MAX_CARDS - 1);
1438 return -1; 1372 return -1;
1439 } 1373 }
1440 1374
1441 if (ivtv_debug < 0 || ivtv_debug > 2047) { 1375 if (ivtv_debug < 0 || ivtv_debug > 2047) {
1442 ivtv_debug = 0; 1376 ivtv_debug = 0;
1443 printk(KERN_INFO "ivtv: Debug value must be >= 0 and <= 2047\n"); 1377 printk(KERN_INFO "ivtv: Debug value must be >= 0 and <= 2047\n");
1444 } 1378 }
1445 1379
1446 if (pci_register_driver(&ivtv_pci_driver)) { 1380 if (pci_register_driver(&ivtv_pci_driver)) {
1447 printk(KERN_ERR "ivtv: Error detecting PCI card\n"); 1381 printk(KERN_ERR "ivtv: Error detecting PCI card\n");
1448 return -ENODEV; 1382 return -ENODEV;
1449 } 1383 }
1450 printk(KERN_INFO "ivtv: End initialization\n"); 1384 printk(KERN_INFO "ivtv: End initialization\n");
1451 return 0; 1385 return 0;
1452} 1386}
1453 1387
1454static void module_cleanup(void) 1388static void module_cleanup(void)
1455{ 1389{
1456 int i, j;
1457
1458 pci_unregister_driver(&ivtv_pci_driver); 1390 pci_unregister_driver(&ivtv_pci_driver);
1459
1460 spin_lock(&ivtv_cards_lock);
1461 for (i = 0; i < ivtv_cards_active; i++) {
1462 if (ivtv_cards[i] == NULL)
1463 continue;
1464 for (j = 0; j < IVTV_VBI_FRAMES; j++) {
1465 kfree(ivtv_cards[i]->vbi.sliced_mpeg_data[j]);
1466 }
1467 kfree(ivtv_cards[i]);
1468 }
1469 spin_unlock(&ivtv_cards_lock);
1470} 1391}
1471 1392
1472/* Note: These symbols are exported because they are used by the ivtvfb 1393/* Note: These symbols are exported because they are used by the ivtvfb
1473 framebuffer module and an infrared module for the IR-blaster. */ 1394 framebuffer module and an infrared module for the IR-blaster. */
1474EXPORT_SYMBOL(ivtv_set_irq_mask); 1395EXPORT_SYMBOL(ivtv_set_irq_mask);
1475EXPORT_SYMBOL(ivtv_cards_active);
1476EXPORT_SYMBOL(ivtv_cards);
1477EXPORT_SYMBOL(ivtv_cards_lock);
1478EXPORT_SYMBOL(ivtv_api); 1396EXPORT_SYMBOL(ivtv_api);
1479EXPORT_SYMBOL(ivtv_vapi); 1397EXPORT_SYMBOL(ivtv_vapi);
1480EXPORT_SYMBOL(ivtv_vapi_result); 1398EXPORT_SYMBOL(ivtv_vapi_result);
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 3733b2afec5f..ce8d9b74357e 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -61,6 +61,7 @@
61#include <linux/dvb/audio.h> 61#include <linux/dvb/audio.h>
62#include <media/v4l2-common.h> 62#include <media/v4l2-common.h>
63#include <media/v4l2-ioctl.h> 63#include <media/v4l2-ioctl.h>
64#include <media/v4l2-device.h>
64#include <media/tuner.h> 65#include <media/tuner.h>
65#include <media/cx2341x.h> 66#include <media/cx2341x.h>
66 67
@@ -113,9 +114,6 @@
113#define IVTV_REG_VPU (0x9058) 114#define IVTV_REG_VPU (0x9058)
114#define IVTV_REG_APU (0xA064) 115#define IVTV_REG_APU (0xA064)
115 116
116/* i2c stuff */
117#define I2C_CLIENTS_MAX 16
118
119/* debugging */ 117/* debugging */
120extern int ivtv_debug; 118extern int ivtv_debug;
121 119
@@ -132,12 +130,10 @@ extern int ivtv_debug;
132/* Flag to turn on high volume debugging */ 130/* Flag to turn on high volume debugging */
133#define IVTV_DBGFLG_HIGHVOL (1 << 10) 131#define IVTV_DBGFLG_HIGHVOL (1 << 10)
134 132
135/* NOTE: extra space before comma in 'itv->num , ## args' is required for
136 gcc-2.95, otherwise it won't compile. */
137#define IVTV_DEBUG(x, type, fmt, args...) \ 133#define IVTV_DEBUG(x, type, fmt, args...) \
138 do { \ 134 do { \
139 if ((x) & ivtv_debug) \ 135 if ((x) & ivtv_debug) \
140 printk(KERN_INFO "ivtv%d " type ": " fmt, itv->num , ## args); \ 136 v4l2_info(&itv->device, " " type ": " fmt , ##args); \
141 } while (0) 137 } while (0)
142#define IVTV_DEBUG_WARN(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_WARN, "warn", fmt , ## args) 138#define IVTV_DEBUG_WARN(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_WARN, "warn", fmt , ## args)
143#define IVTV_DEBUG_INFO(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_INFO, "info", fmt , ## args) 139#define IVTV_DEBUG_INFO(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_INFO, "info", fmt , ## args)
@@ -152,8 +148,8 @@ extern int ivtv_debug;
152 148
153#define IVTV_DEBUG_HIGH_VOL(x, type, fmt, args...) \ 149#define IVTV_DEBUG_HIGH_VOL(x, type, fmt, args...) \
154 do { \ 150 do { \
155 if (((x) & ivtv_debug) && (ivtv_debug & IVTV_DBGFLG_HIGHVOL)) \ 151 if (((x) & ivtv_debug) && (ivtv_debug & IVTV_DBGFLG_HIGHVOL)) \
156 printk(KERN_INFO "ivtv%d " type ": " fmt, itv->num , ## args); \ 152 v4l2_info(&itv->device, " " type ": " fmt , ##args); \
157 } while (0) 153 } while (0)
158#define IVTV_DEBUG_HI_WARN(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_WARN, "warn", fmt , ## args) 154#define IVTV_DEBUG_HI_WARN(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_WARN, "warn", fmt , ## args)
159#define IVTV_DEBUG_HI_INFO(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_INFO, "info", fmt , ## args) 155#define IVTV_DEBUG_HI_INFO(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_INFO, "info", fmt , ## args)
@@ -167,9 +163,9 @@ extern int ivtv_debug;
167#define IVTV_DEBUG_HI_YUV(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_YUV, "yuv", fmt , ## args) 163#define IVTV_DEBUG_HI_YUV(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_YUV, "yuv", fmt , ## args)
168 164
169/* Standard kernel messages */ 165/* Standard kernel messages */
170#define IVTV_ERR(fmt, args...) printk(KERN_ERR "ivtv%d: " fmt, itv->num , ## args) 166#define IVTV_ERR(fmt, args...) v4l2_err(&itv->device, fmt , ## args)
171#define IVTV_WARN(fmt, args...) printk(KERN_WARNING "ivtv%d: " fmt, itv->num , ## args) 167#define IVTV_WARN(fmt, args...) v4l2_warn(&itv->device, fmt , ## args)
172#define IVTV_INFO(fmt, args...) printk(KERN_INFO "ivtv%d: " fmt, itv->num , ## args) 168#define IVTV_INFO(fmt, args...) v4l2_info(&itv->device, fmt , ## args)
173 169
174/* output modes (cx23415 only) */ 170/* output modes (cx23415 only) */
175#define OUT_NONE 0 171#define OUT_NONE 0
@@ -596,8 +592,6 @@ struct ivtv_card;
596/* Struct to hold info about ivtv cards */ 592/* Struct to hold info about ivtv cards */
597struct ivtv { 593struct ivtv {
598 /* General fixed card data */ 594 /* General fixed card data */
599 int num; /* board number, -1 during init! */
600 char name[8]; /* board name for printk and interrupts (e.g. 'ivtv0') */
601 struct pci_dev *dev; /* PCI device */ 595 struct pci_dev *dev; /* PCI device */
602 const struct ivtv_card *card; /* card information */ 596 const struct ivtv_card *card; /* card information */
603 const char *card_name; /* full name of the card */ 597 const char *card_name; /* full name of the card */
@@ -609,14 +603,18 @@ struct ivtv {
609 u32 v4l2_cap; /* V4L2 capabilities of card */ 603 u32 v4l2_cap; /* V4L2 capabilities of card */
610 u32 hw_flags; /* hardware description of the board */ 604 u32 hw_flags; /* hardware description of the board */
611 v4l2_std_id tuner_std; /* the norm of the card's tuner (fixed) */ 605 v4l2_std_id tuner_std; /* the norm of the card's tuner (fixed) */
612 /* controlling video decoder function */ 606 struct v4l2_subdev *sd_video; /* controlling video decoder subdev */
613 int (*video_dec_func)(struct ivtv *, unsigned int, void *); 607 struct v4l2_subdev *sd_audio; /* controlling audio subdev */
608 struct v4l2_subdev *sd_muxer; /* controlling audio muxer subdev */
614 u32 base_addr; /* PCI resource base address */ 609 u32 base_addr; /* PCI resource base address */
615 volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */ 610 volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */
616 volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */ 611 volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */
617 volatile void __iomem *reg_mem; /* pointer to mapped registers */ 612 volatile void __iomem *reg_mem; /* pointer to mapped registers */
618 struct ivtv_options options; /* user options */ 613 struct ivtv_options options; /* user options */
619 614
615 struct v4l2_device device;
616 struct v4l2_subdev sd_gpio; /* GPIO sub-device */
617 u16 instance;
620 618
621 /* High-level state info */ 619 /* High-level state info */
622 unsigned long i_flags; /* global ivtv flags */ 620 unsigned long i_flags; /* global ivtv flags */
@@ -676,7 +674,6 @@ struct ivtv {
676 struct i2c_adapter i2c_adap; 674 struct i2c_adapter i2c_adap;
677 struct i2c_algo_bit_data i2c_algo; 675 struct i2c_algo_bit_data i2c_algo;
678 struct i2c_client i2c_client; 676 struct i2c_client i2c_client;
679 struct i2c_client *i2c_clients[I2C_CLIENTS_MAX];/* pointers to all I2C clients */
680 int i2c_state; /* i2c bit state */ 677 int i2c_state; /* i2c bit state */
681 struct mutex i2c_bus_lock; /* lock i2c bus */ 678 struct mutex i2c_bus_lock; /* lock i2c bus */
682 679
@@ -722,11 +719,13 @@ struct ivtv {
722 struct osd_info *osd_info; /* ivtvfb private OSD info */ 719 struct osd_info *osd_info; /* ivtvfb private OSD info */
723}; 720};
724 721
722static inline struct ivtv *to_ivtv(struct v4l2_device *dev)
723{
724 return container_of(dev, struct ivtv, device);
725}
726
725/* Globals */ 727/* Globals */
726extern struct ivtv *ivtv_cards[];
727extern int ivtv_cards_active;
728extern int ivtv_first_minor; 728extern int ivtv_first_minor;
729extern spinlock_t ivtv_cards_lock;
730 729
731/*==============Prototypes==================*/ 730/*==============Prototypes==================*/
732 731
@@ -786,4 +785,19 @@ static inline int ivtv_raw_vbi(const struct ivtv *itv)
786#define write_dec_sync(val, addr) \ 785#define write_dec_sync(val, addr) \
787 do { write_dec(val, addr); read_dec(addr); } while (0) 786 do { write_dec(val, addr); read_dec(addr); } while (0)
788 787
788/* Call the specified callback for all subdevs matching hw (if 0, then
789 match them all). Ignore any errors. */
790#define ivtv_call_hw(itv, hw, o, f, args...) \
791 __v4l2_device_call_subdevs(&(itv)->device, !(hw) || (sd->grp_id & (hw)), o, f , ##args)
792
793#define ivtv_call_all(itv, o, f, args...) ivtv_call_hw(itv, 0, o, f , ##args)
794
795/* Call the specified callback for all subdevs matching hw (if 0, then
796 match them all). If the callback returns an error other than 0 or
797 -ENOIOCTLCMD, then return with that error code. */
798#define ivtv_call_hw_err(itv, hw, o, f, args...) \
799 __v4l2_device_call_subdevs_until_err(&(itv)->device, !(hw) || (sd->grp_id & (hw)), o, f , ##args)
800
801#define ivtv_call_all_err(itv, o, f, args...) ivtv_call_hw_err(itv, 0, o, f , ##args)
802
789#endif 803#endif
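The two broadcast macros above are the heart of the new call model: the hw argument is an IVTV_HW_ bitmask matched against each registered subdev's grp_id, and a zero mask (ivtv_call_all) addresses every subdev while discarding errors. A minimal usage sketch, assuming the headers ivtv-fileops.c already pulls in and an initialised struct ivtv; the function name is illustrative only:

#include <media/saa7115.h>
#include "ivtv-driver.h"
#include "ivtv-cards.h"

/* Broadcast to every subdev, then address only the subdevs whose
 * grp_id matches IVTV_HW_SAA711X. */
static void example_broadcast_and_target(struct ivtv *itv)
{
        struct v4l2_crystal_freq freq = {
                .freq = SAA7115_FREQ_32_11_MHZ,
                .flags = 0,
        };

        ivtv_call_all(itv, core, log_status);
        ivtv_call_hw(itv, IVTV_HW_SAA711X, video, s_crystal_freq, &freq);
}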
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 1c404e454a36..5eb587592e9d 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -155,7 +155,7 @@ static void ivtv_dualwatch(struct ivtv *itv)
155 155
156 new_stereo_mode = itv->params.audio_properties & stereo_mask; 156 new_stereo_mode = itv->params.audio_properties & stereo_mask;
157 memset(&vt, 0, sizeof(vt)); 157 memset(&vt, 0, sizeof(vt));
158 ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, &vt); 158 ivtv_call_all(itv, tuner, g_tuner, &vt);
159 if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 && (vt.rxsubchans & V4L2_TUNER_SUB_LANG2)) 159 if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 && (vt.rxsubchans & V4L2_TUNER_SUB_LANG2))
160 new_stereo_mode = dual; 160 new_stereo_mode = dual;
161 161
@@ -857,7 +857,7 @@ int ivtv_v4l2_close(struct inode *inode, struct file *filp)
857 /* Mark that the radio is no longer in use */ 857 /* Mark that the radio is no longer in use */
858 clear_bit(IVTV_F_I_RADIO_USER, &itv->i_flags); 858 clear_bit(IVTV_F_I_RADIO_USER, &itv->i_flags);
859 /* Switch tuner to TV */ 859 /* Switch tuner to TV */
860 ivtv_call_i2c_clients(itv, VIDIOC_S_STD, &itv->std); 860 ivtv_call_all(itv, tuner, s_std, itv->std);
861 /* Select correct audio input (i.e. TV tuner or Line in) */ 861 /* Select correct audio input (i.e. TV tuner or Line in) */
862 ivtv_audio_set_io(itv); 862 ivtv_audio_set_io(itv);
863 if (itv->hw_flags & IVTV_HW_SAA711X) 863 if (itv->hw_flags & IVTV_HW_SAA711X)
@@ -865,7 +865,7 @@ int ivtv_v4l2_close(struct inode *inode, struct file *filp)
865 struct v4l2_crystal_freq crystal_freq; 865 struct v4l2_crystal_freq crystal_freq;
866 crystal_freq.freq = SAA7115_FREQ_32_11_MHZ; 866 crystal_freq.freq = SAA7115_FREQ_32_11_MHZ;
867 crystal_freq.flags = 0; 867 crystal_freq.flags = 0;
868 ivtv_saa7115(itv, VIDIOC_INT_S_CRYSTAL_FREQ, &crystal_freq); 868 ivtv_call_hw(itv, IVTV_HW_SAA711X, video, s_crystal_freq, &crystal_freq);
869 } 869 }
870 if (atomic_read(&itv->capturing) > 0) { 870 if (atomic_read(&itv->capturing) > 0) {
871 /* Undo video mute */ 871 /* Undo video mute */
@@ -952,15 +952,14 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
952 /* We have the radio */ 952 /* We have the radio */
953 ivtv_mute(itv); 953 ivtv_mute(itv);
954 /* Switch tuner to radio */ 954 /* Switch tuner to radio */
955 ivtv_call_i2c_clients(itv, AUDC_SET_RADIO, NULL); 955 ivtv_call_all(itv, tuner, s_radio);
956 /* Select the correct audio input (i.e. radio tuner) */ 956 /* Select the correct audio input (i.e. radio tuner) */
957 ivtv_audio_set_io(itv); 957 ivtv_audio_set_io(itv);
958 if (itv->hw_flags & IVTV_HW_SAA711X) 958 if (itv->hw_flags & IVTV_HW_SAA711X) {
959 {
960 struct v4l2_crystal_freq crystal_freq; 959 struct v4l2_crystal_freq crystal_freq;
961 crystal_freq.freq = SAA7115_FREQ_32_11_MHZ; 960 crystal_freq.freq = SAA7115_FREQ_32_11_MHZ;
962 crystal_freq.flags = SAA7115_FREQ_FL_APLL; 961 crystal_freq.flags = SAA7115_FREQ_FL_APLL;
963 ivtv_saa7115(itv, VIDIOC_INT_S_CRYSTAL_FREQ, &crystal_freq); 962 ivtv_call_hw(itv, IVTV_HW_SAA711X, video, s_crystal_freq, &crystal_freq);
964 } 963 }
965 /* Done! Unmute and continue. */ 964 /* Done! Unmute and continue. */
966 ivtv_unmute(itv); 965 ivtv_unmute(itv);
@@ -981,37 +980,18 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
981 980
982int ivtv_v4l2_open(struct inode *inode, struct file *filp) 981int ivtv_v4l2_open(struct inode *inode, struct file *filp)
983{ 982{
984 int res, x, y = 0; 983 int res;
985 struct ivtv *itv = NULL; 984 struct ivtv *itv = NULL;
986 struct ivtv_stream *s = NULL; 985 struct ivtv_stream *s = NULL;
987 int minor = iminor(inode); 986 struct video_device *vdev = video_devdata(filp);
988
989 /* Find which card this open was on */
990 spin_lock(&ivtv_cards_lock);
991 for (x = 0; itv == NULL && x < ivtv_cards_active; x++) {
992 if (ivtv_cards[x] == NULL)
993 continue;
994 /* find out which stream this open was on */
995 for (y = 0; y < IVTV_MAX_STREAMS; y++) {
996 s = &ivtv_cards[x]->streams[y];
997 if (s->v4l2dev && s->v4l2dev->minor == minor) {
998 itv = ivtv_cards[x];
999 break;
1000 }
1001 }
1002 }
1003 spin_unlock(&ivtv_cards_lock);
1004 987
1005 if (itv == NULL) { 988 s = video_get_drvdata(vdev);
1006 /* Couldn't find a device registered 989 itv = s->itv;
1007 on that minor, shouldn't happen! */
1008 printk(KERN_WARNING "No ivtv device found on minor %d\n", minor);
1009 return -ENXIO;
1010 }
1011 990
1012 mutex_lock(&itv->serialize_lock); 991 mutex_lock(&itv->serialize_lock);
1013 if (ivtv_init_on_first_open(itv)) { 992 if (ivtv_init_on_first_open(itv)) {
1014 IVTV_ERR("Failed to initialize on minor %d\n", minor); 993 IVTV_ERR("Failed to initialize on minor %d\n",
994 s->v4l2dev->minor);
1015 mutex_unlock(&itv->serialize_lock); 995 mutex_unlock(&itv->serialize_lock);
1016 return -ENXIO; 996 return -ENXIO;
1017 } 997 }
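With the open path above, the driver no longer scans a global card array by minor number: the stream pointer is attached to the video_device when it is registered (see the video_set_drvdata() call in ivtv-streams.c further down) and read back here. A small sketch of that pairing, using the structures from this patch; the helper name is illustrative:

#include <linux/fs.h>
#include <media/v4l2-dev.h>
#include "ivtv-driver.h"

/* Map an open file back to its stream and card via the drvdata stored
 * at registration time with video_set_drvdata(). */
static struct ivtv *example_file_to_card(struct file *filp,
                                         struct ivtv_stream **stream)
{
        struct video_device *vdev = video_devdata(filp);
        struct ivtv_stream *s = video_get_drvdata(vdev);

        *stream = s;
        return s->itv;
}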
diff --git a/drivers/media/video/ivtv/ivtv-gpio.c b/drivers/media/video/ivtv/ivtv-gpio.c
index 74a44844ccaf..dc2850e87a7e 100644
--- a/drivers/media/video/ivtv/ivtv-gpio.c
+++ b/drivers/media/video/ivtv/ivtv-gpio.c
@@ -144,22 +144,9 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
144 return 0; 144 return 0;
145} 145}
146 146
147void ivtv_gpio_init(struct ivtv *itv) 147static inline struct ivtv *sd_to_ivtv(struct v4l2_subdev *sd)
148{ 148{
149 u16 pin = 0; 149 return container_of(sd, struct ivtv, sd_gpio);
150
151 if (itv->card->xceive_pin)
152 pin = 1 << itv->card->xceive_pin;
153
154 if ((itv->card->gpio_init.direction | pin) == 0)
155 return;
156
157 IVTV_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n",
158 read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT));
159
160 /* init output data then direction */
161 write_reg(itv->card->gpio_init.initial_value | pin, IVTV_REG_GPIO_OUT);
162 write_reg(itv->card->gpio_init.direction | pin, IVTV_REG_GPIO_DIR);
163} 150}
164 151
165static struct v4l2_queryctrl gpio_ctrl_mute = { 152static struct v4l2_queryctrl gpio_ctrl_mute = {
@@ -173,134 +160,231 @@ static struct v4l2_queryctrl gpio_ctrl_mute = {
173 .flags = 0, 160 .flags = 0,
174}; 161};
175 162
176int ivtv_gpio(struct ivtv *itv, unsigned int command, void *arg) 163static int subdev_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
177{ 164{
178 struct v4l2_tuner *tuner = arg; 165 struct ivtv *itv = sd_to_ivtv(sd);
179 struct v4l2_control *ctrl = arg;
180 struct v4l2_routing *route = arg;
181 u16 mask, data; 166 u16 mask, data;
182 167
183 switch (command) { 168 mask = itv->card->gpio_audio_freq.mask;
184 case VIDIOC_INT_AUDIO_CLOCK_FREQ: 169 switch (freq) {
185 mask = itv->card->gpio_audio_freq.mask; 170 case 32000:
186 switch (*(u32 *)arg) { 171 data = itv->card->gpio_audio_freq.f32000;
187 case 32000: 172 break;
188 data = itv->card->gpio_audio_freq.f32000; 173 case 44100:
189 break; 174 data = itv->card->gpio_audio_freq.f44100;
190 case 44100: 175 break;
191 data = itv->card->gpio_audio_freq.f44100; 176 case 48000:
192 break; 177 default:
193 case 48000: 178 data = itv->card->gpio_audio_freq.f48000;
194 default:
195 data = itv->card->gpio_audio_freq.f48000;
196 break;
197 }
198 break; 179 break;
180 }
181 if (mask)
182 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
183 return 0;
184}
199 185
200 case VIDIOC_G_TUNER: 186static int subdev_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
201 mask = itv->card->gpio_audio_detect.mask; 187{
202 if (mask == 0 || (read_reg(IVTV_REG_GPIO_IN) & mask)) 188 struct ivtv *itv = sd_to_ivtv(sd);
203 tuner->rxsubchans = V4L2_TUNER_MODE_STEREO | 189 u16 mask;
204 V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; 190
205 else 191 mask = itv->card->gpio_audio_detect.mask;
206 tuner->rxsubchans = V4L2_TUNER_SUB_MONO; 192 if (mask == 0 || (read_reg(IVTV_REG_GPIO_IN) & mask))
207 return 0; 193 vt->rxsubchans = V4L2_TUNER_MODE_STEREO |
194 V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
195 else
196 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
197 return 0;
198}
208 199
209 case VIDIOC_S_TUNER: 200static int subdev_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
210 mask = itv->card->gpio_audio_mode.mask; 201{
211 switch (tuner->audmode) { 202 struct ivtv *itv = sd_to_ivtv(sd);
212 case V4L2_TUNER_MODE_LANG1: 203 u16 mask, data;
213 data = itv->card->gpio_audio_mode.lang1;
214 break;
215 case V4L2_TUNER_MODE_LANG2:
216 data = itv->card->gpio_audio_mode.lang2;
217 break;
218 case V4L2_TUNER_MODE_MONO:
219 data = itv->card->gpio_audio_mode.mono;
220 break;
221 case V4L2_TUNER_MODE_STEREO:
222 case V4L2_TUNER_MODE_LANG1_LANG2:
223 default:
224 data = itv->card->gpio_audio_mode.stereo;
225 break;
226 }
227 break;
228 204
229 case AUDC_SET_RADIO: 205 mask = itv->card->gpio_audio_mode.mask;
230 mask = itv->card->gpio_audio_input.mask; 206 switch (vt->audmode) {
231 data = itv->card->gpio_audio_input.radio; 207 case V4L2_TUNER_MODE_LANG1:
208 data = itv->card->gpio_audio_mode.lang1;
209 break;
210 case V4L2_TUNER_MODE_LANG2:
211 data = itv->card->gpio_audio_mode.lang2;
212 break;
213 case V4L2_TUNER_MODE_MONO:
214 data = itv->card->gpio_audio_mode.mono;
232 break; 215 break;
216 case V4L2_TUNER_MODE_STEREO:
217 case V4L2_TUNER_MODE_LANG1_LANG2:
218 default:
219 data = itv->card->gpio_audio_mode.stereo;
220 break;
221 }
222 if (mask)
223 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
224 return 0;
225}
226
227static int subdev_s_radio(struct v4l2_subdev *sd)
228{
229 struct ivtv *itv = sd_to_ivtv(sd);
230 u16 mask, data;
231
232 mask = itv->card->gpio_audio_input.mask;
233 data = itv->card->gpio_audio_input.radio;
234 if (mask)
235 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
236 return 0;
237}
233 238
234 case VIDIOC_S_STD: 239static int subdev_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
235 mask = itv->card->gpio_audio_input.mask; 240{
241 struct ivtv *itv = sd_to_ivtv(sd);
242 u16 mask, data;
243
244 mask = itv->card->gpio_audio_input.mask;
245 data = itv->card->gpio_audio_input.tuner;
246 if (mask)
247 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
248 return 0;
249}
250
251static int subdev_s_audio_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
252{
253 struct ivtv *itv = sd_to_ivtv(sd);
254 u16 mask, data;
255
256 if (route->input > 2)
257 return -EINVAL;
258 mask = itv->card->gpio_audio_input.mask;
259 switch (route->input) {
260 case 0:
236 data = itv->card->gpio_audio_input.tuner; 261 data = itv->card->gpio_audio_input.tuner;
237 break; 262 break;
238 263 case 1:
239 case VIDIOC_INT_S_AUDIO_ROUTING: 264 data = itv->card->gpio_audio_input.linein;
240 if (route->input > 2) 265 break;
241 return -EINVAL; 266 case 2:
242 mask = itv->card->gpio_audio_input.mask; 267 default:
243 switch (route->input) { 268 data = itv->card->gpio_audio_input.radio;
244 case 0:
245 data = itv->card->gpio_audio_input.tuner;
246 break;
247 case 1:
248 data = itv->card->gpio_audio_input.linein;
249 break;
250 case 2:
251 default:
252 data = itv->card->gpio_audio_input.radio;
253 break;
254 }
255 break; 269 break;
270 }
271 if (mask)
272 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
273 return 0;
274}
256 275
257 case VIDIOC_G_CTRL: 276static int subdev_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
258 if (ctrl->id != V4L2_CID_AUDIO_MUTE) 277{
259 return -EINVAL; 278 struct ivtv *itv = sd_to_ivtv(sd);
260 mask = itv->card->gpio_audio_mute.mask; 279 u16 mask, data;
261 data = itv->card->gpio_audio_mute.mute;
262 ctrl->value = (read_reg(IVTV_REG_GPIO_OUT) & mask) == data;
263 return 0;
264 280
265 case VIDIOC_S_CTRL: 281 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
266 if (ctrl->id != V4L2_CID_AUDIO_MUTE) 282 return -EINVAL;
267 return -EINVAL; 283 mask = itv->card->gpio_audio_mute.mask;
268 mask = itv->card->gpio_audio_mute.mask; 284 data = itv->card->gpio_audio_mute.mute;
269 data = ctrl->value ? itv->card->gpio_audio_mute.mute : 0; 285 ctrl->value = (read_reg(IVTV_REG_GPIO_OUT) & mask) == data;
270 break; 286 return 0;
287}
271 288
272 case VIDIOC_QUERYCTRL: 289static int subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
273 { 290{
274 struct v4l2_queryctrl *qc = arg; 291 struct ivtv *itv = sd_to_ivtv(sd);
292 u16 mask, data;
275 293
276 if (qc->id != V4L2_CID_AUDIO_MUTE) 294 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
277 return -EINVAL; 295 return -EINVAL;
278 *qc = gpio_ctrl_mute; 296 mask = itv->card->gpio_audio_mute.mask;
279 return 0; 297 data = ctrl->value ? itv->card->gpio_audio_mute.mute : 0;
280 } 298 if (mask)
299 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
300 return 0;
301}
302
303static int subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
304{
305 if (qc->id != V4L2_CID_AUDIO_MUTE)
306 return -EINVAL;
307 *qc = gpio_ctrl_mute;
308 return 0;
309}
310
311static int subdev_log_status(struct v4l2_subdev *sd)
312{
313 struct ivtv *itv = sd_to_ivtv(sd);
281 314
282 case VIDIOC_LOG_STATUS: 315 IVTV_INFO("GPIO status: DIR=0x%04x OUT=0x%04x IN=0x%04x\n",
283 IVTV_INFO("GPIO status: DIR=0x%04x OUT=0x%04x IN=0x%04x\n",
284 read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT), 316 read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT),
285 read_reg(IVTV_REG_GPIO_IN)); 317 read_reg(IVTV_REG_GPIO_IN));
286 return 0; 318 return 0;
319}
287 320
288 case VIDIOC_INT_S_VIDEO_ROUTING: 321static int subdev_s_video_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
289 if (route->input > 2) /* 0:Tuner 1:Composite 2:S-Video */ 322{
290 return -EINVAL; 323 struct ivtv *itv = sd_to_ivtv(sd);
291 mask = itv->card->gpio_video_input.mask; 324 u16 mask, data;
292 if (route->input == 0)
293 data = itv->card->gpio_video_input.tuner;
294 else if (route->input == 1)
295 data = itv->card->gpio_video_input.composite;
296 else
297 data = itv->card->gpio_video_input.svideo;
298 break;
299 325
300 default: 326 if (route->input > 2) /* 0:Tuner 1:Composite 2:S-Video */
301 return -EINVAL; 327 return -EINVAL;
302 } 328 mask = itv->card->gpio_video_input.mask;
329 if (route->input == 0)
330 data = itv->card->gpio_video_input.tuner;
331 else if (route->input == 1)
332 data = itv->card->gpio_video_input.composite;
333 else
334 data = itv->card->gpio_video_input.svideo;
303 if (mask) 335 if (mask)
304 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT); 336 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
305 return 0; 337 return 0;
306} 338}
339
340static const struct v4l2_subdev_core_ops subdev_core_ops = {
341 .log_status = subdev_log_status,
342 .g_ctrl = subdev_g_ctrl,
343 .s_ctrl = subdev_s_ctrl,
344 .queryctrl = subdev_queryctrl,
345};
346
347static const struct v4l2_subdev_tuner_ops subdev_tuner_ops = {
348 .s_std = subdev_s_std,
349 .s_radio = subdev_s_radio,
350 .g_tuner = subdev_g_tuner,
351 .s_tuner = subdev_s_tuner,
352};
353
354static const struct v4l2_subdev_audio_ops subdev_audio_ops = {
355 .s_clock_freq = subdev_s_clock_freq,
356 .s_routing = subdev_s_audio_routing,
357};
358
359static const struct v4l2_subdev_video_ops subdev_video_ops = {
360 .s_routing = subdev_s_video_routing,
361};
362
363static const struct v4l2_subdev_ops subdev_ops = {
364 .core = &subdev_core_ops,
365 .tuner = &subdev_tuner_ops,
366 .audio = &subdev_audio_ops,
367 .video = &subdev_video_ops,
368};
369
370int ivtv_gpio_init(struct ivtv *itv)
371{
372 u16 pin = 0;
373
374 if (itv->card->xceive_pin)
375 pin = 1 << itv->card->xceive_pin;
376
377 if ((itv->card->gpio_init.direction | pin) == 0)
378 return 0;
379
380 IVTV_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n",
381 read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT));
382
383 /* init output data then direction */
384 write_reg(itv->card->gpio_init.initial_value | pin, IVTV_REG_GPIO_OUT);
385 write_reg(itv->card->gpio_init.direction | pin, IVTV_REG_GPIO_DIR);
386 v4l2_subdev_init(&itv->sd_gpio, &subdev_ops);
387 snprintf(itv->sd_gpio.name, sizeof(itv->sd_gpio.name), "%s-gpio", itv->device.name);
388 itv->sd_gpio.grp_id = IVTV_HW_GPIO;
389 return v4l2_device_register_subdev(&itv->device, &itv->sd_gpio);
390}
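After this rewrite the GPIO block is an ordinary v4l2 subdev: an ops table, v4l2_subdev_init(), a grp_id of IVTV_HW_GPIO and registration with the card's v4l2_device, exactly as for the I2C subdevs. The same registration pattern reduced to a sketch with generic names; only the v4l2 core calls shown in this hunk are taken as given:

#include <linux/kernel.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

/* Register an embedded subdev so that ivtv_call_hw() can later select
 * it through its group id. */
static int example_register_subdev(struct v4l2_device *v4l2_dev,
                                   struct v4l2_subdev *sd,
                                   const struct v4l2_subdev_ops *ops,
                                   u32 grp_id)
{
        v4l2_subdev_init(sd, ops);
        sd->grp_id = grp_id;
        snprintf(sd->name, sizeof(sd->name), "%s-gpio", v4l2_dev->name);
        return v4l2_device_register_subdev(v4l2_dev, sd);
}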
diff --git a/drivers/media/video/ivtv/ivtv-gpio.h b/drivers/media/video/ivtv/ivtv-gpio.h
index 48b6291613a2..0b5d19c8ecb4 100644
--- a/drivers/media/video/ivtv/ivtv-gpio.h
+++ b/drivers/media/video/ivtv/ivtv-gpio.h
@@ -22,9 +22,8 @@
22#define IVTV_GPIO_H 22#define IVTV_GPIO_H
23 23
24/* GPIO stuff */ 24/* GPIO stuff */
25void ivtv_gpio_init(struct ivtv *itv); 25int ivtv_gpio_init(struct ivtv *itv);
26void ivtv_reset_ir_gpio(struct ivtv *itv); 26void ivtv_reset_ir_gpio(struct ivtv *itv);
27int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value); 27int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value);
28int ivtv_gpio(struct ivtv *itv, unsigned int command, void *arg);
29 28
30#endif 29#endif
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 41dbbe9621a1..ca1d9557945e 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -90,26 +90,6 @@
90#define IVTV_M52790_I2C_ADDR 0x48 90#define IVTV_M52790_I2C_ADDR 0x48
91 91
92/* This array should match the IVTV_HW_ defines */ 92/* This array should match the IVTV_HW_ defines */
93static const u8 hw_driverids[] = {
94 I2C_DRIVERID_CX25840,
95 I2C_DRIVERID_SAA711X,
96 I2C_DRIVERID_SAA7127,
97 I2C_DRIVERID_MSP3400,
98 I2C_DRIVERID_TUNER,
99 I2C_DRIVERID_WM8775,
100 I2C_DRIVERID_CS53L32A,
101 I2C_DRIVERID_TVEEPROM,
102 I2C_DRIVERID_SAA711X,
103 I2C_DRIVERID_UPD64031A,
104 I2C_DRIVERID_UPD64083,
105 I2C_DRIVERID_SAA717X,
106 I2C_DRIVERID_WM8739,
107 I2C_DRIVERID_VP27SMPX,
108 I2C_DRIVERID_M52790,
109 0 /* IVTV_HW_GPIO dummy driver ID */
110};
111
112/* This array should match the IVTV_HW_ defines */
113static const u8 hw_addrs[] = { 93static const u8 hw_addrs[] = {
114 IVTV_CX25840_I2C_ADDR, 94 IVTV_CX25840_I2C_ADDR,
115 IVTV_SAA7115_I2C_ADDR, 95 IVTV_SAA7115_I2C_ADDR,
@@ -130,6 +110,26 @@ static const u8 hw_addrs[] = {
130}; 110};
131 111
132/* This array should match the IVTV_HW_ defines */ 112/* This array should match the IVTV_HW_ defines */
113static const char *hw_modules[] = {
114 "cx25840",
115 "saa7115",
116 "saa7127",
117 "msp3400",
118 "tuner",
119 "wm8775",
120 "cs53l32a",
121 NULL,
122 "saa7115",
123 "upd64031a",
124 "upd64083",
125 "saa717x",
126 "wm8739",
127 "vp27smpx",
128 "m52790",
129 NULL
130};
131
132/* This array should match the IVTV_HW_ defines */
133static const char * const hw_devicenames[] = { 133static const char * const hw_devicenames[] = {
134 "cx25840", 134 "cx25840",
135 "saa7115", 135 "saa7115",
@@ -151,80 +151,58 @@ static const char * const hw_devicenames[] = {
151 151
152int ivtv_i2c_register(struct ivtv *itv, unsigned idx) 152int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
153{ 153{
154 struct i2c_board_info info; 154 struct v4l2_subdev *sd;
155 struct i2c_client *c; 155 struct i2c_adapter *adap = &itv->i2c_adap;
156 u8 id; 156 const char *mod = hw_modules[idx];
157 int i; 157 const char *type = hw_devicenames[idx];
158 u32 hw = 1 << idx;
158 159
159 IVTV_DEBUG_I2C("i2c client register\n"); 160 if (idx >= ARRAY_SIZE(hw_addrs))
160 if (idx >= ARRAY_SIZE(hw_driverids) || hw_driverids[idx] == 0)
161 return -1; 161 return -1;
162 id = hw_driverids[idx]; 162 if (hw == IVTV_HW_TUNER) {
163 memset(&info, 0, sizeof(info)); 163 /* special tuner handling */
164 strlcpy(info.type, hw_devicenames[idx], sizeof(info.type)); 164 sd = v4l2_i2c_new_probed_subdev(adap, mod, type,
165 info.addr = hw_addrs[idx]; 165 itv->card_i2c->radio);
166 for (i = 0; itv->i2c_clients[i] && i < I2C_CLIENTS_MAX; i++) {} 166 if (sd)
167 167 sd->grp_id = 1 << idx;
168 if (i == I2C_CLIENTS_MAX) { 168 sd = v4l2_i2c_new_probed_subdev(adap, mod, type,
169 IVTV_ERR("insufficient room for new I2C client!\n"); 169 itv->card_i2c->demod);
170 return -ENOMEM; 170 if (sd)
171 sd->grp_id = 1 << idx;
172 sd = v4l2_i2c_new_probed_subdev(adap, mod, type,
173 itv->card_i2c->tv);
174 if (sd)
175 sd->grp_id = 1 << idx;
176 return sd ? 0 : -1;
171 } 177 }
178 if (!hw_addrs[idx])
179 return -1;
180 if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) {
181 unsigned short addrs[2] = { hw_addrs[idx], I2C_CLIENT_END };
172 182
173 if (id != I2C_DRIVERID_TUNER) { 183 sd = v4l2_i2c_new_probed_subdev(adap, mod, type, addrs);
174 if (id == I2C_DRIVERID_UPD64031A || 184 } else {
175 id == I2C_DRIVERID_UPD64083) { 185 sd = v4l2_i2c_new_subdev(adap, mod, type, hw_addrs[idx]);
176 unsigned short addrs[2] = { info.addr, I2C_CLIENT_END };
177
178 c = i2c_new_probed_device(&itv->i2c_adap, &info, addrs);
179 } else
180 c = i2c_new_device(&itv->i2c_adap, &info);
181 if (c && c->driver == NULL)
182 i2c_unregister_device(c);
183 else if (c)
184 itv->i2c_clients[i] = c;
185 return itv->i2c_clients[i] ? 0 : -ENODEV;
186 } 186 }
187 187 if (sd)
188 /* special tuner handling */ 188 sd->grp_id = 1 << idx;
189 c = i2c_new_probed_device(&itv->i2c_adap, &info, itv->card_i2c->radio); 189 return sd ? 0 : -1;
190 if (c && c->driver == NULL)
191 i2c_unregister_device(c);
192 else if (c)
193 itv->i2c_clients[i++] = c;
194 c = i2c_new_probed_device(&itv->i2c_adap, &info, itv->card_i2c->demod);
195 if (c && c->driver == NULL)
196 i2c_unregister_device(c);
197 else if (c)
198 itv->i2c_clients[i++] = c;
199 c = i2c_new_probed_device(&itv->i2c_adap, &info, itv->card_i2c->tv);
200 if (c && c->driver == NULL)
201 i2c_unregister_device(c);
202 else if (c)
203 itv->i2c_clients[i++] = c;
204 return 0;
205}
206
207static int attach_inform(struct i2c_client *client)
208{
209 return 0;
210} 190}
211 191
212static int detach_inform(struct i2c_client *client) 192struct v4l2_subdev *ivtv_find_hw(struct ivtv *itv, u32 hw)
213{ 193{
214 int i; 194 struct v4l2_subdev *result = NULL;
215 struct ivtv *itv = (struct ivtv *)i2c_get_adapdata(client->adapter); 195 struct v4l2_subdev *sd;
216 196
217 IVTV_DEBUG_I2C("i2c client detach\n"); 197 spin_lock(&itv->device.lock);
218 for (i = 0; i < I2C_CLIENTS_MAX; i++) { 198 v4l2_device_for_each_subdev(sd, &itv->device) {
219 if (itv->i2c_clients[i] == client) { 199 if (sd->grp_id == hw) {
220 itv->i2c_clients[i] = NULL; 200 result = sd;
221 break; 201 break;
222 } 202 }
223 } 203 }
224 IVTV_DEBUG_I2C("i2c detach [client=%s,%s]\n", 204 spin_unlock(&itv->device.lock);
225 client->name, (i < I2C_CLIENTS_MAX) ? "ok" : "failed"); 205 return result;
226
227 return 0;
228} 206}
229 207
230/* Set the serial clock line to the desired state */ 208/* Set the serial clock line to the desired state */
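In the registration path above every subdev created through v4l2_i2c_new_subdev()/v4l2_i2c_new_probed_subdev() is tagged with 1 << idx, so ivtv_find_hw() can later recover a specific device from its IVTV_HW_ flag. A sketch of a targeted call built on that lookup; the function name is illustrative:

#include <media/v4l2-device.h>
#include "ivtv-driver.h"
#include "ivtv-cards.h"
#include "ivtv-i2c.h"

/* Look up the SAA7127 encoder by its hardware flag and call it
 * directly instead of broadcasting to every subdev. */
static void example_stop_tv_output(struct ivtv *itv)
{
        struct v4l2_subdev *sd = ivtv_find_hw(itv, IVTV_HW_SAA7127);

        if (sd)
                v4l2_subdev_call(sd, video, s_stream, 0);
}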
@@ -494,7 +472,8 @@ static int ivtv_read(struct ivtv *itv, unsigned char addr, unsigned char *data,
494 intervening stop condition */ 472 intervening stop condition */
495static int ivtv_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) 473static int ivtv_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
496{ 474{
497 struct ivtv *itv = i2c_get_adapdata(i2c_adap); 475 struct v4l2_device *drv = i2c_get_adapdata(i2c_adap);
476 struct ivtv *itv = to_ivtv(drv);
498 int retval; 477 int retval;
499 int i; 478 int i;
500 479
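This hunk relies on init_ivtv_i2c() now storing the v4l2_device, not the ivtv pointer, in the adapter's drvdata (see the i2c_set_adapdata() change further down); the to_ivtv() helper added in ivtv-driver.h converts it back. A sketch of that round trip:

#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include "ivtv-driver.h"

/* Recover the card from an i2c adapter whose drvdata holds the
 * embedded struct v4l2_device. */
static struct ivtv *example_adapter_to_card(struct i2c_adapter *adap)
{
        struct v4l2_device *v4l2_dev = i2c_get_adapdata(adap);

        return to_ivtv(v4l2_dev);
}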
@@ -530,8 +509,6 @@ static struct i2c_adapter ivtv_i2c_adap_hw_template = {
530 .id = I2C_HW_B_CX2341X, 509 .id = I2C_HW_B_CX2341X,
531 .algo = &ivtv_algo, 510 .algo = &ivtv_algo,
532 .algo_data = NULL, /* filled from template */ 511 .algo_data = NULL, /* filled from template */
533 .client_register = attach_inform,
534 .client_unregister = detach_inform,
535 .owner = THIS_MODULE, 512 .owner = THIS_MODULE,
536}; 513};
537 514
@@ -583,8 +560,6 @@ static struct i2c_adapter ivtv_i2c_adap_template = {
583 .id = I2C_HW_B_CX2341X, 560 .id = I2C_HW_B_CX2341X,
584 .algo = NULL, /* set by i2c-algo-bit */ 561 .algo = NULL, /* set by i2c-algo-bit */
585 .algo_data = NULL, /* filled from template */ 562 .algo_data = NULL, /* filled from template */
586 .client_register = attach_inform,
587 .client_unregister = detach_inform,
588 .owner = THIS_MODULE, 563 .owner = THIS_MODULE,
589}; 564};
590 565
@@ -601,160 +576,6 @@ static struct i2c_client ivtv_i2c_client_template = {
601 .name = "ivtv internal", 576 .name = "ivtv internal",
602}; 577};
603 578
604int ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, void *arg)
605{
606 struct i2c_client *client;
607 int retval;
608 int i;
609
610 IVTV_DEBUG_I2C("call_i2c_client addr=%02x\n", addr);
611 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
612 client = itv->i2c_clients[i];
613 if (client == NULL || client->driver == NULL ||
614 client->driver->command == NULL)
615 continue;
616 if (addr == client->addr) {
617 retval = client->driver->command(client, cmd, arg);
618 return retval;
619 }
620 }
621 if (cmd != VIDIOC_G_CHIP_IDENT)
622 IVTV_ERR("i2c addr 0x%02x not found for command 0x%x\n", addr, cmd);
623 return -ENODEV;
624}
625
626/* Find the i2c device based on the driver ID and return
627 its i2c address or -ENODEV if no matching device was found. */
628static int ivtv_i2c_id_addr(struct ivtv *itv, u32 id)
629{
630 struct i2c_client *client;
631 int retval = -ENODEV;
632 int i;
633
634 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
635 client = itv->i2c_clients[i];
636 if (client == NULL || client->driver == NULL)
637 continue;
638 if (id == client->driver->id) {
639 retval = client->addr;
640 break;
641 }
642 }
643 return retval;
644}
645
646/* Find the i2c device name matching the DRIVERID */
647static const char *ivtv_i2c_id_name(u32 id)
648{
649 int i;
650
651 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
652 if (hw_driverids[i] == id)
653 return hw_devicenames[i];
654 return "unknown device";
655}
656
657/* Find the i2c device name matching the IVTV_HW_ flag */
658static const char *ivtv_i2c_hw_name(u32 hw)
659{
660 int i;
661
662 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
663 if (1 << i == hw)
664 return hw_devicenames[i];
665 return "unknown device";
666}
667
668/* Find the i2c device matching the IVTV_HW_ flag and return
669 its i2c address or -ENODEV if no matching device was found. */
670int ivtv_i2c_hw_addr(struct ivtv *itv, u32 hw)
671{
672 int i;
673
674 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
675 if (1 << i == hw)
676 return ivtv_i2c_id_addr(itv, hw_driverids[i]);
677 return -ENODEV;
678}
679
680/* Calls i2c device based on IVTV_HW_ flag. If hw == 0, then do nothing.
681 If hw == IVTV_HW_GPIO then call the gpio handler. */
682int ivtv_i2c_hw(struct ivtv *itv, u32 hw, unsigned int cmd, void *arg)
683{
684 int addr;
685
686 if (hw == IVTV_HW_GPIO)
687 return ivtv_gpio(itv, cmd, arg);
688 if (hw == 0)
689 return 0;
690
691 addr = ivtv_i2c_hw_addr(itv, hw);
692 if (addr < 0) {
693 IVTV_ERR("i2c hardware 0x%08x (%s) not found for command 0x%x\n",
694 hw, ivtv_i2c_hw_name(hw), cmd);
695 return addr;
696 }
697 return ivtv_call_i2c_client(itv, addr, cmd, arg);
698}
699
700/* Calls i2c device based on I2C driver ID. */
701int ivtv_i2c_id(struct ivtv *itv, u32 id, unsigned int cmd, void *arg)
702{
703 int addr;
704
705 addr = ivtv_i2c_id_addr(itv, id);
706 if (addr < 0) {
707 if (cmd != VIDIOC_G_CHIP_IDENT)
708 IVTV_ERR("i2c ID 0x%08x (%s) not found for command 0x%x\n",
709 id, ivtv_i2c_id_name(id), cmd);
710 return addr;
711 }
712 return ivtv_call_i2c_client(itv, addr, cmd, arg);
713}
714
715int ivtv_cx25840(struct ivtv *itv, unsigned int cmd, void *arg)
716{
717 return ivtv_call_i2c_client(itv, IVTV_CX25840_I2C_ADDR, cmd, arg);
718}
719
720int ivtv_saa7115(struct ivtv *itv, unsigned int cmd, void *arg)
721{
722 return ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR, cmd, arg);
723}
724
725int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg)
726{
727 return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg);
728}
729EXPORT_SYMBOL(ivtv_saa7127);
730
731int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg)
732{
733 return ivtv_call_i2c_client(itv, IVTV_SAA717x_I2C_ADDR, cmd, arg);
734}
735
736int ivtv_upd64031a(struct ivtv *itv, unsigned int cmd, void *arg)
737{
738 return ivtv_call_i2c_client(itv, IVTV_UPD64031A_I2C_ADDR, cmd, arg);
739}
740
741int ivtv_upd64083(struct ivtv *itv, unsigned int cmd, void *arg)
742{
743 return ivtv_call_i2c_client(itv, IVTV_UPD64083_I2C_ADDR, cmd, arg);
744}
745
746/* broadcast cmd for all I2C clients and for the gpio subsystem */
747void ivtv_call_i2c_clients(struct ivtv *itv, unsigned int cmd, void *arg)
748{
749 if (itv->i2c_adap.algo == NULL) {
750 IVTV_ERR("Adapter is not set");
751 return;
752 }
753 i2c_clients_command(&itv->i2c_adap, cmd, arg);
754 if (itv->hw_flags & IVTV_HW_GPIO)
755 ivtv_gpio(itv, cmd, arg);
756}
757
758/* init + register i2c algo-bit adapter */ 579/* init + register i2c algo-bit adapter */
759int init_ivtv_i2c(struct ivtv *itv) 580int init_ivtv_i2c(struct ivtv *itv)
760{ 581{
@@ -763,10 +584,9 @@ int init_ivtv_i2c(struct ivtv *itv)
763 /* Sanity checks for the I2C hardware arrays. They must be the 584 /* Sanity checks for the I2C hardware arrays. They must be the
764 * same size and GPIO must be the last entry. 585 * same size and GPIO must be the last entry.
765 */ 586 */
766 if (ARRAY_SIZE(hw_driverids) != ARRAY_SIZE(hw_addrs) || 587 if (ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_addrs) ||
767 ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_addrs) || 588 ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_modules) ||
768 IVTV_HW_GPIO != (1 << (ARRAY_SIZE(hw_addrs) - 1)) || 589 IVTV_HW_GPIO != (1 << (ARRAY_SIZE(hw_addrs) - 1))) {
769 hw_driverids[ARRAY_SIZE(hw_addrs) - 1]) {
770 IVTV_ERR("Mismatched I2C hardware arrays\n"); 590 IVTV_ERR("Mismatched I2C hardware arrays\n");
771 return -ENODEV; 591 return -ENODEV;
772 } 592 }
@@ -783,8 +603,8 @@ int init_ivtv_i2c(struct ivtv *itv)
783 itv->i2c_adap.algo_data = &itv->i2c_algo; 603 itv->i2c_adap.algo_data = &itv->i2c_algo;
784 604
785 sprintf(itv->i2c_adap.name + strlen(itv->i2c_adap.name), " #%d", 605 sprintf(itv->i2c_adap.name + strlen(itv->i2c_adap.name), " #%d",
786 itv->num); 606 itv->instance);
787 i2c_set_adapdata(&itv->i2c_adap, itv); 607 i2c_set_adapdata(&itv->i2c_adap, &itv->device);
788 608
789 memcpy(&itv->i2c_client, &ivtv_i2c_client_template, 609 memcpy(&itv->i2c_client, &ivtv_i2c_client_template,
790 sizeof(struct i2c_client)); 610 sizeof(struct i2c_client));
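The per-chip wrappers removed above (ivtv_saa7127(), ivtv_upd64083(), ...) and the driver-ID lookups all collapse into ivtv_call_hw(): the IVTV_HW_ flag selects the subdev and the op name selects the callback, and since the GPIO block is now a registered subdev too, it needs no special-casing. A before/after sketch; the function is illustrative and the op is one this patch already uses:

#include "ivtv-driver.h"
#include "ivtv-cards.h"

/* formerly: ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, data); */
static void example_send_vbi_line(struct ivtv *itv,
                                  struct v4l2_sliced_vbi_data *data)
{
        ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, data);
}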
diff --git a/drivers/media/video/ivtv/ivtv-i2c.h b/drivers/media/video/ivtv/ivtv-i2c.h
index 022978cf533d..396928a06a54 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.h
+++ b/drivers/media/video/ivtv/ivtv-i2c.h
@@ -21,19 +21,8 @@
21#ifndef IVTV_I2C_H 21#ifndef IVTV_I2C_H
22#define IVTV_I2C_H 22#define IVTV_I2C_H
23 23
24int ivtv_cx25840(struct ivtv *itv, unsigned int cmd, void *arg);
25int ivtv_saa7115(struct ivtv *itv, unsigned int cmd, void *arg);
26int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg);
27int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg);
28int ivtv_upd64031a(struct ivtv *itv, unsigned int cmd, void *arg);
29int ivtv_upd64083(struct ivtv *itv, unsigned int cmd, void *arg);
30
31int ivtv_i2c_hw_addr(struct ivtv *itv, u32 hw);
32int ivtv_i2c_hw(struct ivtv *itv, u32 hw, unsigned int cmd, void *arg);
33int ivtv_i2c_id(struct ivtv *itv, u32 id, unsigned int cmd, void *arg);
34int ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, void *arg);
35void ivtv_call_i2c_clients(struct ivtv *itv, unsigned int cmd, void *arg);
36int ivtv_i2c_register(struct ivtv *itv, unsigned idx); 24int ivtv_i2c_register(struct ivtv *itv, unsigned idx);
25struct v4l2_subdev *ivtv_find_hw(struct ivtv *itv, u32 hw);
37 26
38/* init + register i2c algo-bit adapter */ 27/* init + register i2c algo-bit adapter */
39int init_ivtv_i2c(struct ivtv *itv); 28int init_ivtv_i2c(struct ivtv *itv);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 4bae38d21ef6..cd990a4b81a9 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -393,7 +393,7 @@ static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
393 return 0; 393 return 0;
394 } 394 }
395 395
396 itv->video_dec_func(itv, VIDIOC_G_FMT, fmt); 396 v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
397 vbifmt->service_set = ivtv_get_service_set(vbifmt); 397 vbifmt->service_set = ivtv_get_service_set(vbifmt);
398 return 0; 398 return 0;
399} 399}
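v4l2_subdev_call() replaces the old itv->video_dec_func() indirection: it returns -ENODEV when the subdev pointer is NULL and -ENOIOCTLCMD when the op is not implemented, so call sites that discard the result behave much like the old function-pointer path. A minimal sketch against the stored decoder subdev, using an op this patch already calls:

#include <media/v4l2-subdev.h>
#include "ivtv-driver.h"

/* Start or stop the video decoder through its subdev. */
static void example_decoder_stream(struct ivtv *itv, int on)
{
        v4l2_subdev_call(itv->sd_video, video, s_stream, on);
}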
@@ -581,7 +581,7 @@ static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
581 p->height = h; 581 p->height = h;
582 if (p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) 582 if (p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
583 fmt->fmt.pix.width /= 2; 583 fmt->fmt.pix.width /= 2;
584 itv->video_dec_func(itv, VIDIOC_S_FMT, fmt); 584 v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
585 return ivtv_g_fmt_vid_cap(file, fh, fmt); 585 return ivtv_g_fmt_vid_cap(file, fh, fmt);
586} 586}
587 587
@@ -593,7 +593,7 @@ static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
593 return -EBUSY; 593 return -EBUSY;
594 itv->vbi.sliced_in->service_set = 0; 594 itv->vbi.sliced_in->service_set = 0;
595 itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE; 595 itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
596 itv->video_dec_func(itv, VIDIOC_S_FMT, fmt); 596 v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
597 return ivtv_g_fmt_vbi_cap(file, fh, fmt); 597 return ivtv_g_fmt_vbi_cap(file, fh, fmt);
598} 598}
599 599
@@ -611,7 +611,7 @@ static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
611 if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0) 611 if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
612 return -EBUSY; 612 return -EBUSY;
613 itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; 613 itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
614 itv->video_dec_func(itv, VIDIOC_S_FMT, fmt); 614 v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
615 memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in)); 615 memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in));
616 return 0; 616 return 0;
617} 617}
@@ -685,18 +685,17 @@ static int ivtv_g_chip_ident(struct file *file, void *fh, struct v4l2_chip_ident
685 chip->ident = itv->has_cx23415 ? V4L2_IDENT_CX23415 : V4L2_IDENT_CX23416; 685 chip->ident = itv->has_cx23415 ? V4L2_IDENT_CX23415 : V4L2_IDENT_CX23416;
686 return 0; 686 return 0;
687 } 687 }
688 if (chip->match_type == V4L2_CHIP_MATCH_I2C_DRIVER) 688 if (chip->match_type != V4L2_CHIP_MATCH_I2C_DRIVER &&
689 return ivtv_i2c_id(itv, chip->match_chip, VIDIOC_G_CHIP_IDENT, chip); 689 chip->match_type != V4L2_CHIP_MATCH_I2C_ADDR)
690 if (chip->match_type == V4L2_CHIP_MATCH_I2C_ADDR) 690 return -EINVAL;
691 return ivtv_call_i2c_client(itv, chip->match_chip, VIDIOC_G_CHIP_IDENT, chip); 691 /* TODO: is this correct? */
692 return -EINVAL; 692 return ivtv_call_all_err(itv, core, g_chip_ident, chip);
693} 693}
694 694
695#ifdef CONFIG_VIDEO_ADV_DEBUG 695#ifdef CONFIG_VIDEO_ADV_DEBUG
696static int ivtv_itvc(struct ivtv *itv, unsigned int cmd, void *arg) 696static int ivtv_itvc(struct ivtv *itv, unsigned int cmd, void *arg)
697{ 697{
698 struct v4l2_register *regs = arg; 698 struct v4l2_register *regs = arg;
699 unsigned long flags;
700 volatile u8 __iomem *reg_start; 699 volatile u8 __iomem *reg_start;
701 700
702 if (!capable(CAP_SYS_ADMIN)) 701 if (!capable(CAP_SYS_ADMIN))
@@ -711,12 +710,10 @@ static int ivtv_itvc(struct ivtv *itv, unsigned int cmd, void *arg)
711 else 710 else
712 return -EINVAL; 711 return -EINVAL;
713 712
714 spin_lock_irqsave(&ivtv_cards_lock, flags);
715 if (cmd == VIDIOC_DBG_G_REGISTER) 713 if (cmd == VIDIOC_DBG_G_REGISTER)
716 regs->val = readl(regs->reg + reg_start); 714 regs->val = readl(regs->reg + reg_start);
717 else 715 else
718 writel(regs->val, regs->reg + reg_start); 716 writel(regs->val, regs->reg + reg_start);
719 spin_unlock_irqrestore(&ivtv_cards_lock, flags);
720 return 0; 717 return 0;
721} 718}
722 719
@@ -726,9 +723,10 @@ static int ivtv_g_register(struct file *file, void *fh, struct v4l2_register *re
726 723
727 if (v4l2_chip_match_host(reg->match_type, reg->match_chip)) 724 if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
728 return ivtv_itvc(itv, VIDIOC_DBG_G_REGISTER, reg); 725 return ivtv_itvc(itv, VIDIOC_DBG_G_REGISTER, reg);
729 if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER) 726 /* TODO: subdev errors should not be ignored, this should become a
730 return ivtv_i2c_id(itv, reg->match_chip, VIDIOC_DBG_G_REGISTER, reg); 727 subdev helper function. */
731 return ivtv_call_i2c_client(itv, reg->match_chip, VIDIOC_DBG_G_REGISTER, reg); 728 ivtv_call_all(itv, core, g_register, reg);
729 return 0;
732} 730}
733 731
734static int ivtv_s_register(struct file *file, void *fh, struct v4l2_register *reg) 732static int ivtv_s_register(struct file *file, void *fh, struct v4l2_register *reg)
@@ -737,9 +735,10 @@ static int ivtv_s_register(struct file *file, void *fh, struct v4l2_register *re
737 735
738 if (v4l2_chip_match_host(reg->match_type, reg->match_chip)) 736 if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
739 return ivtv_itvc(itv, VIDIOC_DBG_S_REGISTER, reg); 737 return ivtv_itvc(itv, VIDIOC_DBG_S_REGISTER, reg);
740 if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER) 738 /* TODO: subdev errors should not be ignored, this should become a
741 return ivtv_i2c_id(itv, reg->match_chip, VIDIOC_DBG_S_REGISTER, reg); 739 subdev helper function. */
742 return ivtv_call_i2c_client(itv, reg->match_chip, VIDIOC_DBG_S_REGISTER, reg); 740 ivtv_call_all(itv, core, s_register, reg);
741 return 0;
743} 742}
744#endif 743#endif
745 744
@@ -884,12 +883,6 @@ static int ivtv_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
884 883
885 streamtype = id->type; 884 streamtype = id->type;
886 885
887 if (ivtv_debug & IVTV_DBGFLG_IOCTL) {
888 printk(KERN_INFO "ivtv%d ioctl: ", itv->num);
889 /* Should be replaced */
890 /* v4l_printk_ioctl(VIDIOC_S_CROP); */
891 }
892
893 if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && 886 if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
894 (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) { 887 (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) {
895 if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) { 888 if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
@@ -1050,7 +1043,7 @@ static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
1050 itv->active_output = outp; 1043 itv->active_output = outp;
1051 route.input = SAA7127_INPUT_TYPE_NORMAL; 1044 route.input = SAA7127_INPUT_TYPE_NORMAL;
1052 route.output = itv->card->video_outputs[outp].video_output; 1045 route.output = itv->card->video_outputs[outp].video_output;
1053 ivtv_saa7127(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); 1046 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing, &route);
1054 1047
1055 return 0; 1048 return 0;
1056} 1049}
@@ -1062,7 +1055,7 @@ static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *
1062 if (vf->tuner != 0) 1055 if (vf->tuner != 0)
1063 return -EINVAL; 1056 return -EINVAL;
1064 1057
1065 ivtv_call_i2c_clients(itv, VIDIOC_G_FREQUENCY, vf); 1058 ivtv_call_all(itv, tuner, g_frequency, vf);
1066 return 0; 1059 return 0;
1067} 1060}
1068 1061
@@ -1075,7 +1068,7 @@ int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
1075 1068
1076 ivtv_mute(itv); 1069 ivtv_mute(itv);
1077 IVTV_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf->frequency); 1070 IVTV_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf->frequency);
1078 ivtv_call_i2c_clients(itv, VIDIOC_S_FREQUENCY, vf); 1071 ivtv_call_all(itv, tuner, s_frequency, vf);
1079 ivtv_unmute(itv); 1072 ivtv_unmute(itv);
1080 return 0; 1073 return 0;
1081} 1074}
@@ -1123,14 +1116,14 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
1123 IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std); 1116 IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std);
1124 1117
1125 /* Tuner */ 1118 /* Tuner */
1126 ivtv_call_i2c_clients(itv, VIDIOC_S_STD, &itv->std); 1119 ivtv_call_all(itv, tuner, s_std, itv->std);
1127 1120
1128 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 1121 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
1129 /* set display standard */ 1122 /* set display standard */
1130 itv->std_out = *std; 1123 itv->std_out = *std;
1131 itv->is_out_60hz = itv->is_60hz; 1124 itv->is_out_60hz = itv->is_60hz;
1132 itv->is_out_50hz = itv->is_50hz; 1125 itv->is_out_50hz = itv->is_50hz;
1133 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std_out); 1126 ivtv_call_all(itv, video, s_std_output, itv->std_out);
1134 ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz); 1127 ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
1135 itv->main_rect.left = itv->main_rect.top = 0; 1128 itv->main_rect.left = itv->main_rect.top = 0;
1136 itv->main_rect.width = 720; 1129 itv->main_rect.width = 720;
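Standard switching now fans out as two broadcasts: tuner.s_std to every subdev for the capture path, and video.s_std_output for the display path on cards with video output. Condensed into a sketch, assuming the helpers defined earlier in this patch:

#include "ivtv-driver.h"

/* Propagate a new standard to the capture side and, where present,
 * to the output side. */
static void example_change_std(struct ivtv *itv, v4l2_std_id std)
{
        ivtv_call_all(itv, tuner, s_std, std);
        if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
                ivtv_call_all(itv, video, s_std_output, std);
}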
@@ -1154,7 +1147,7 @@ static int ivtv_s_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
1154 if (vt->index != 0) 1147 if (vt->index != 0)
1155 return -EINVAL; 1148 return -EINVAL;
1156 1149
1157 ivtv_call_i2c_clients(itv, VIDIOC_S_TUNER, vt); 1150 ivtv_call_all(itv, tuner, s_tuner, vt);
1158 1151
1159 return 0; 1152 return 0;
1160} 1153}
@@ -1166,7 +1159,7 @@ static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
1166 if (vt->index != 0) 1159 if (vt->index != 0)
1167 return -EINVAL; 1160 return -EINVAL;
1168 1161
1169 ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, vt); 1162 ivtv_call_all(itv, tuner, g_tuner, vt);
1170 1163
1171 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) { 1164 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
1172 strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name)); 1165 strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
@@ -1444,14 +1437,15 @@ static int ivtv_log_status(struct file *file, void *fh)
1444 struct v4l2_audio audin; 1437 struct v4l2_audio audin;
1445 int i; 1438 int i;
1446 1439
1447 IVTV_INFO("================= START STATUS CARD #%d =================\n", itv->num); 1440 IVTV_INFO("================= START STATUS CARD #%d =================\n",
1441 itv->instance);
1448 IVTV_INFO("Version: %s Card: %s\n", IVTV_VERSION, itv->card_name); 1442 IVTV_INFO("Version: %s Card: %s\n", IVTV_VERSION, itv->card_name);
1449 if (itv->hw_flags & IVTV_HW_TVEEPROM) { 1443 if (itv->hw_flags & IVTV_HW_TVEEPROM) {
1450 struct tveeprom tv; 1444 struct tveeprom tv;
1451 1445
1452 ivtv_read_eeprom(itv, &tv); 1446 ivtv_read_eeprom(itv, &tv);
1453 } 1447 }
1454 ivtv_call_i2c_clients(itv, VIDIOC_LOG_STATUS, NULL); 1448 ivtv_call_all(itv, core, log_status);
1455 ivtv_get_input(itv, itv->active_input, &vidin); 1449 ivtv_get_input(itv, itv->active_input, &vidin);
1456 ivtv_get_audio_input(itv, itv->audio_input, &audin); 1450 ivtv_get_audio_input(itv, itv->audio_input, &audin);
1457 IVTV_INFO("Video Input: %s\n", vidin.name); 1451 IVTV_INFO("Video Input: %s\n", vidin.name);
@@ -1518,7 +1512,7 @@ static int ivtv_log_status(struct file *file, void *fh)
1518 } 1512 }
1519 IVTV_INFO("Tuner: %s\n", 1513 IVTV_INFO("Tuner: %s\n",
1520 test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? "Radio" : "TV"); 1514 test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? "Radio" : "TV");
1521 cx2341x_log_status(&itv->params, itv->name); 1515 cx2341x_log_status(&itv->params, itv->device.name);
1522 IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags); 1516 IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags);
1523 for (i = 0; i < IVTV_MAX_STREAMS; i++) { 1517 for (i = 0; i < IVTV_MAX_STREAMS; i++) {
1524 struct ivtv_stream *s = &itv->streams[i]; 1518 struct ivtv_stream *s = &itv->streams[i];
@@ -1530,8 +1524,11 @@ static int ivtv_log_status(struct file *file, void *fh)
1530 (s->buffers * s->buf_size) / 1024, s->buffers); 1524 (s->buffers * s->buf_size) / 1024, s->buffers);
1531 } 1525 }
1532 1526
1533 IVTV_INFO("Read MPG/VBI: %lld/%lld bytes\n", (long long)itv->mpg_data_received, (long long)itv->vbi_data_inserted); 1527 IVTV_INFO("Read MPG/VBI: %lld/%lld bytes\n",
1534 IVTV_INFO("================== END STATUS CARD #%d ==================\n", itv->num); 1528 (long long)itv->mpg_data_received,
1529 (long long)itv->vbi_data_inserted);
1530 IVTV_INFO("================== END STATUS CARD #%d ==================\n",
1531 itv->instance);
1535 1532
1536 return 0; 1533 return 0;
1537} 1534}
@@ -1736,7 +1733,7 @@ static int ivtv_default(struct file *file, void *fh, int cmd, void *arg)
1736 case VIDIOC_INT_S_AUDIO_ROUTING: { 1733 case VIDIOC_INT_S_AUDIO_ROUTING: {
1737 struct v4l2_routing *route = arg; 1734 struct v4l2_routing *route = arg;
1738 1735
1739 ivtv_i2c_hw(itv, itv->card->hw_audio, VIDIOC_INT_S_AUDIO_ROUTING, route); 1736 ivtv_call_hw(itv, itv->card->hw_audio, audio, s_routing, route);
1740 break; 1737 break;
1741 } 1738 }
1742 1739
@@ -1746,7 +1743,7 @@ static int ivtv_default(struct file *file, void *fh, int cmd, void *arg)
1746 if ((val == 0 && itv->options.newi2c) || (val & 0x01)) 1743 if ((val == 0 && itv->options.newi2c) || (val & 0x01))
1747 ivtv_reset_ir_gpio(itv); 1744 ivtv_reset_ir_gpio(itv);
1748 if (val & 0x02) 1745 if (val & 0x02)
1749 itv->video_dec_func(itv, cmd, NULL); 1746 v4l2_subdev_call(itv->sd_video, core, reset, 0);
1750 break; 1747 break;
1751 } 1748 }
1752 1749
diff --git a/drivers/media/video/ivtv/ivtv-routing.c b/drivers/media/video/ivtv/ivtv-routing.c
index 05564919b57f..3fd302294497 100644
--- a/drivers/media/video/ivtv/ivtv-routing.c
+++ b/drivers/media/video/ivtv/ivtv-routing.c
@@ -47,13 +47,13 @@ void ivtv_audio_set_io(struct ivtv *itv)
47 route.output = 0; 47 route.output = 0;
48 if (itv->card->hw_muxer & IVTV_HW_M52790) 48 if (itv->card->hw_muxer & IVTV_HW_M52790)
49 route.output = M52790_OUT_STEREO; 49 route.output = M52790_OUT_STEREO;
50 ivtv_i2c_hw(itv, itv->card->hw_muxer, VIDIOC_INT_S_AUDIO_ROUTING, &route); 50 v4l2_subdev_call(itv->sd_muxer, audio, s_routing, &route);
51 51
52 route.input = in->audio_input; 52 route.input = in->audio_input;
53 route.output = 0; 53 route.output = 0;
54 if (itv->card->hw_audio & IVTV_HW_MSP34XX) 54 if (itv->card->hw_audio & IVTV_HW_MSP34XX)
55 route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1); 55 route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
56 ivtv_i2c_hw(itv, itv->card->hw_audio, VIDIOC_INT_S_AUDIO_ROUTING, &route); 56 ivtv_call_hw(itv, itv->card->hw_audio, audio, s_routing, &route);
57} 57}
58 58
59/* Selects the video input and output according to the current 59/* Selects the video input and output according to the current
@@ -66,7 +66,7 @@ void ivtv_video_set_io(struct ivtv *itv)
66 66
67 route.input = itv->card->video_inputs[inp].video_input; 67 route.input = itv->card->video_inputs[inp].video_input;
68 route.output = 0; 68 route.output = 0;
69 itv->video_dec_func(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); 69 v4l2_subdev_call(itv->sd_video, video, s_routing, &route);
70 70
71 type = itv->card->video_inputs[inp].video_type; 71 type = itv->card->video_inputs[inp].video_type;
72 72
@@ -79,7 +79,7 @@ void ivtv_video_set_io(struct ivtv *itv)
79 } 79 }
80 80
81 if (itv->card->hw_video & IVTV_HW_GPIO) 81 if (itv->card->hw_video & IVTV_HW_GPIO)
82 ivtv_gpio(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); 82 ivtv_call_hw(itv, IVTV_HW_GPIO, video, s_routing, &route);
83 83
84 if (itv->card->hw_video & IVTV_HW_UPD64031A) { 84 if (itv->card->hw_video & IVTV_HW_UPD64031A) {
85 if (type == IVTV_CARD_INPUT_VID_TUNER || 85 if (type == IVTV_CARD_INPUT_VID_TUNER ||
@@ -92,7 +92,7 @@ void ivtv_video_set_io(struct ivtv *itv)
92 } 92 }
93 route.input |= itv->card->gr_config; 93 route.input |= itv->card->gr_config;
94 94
95 ivtv_upd64031a(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); 95 ivtv_call_hw(itv, IVTV_HW_UPD64031A, video, s_routing, &route);
96 } 96 }
97 97
98 if (itv->card->hw_video & IVTV_HW_UPD6408X) { 98 if (itv->card->hw_video & IVTV_HW_UPD6408X) {
@@ -110,6 +110,6 @@ void ivtv_video_set_io(struct ivtv *itv)
110 route.input |= UPD64083_EXT_Y_ADC; 110 route.input |= UPD64083_EXT_Y_ADC;
111 } 111 }
112 } 112 }
113 ivtv_upd64083(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); 113 ivtv_call_hw(itv, IVTV_HW_UPD6408X, video, s_routing, &route);
114 } 114 }
115} 115}
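Routing follows the same split used throughout the patch: the audio muxer is reached through the stored itv->sd_muxer pointer, while everything else is addressed by its hardware flag. A sketch of the audio half, with an illustrative function name and placeholder route values; it assumes the headers ivtv-routing.c already includes:

#include "ivtv-driver.h"
#include "ivtv-cards.h"

/* Route an audio input through the muxer subdev (if any) and the
 * card's audio processor. */
static void example_route_audio(struct ivtv *itv, u32 input)
{
        struct v4l2_routing route = { .input = input, .output = 0 };

        v4l2_subdev_call(itv->sd_muxer, audio, s_routing, &route);
        ivtv_call_hw(itv, itv->card->hw_audio, audio, s_routing, &route);
}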
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 9b7aa79eb267..f77d764707b2 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -172,7 +172,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
172{ 172{
173 struct ivtv_stream *s = &itv->streams[type]; 173 struct ivtv_stream *s = &itv->streams[type];
174 int num_offset = ivtv_stream_info[type].num_offset; 174 int num_offset = ivtv_stream_info[type].num_offset;
175 int num = itv->num + ivtv_first_minor + num_offset; 175 int num = itv->instance + ivtv_first_minor + num_offset;
176 176
177 /* These four fields are always initialized. If v4l2dev == NULL, then 177 /* These four fields are always initialized. If v4l2dev == NULL, then
178 this stream is not in use. In that case no other fields but these 178 this stream is not in use. In that case no other fields but these
@@ -205,11 +205,11 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
205 return -ENOMEM; 205 return -ENOMEM;
206 } 206 }
207 207
208 snprintf(s->v4l2dev->name, sizeof(s->v4l2dev->name), "ivtv%d %s", 208 snprintf(s->v4l2dev->name, sizeof(s->v4l2dev->name), "%s %s",
209 itv->num, s->name); 209 itv->device.name, s->name);
210 210
211 s->v4l2dev->num = num; 211 s->v4l2dev->num = num;
212 s->v4l2dev->parent = &itv->dev->dev; 212 s->v4l2dev->v4l2_dev = &itv->device;
213 s->v4l2dev->fops = ivtv_stream_info[type].fops; 213 s->v4l2dev->fops = ivtv_stream_info[type].fops;
214 s->v4l2dev->release = video_device_release; 214 s->v4l2dev->release = video_device_release;
215 s->v4l2dev->tvnorms = V4L2_STD_ALL; 215 s->v4l2dev->tvnorms = V4L2_STD_ALL;
@@ -260,6 +260,7 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
260 if (s_mpg->v4l2dev) 260 if (s_mpg->v4l2dev)
261 num = s_mpg->v4l2dev->num + ivtv_stream_info[type].num_offset; 261 num = s_mpg->v4l2dev->num + ivtv_stream_info[type].num_offset;
262 } 262 }
263 video_set_drvdata(s->v4l2dev, s);
263 264
264 /* Register device. First try the desired minor, then any free one. */ 265 /* Register device. First try the desired minor, then any free one. */
265 if (video_register_device(s->v4l2dev, vfl_type, num)) { 266 if (video_register_device(s->v4l2dev, vfl_type, num)) {
@@ -343,7 +344,7 @@ static void ivtv_vbi_setup(struct ivtv *itv)
343 ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff , 0, 0, 0, 0); 344 ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff , 0, 0, 0, 0);
344 345
345 /* setup VBI registers */ 346 /* setup VBI registers */
346 itv->video_dec_func(itv, VIDIOC_S_FMT, &itv->vbi.in); 347 v4l2_subdev_call(itv->sd_video, video, s_fmt, &itv->vbi.in);
347 348
348 /* determine number of lines and total number of VBI bytes. 349 /* determine number of lines and total number of VBI bytes.
349 A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1 350 A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1
@@ -577,10 +578,10 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
577 clear_bit(IVTV_F_I_EOS, &itv->i_flags); 578 clear_bit(IVTV_F_I_EOS, &itv->i_flags);
578 579
579 /* Initialize Digitizer for Capture */ 580 /* Initialize Digitizer for Capture */
580 itv->video_dec_func(itv, VIDIOC_STREAMOFF, NULL); 581 v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
581 ivtv_msleep_timeout(300, 1); 582 ivtv_msleep_timeout(300, 1);
582 ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0); 583 ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
583 itv->video_dec_func(itv, VIDIOC_STREAMON, NULL); 584 v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
584 } 585 }
585 586
586 /* begin_capture */ 587 /* begin_capture */
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index 4a37a7d2e69d..5c5d1c462fef 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -21,6 +21,7 @@
21#include "ivtv-i2c.h" 21#include "ivtv-i2c.h"
22#include "ivtv-ioctl.h" 22#include "ivtv-ioctl.h"
23#include "ivtv-queue.h" 23#include "ivtv-queue.h"
24#include "ivtv-cards.h"
24#include "ivtv-vbi.h" 25#include "ivtv-vbi.h"
25 26
26static void ivtv_set_vps(struct ivtv *itv, int enabled) 27static void ivtv_set_vps(struct ivtv *itv, int enabled)
@@ -37,7 +38,7 @@ static void ivtv_set_vps(struct ivtv *itv, int enabled)
37 data.data[9] = itv->vbi.vps_payload.data[2]; 38 data.data[9] = itv->vbi.vps_payload.data[2];
38 data.data[10] = itv->vbi.vps_payload.data[3]; 39 data.data[10] = itv->vbi.vps_payload.data[3];
39 data.data[11] = itv->vbi.vps_payload.data[4]; 40 data.data[11] = itv->vbi.vps_payload.data[4];
40 ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data); 41 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
41} 42}
42 43
43static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc) 44static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
@@ -51,12 +52,12 @@ static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
51 data.line = (mode & 1) ? 21 : 0; 52 data.line = (mode & 1) ? 21 : 0;
52 data.data[0] = cc->odd[0]; 53 data.data[0] = cc->odd[0];
53 data.data[1] = cc->odd[1]; 54 data.data[1] = cc->odd[1];
54 ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data); 55 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
55 data.field = 1; 56 data.field = 1;
56 data.line = (mode & 2) ? 21 : 0; 57 data.line = (mode & 2) ? 21 : 0;
57 data.data[0] = cc->even[0]; 58 data.data[0] = cc->even[0];
58 data.data[1] = cc->even[1]; 59 data.data[1] = cc->even[1];
59 ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data); 60 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
60} 61}
61 62
62static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode) 63static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
@@ -79,7 +80,7 @@ static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
79 data.line = enabled ? 23 : 0; 80 data.line = enabled ? 23 : 0;
80 data.data[0] = mode & 0xff; 81 data.data[0] = mode & 0xff;
81 data.data[1] = (mode >> 8) & 0xff; 82 data.data[1] = (mode >> 8) & 0xff;
82 ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data); 83 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
83} 84}
84 85
85static int odd_parity(u8 c) 86static int odd_parity(u8 c)
@@ -313,7 +314,7 @@ static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8
313 continue; 314 continue;
314 } 315 }
315 vbi.p = p + 4; 316 vbi.p = p + 4;
316 itv->video_dec_func(itv, VIDIOC_INT_DECODE_VBI_LINE, &vbi); 317 v4l2_subdev_call(itv->sd_video, video, decode_vbi_line, &vbi);
317 if (vbi.type && !(lines & (1 << vbi.line))) { 318 if (vbi.type && !(lines & (1 << vbi.line))) {
318 lines |= 1 << vbi.line; 319 lines |= 1 << vbi.line;
319 itv->vbi.sliced_data[line].id = vbi.type; 320 itv->vbi.sliced_data[line].id = vbi.type;
@@ -437,7 +438,7 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
437 data.id = V4L2_SLICED_WSS_625; 438 data.id = V4L2_SLICED_WSS_625;
438 data.field = 0; 439 data.field = 0;
439 440
440 if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) { 441 if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
441 ivtv_set_wss(itv, 1, data.data[0] & 0xf); 442 ivtv_set_wss(itv, 1, data.data[0] & 0xf);
442 vi->wss_missing_cnt = 0; 443 vi->wss_missing_cnt = 0;
443 } else if (vi->wss_missing_cnt == 4) { 444 } else if (vi->wss_missing_cnt == 4) {
@@ -451,13 +452,13 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
451 452
452 data.id = V4L2_SLICED_CAPTION_525; 453 data.id = V4L2_SLICED_CAPTION_525;
453 data.field = 0; 454 data.field = 0;
454 if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) { 455 if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
455 mode |= 1; 456 mode |= 1;
456 cc.odd[0] = data.data[0]; 457 cc.odd[0] = data.data[0];
457 cc.odd[1] = data.data[1]; 458 cc.odd[1] = data.data[1];
458 } 459 }
459 data.field = 1; 460 data.field = 1;
460 if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) { 461 if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
461 mode |= 2; 462 mode |= 2;
462 cc.even[0] = data.data[0]; 463 cc.even[0] = data.data[0];
463 cc.even[1] = data.data[1]; 464 cc.even[1] = data.data[1];
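Sliced VBI data is now pulled from the decoder subdev with the g_vbi_data op; a zero return means the requested service was present for that field. A reduced sketch of the WSS read done above, with an illustrative function name:

#include <linux/string.h>
#include "ivtv-driver.h"

/* Ask the video decoder for the current WSS word; returns 0 and fills
 * *wss on success. */
static int example_read_wss(struct ivtv *itv, int *wss)
{
        struct v4l2_sliced_vbi_data data;

        memset(&data, 0, sizeof(data));
        data.id = V4L2_SLICED_WSS_625;
        data.field = 0;
        if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data))
                return -EIO;
        *wss = data.data[0] & 0xf;
        return 0;
}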
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 921e281876f8..36abd2aef6f1 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -48,6 +48,7 @@
48#endif 48#endif
49 49
50#include "ivtv-driver.h" 50#include "ivtv-driver.h"
51#include "ivtv-cards.h"
51#include "ivtv-i2c.h" 52#include "ivtv-i2c.h"
52#include "ivtv-udma.h" 53#include "ivtv-udma.h"
53#include "ivtv-mailbox.h" 54#include "ivtv-mailbox.h"
@@ -121,15 +122,15 @@ MODULE_LICENSE("GPL");
121#define IVTVFB_DEBUG(x, type, fmt, args...) \ 122#define IVTVFB_DEBUG(x, type, fmt, args...) \
122 do { \ 123 do { \
123 if ((x) & ivtvfb_debug) \ 124 if ((x) & ivtvfb_debug) \
124 printk(KERN_INFO "ivtvfb%d " type ": " fmt, itv->num , ## args); \ 125 printk(KERN_INFO "ivtvfb%d " type ": " fmt, itv->instance , ## args); \
125 } while (0) 126 } while (0)
126#define IVTVFB_DEBUG_WARN(fmt, args...) IVTVFB_DEBUG(IVTVFB_DBGFLG_WARN, "warning", fmt , ## args) 127#define IVTVFB_DEBUG_WARN(fmt, args...) IVTVFB_DEBUG(IVTVFB_DBGFLG_WARN, "warning", fmt , ## args)
127#define IVTVFB_DEBUG_INFO(fmt, args...) IVTVFB_DEBUG(IVTVFB_DBGFLG_INFO, "info", fmt , ## args) 128#define IVTVFB_DEBUG_INFO(fmt, args...) IVTVFB_DEBUG(IVTVFB_DBGFLG_INFO, "info", fmt , ## args)
128 129
129/* Standard kernel messages */ 130/* Standard kernel messages */
130#define IVTVFB_ERR(fmt, args...) printk(KERN_ERR "ivtvfb%d: " fmt, itv->num , ## args) 131#define IVTVFB_ERR(fmt, args...) printk(KERN_ERR "ivtvfb%d: " fmt, itv->instance , ## args)
131#define IVTVFB_WARN(fmt, args...) printk(KERN_WARNING "ivtvfb%d: " fmt, itv->num , ## args) 132#define IVTVFB_WARN(fmt, args...) printk(KERN_WARNING "ivtvfb%d: " fmt, itv->instance , ## args)
132#define IVTVFB_INFO(fmt, args...) printk(KERN_INFO "ivtvfb%d: " fmt, itv->num , ## args) 133#define IVTVFB_INFO(fmt, args...) printk(KERN_INFO "ivtvfb%d: " fmt, itv->instance , ## args)
133 134
134/* --------------------------------------------------------------------- */ 135/* --------------------------------------------------------------------- */
135 136
@@ -895,16 +896,16 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
895 switch (blank_mode) { 896 switch (blank_mode) {
896 case FB_BLANK_UNBLANK: 897 case FB_BLANK_UNBLANK:
897 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1); 898 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1);
898 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL); 899 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
899 break; 900 break;
900 case FB_BLANK_NORMAL: 901 case FB_BLANK_NORMAL:
901 case FB_BLANK_HSYNC_SUSPEND: 902 case FB_BLANK_HSYNC_SUSPEND:
902 case FB_BLANK_VSYNC_SUSPEND: 903 case FB_BLANK_VSYNC_SUSPEND:
903 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0); 904 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
904 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL); 905 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
905 break; 906 break;
906 case FB_BLANK_POWERDOWN: 907 case FB_BLANK_POWERDOWN:
907 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL); 908 ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0);
908 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0); 909 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
909 break; 910 break;
910 } 911 }
@@ -1188,10 +1189,45 @@ static int ivtvfb_init_card(struct ivtv *itv)
1188 1189
1189} 1190}
1190 1191
1192static int __init ivtvfb_callback_init(struct device *dev, void *p)
1193{
1194 struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
1195 struct ivtv *itv = container_of(v4l2_dev, struct ivtv, device);
1196
1197 if (itv && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) {
1198 if (ivtvfb_init_card(itv) == 0) {
1199 IVTVFB_INFO("Framebuffer registered on %s\n",
1200 itv->device.name);
1201 (*(int *)p)++;
1202 }
1203 }
1204 return 0;
1205}
1206
1207static int ivtvfb_callback_cleanup(struct device *dev, void *p)
1208{
1209 struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
1210 struct ivtv *itv = container_of(v4l2_dev, struct ivtv, device);
1211
1212 if (itv && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) {
1213 if (unregister_framebuffer(&itv->osd_info->ivtvfb_info)) {
1214 IVTVFB_WARN("Framebuffer %d is in use, cannot unload\n",
1215 itv->instance);
1216 return 0;
1217 }
1218 IVTVFB_INFO("Unregister framebuffer %d\n", itv->instance);
1219 ivtvfb_blank(FB_BLANK_POWERDOWN, &itv->osd_info->ivtvfb_info);
1220 ivtvfb_release_buffers(itv);
1221 itv->osd_video_pbase = 0;
1222 }
1223 return 0;
1224}
1225
1191static int __init ivtvfb_init(void) 1226static int __init ivtvfb_init(void)
1192{ 1227{
1193 struct ivtv *itv; 1228 struct device_driver *drv;
1194 int i, registered = 0; 1229 int registered = 0;
1230 int err;
1195 1231
1196 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { 1232 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
1197 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", 1233 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
@@ -1199,20 +1235,11 @@ static int __init ivtvfb_init(void)
1199 return -EINVAL; 1235 return -EINVAL;
1200 } 1236 }
1201 1237
1202 /* Locate & initialise all cards supporting an OSD. */ 1238 drv = driver_find("ivtv", &pci_bus_type);
1203 for (i = 0; i < ivtv_cards_active; i++) { 1239 err = driver_for_each_device(drv, NULL, &registered, ivtvfb_callback_init);
1204 if (ivtvfb_card_id != -1 && i != ivtvfb_card_id) 1240 put_driver(drv);
1205 continue;
1206 itv = ivtv_cards[i];
1207 if (itv && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) {
1208 if (ivtvfb_init_card(itv) == 0) {
1209 IVTVFB_INFO("Framebuffer registered on ivtv card id %d\n", i);
1210 registered++;
1211 }
1212 }
1213 }
1214 if (!registered) { 1241 if (!registered) {
1215 printk(KERN_ERR "ivtvfb: no cards found"); 1242 printk(KERN_ERR "ivtvfb: no cards found\n");
1216 return -ENODEV; 1243 return -ENODEV;
1217 } 1244 }
1218 return 0; 1245 return 0;
@@ -1220,24 +1247,14 @@ static int __init ivtvfb_init(void)
1220 1247
1221static void ivtvfb_cleanup(void) 1248static void ivtvfb_cleanup(void)
1222{ 1249{
1223 struct ivtv *itv; 1250 struct device_driver *drv;
1224 int i; 1251 int err;
1225 1252
1226 printk(KERN_INFO "ivtvfb: Unloading framebuffer module\n"); 1253 printk(KERN_INFO "ivtvfb: Unloading framebuffer module\n");
1227 1254
1228 for (i = 0; i < ivtv_cards_active; i++) { 1255 drv = driver_find("ivtv", &pci_bus_type);
1229 itv = ivtv_cards[i]; 1256 err = driver_for_each_device(drv, NULL, NULL, ivtvfb_callback_cleanup);
1230 if (itv && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) && itv->osd_info) { 1257 put_driver(drv);
1231 if (unregister_framebuffer(&itv->osd_info->ivtvfb_info)) {
1232 IVTVFB_WARN("Framebuffer %d is in use, cannot unload\n", i);
1233 return;
1234 }
1235 IVTVFB_DEBUG_INFO("Unregister framebuffer %d\n", i);
1236 ivtvfb_blank(FB_BLANK_POWERDOWN, &itv->osd_info->ivtvfb_info);
1237 ivtvfb_release_buffers(itv);
1238 itv->osd_video_pbase = 0;
1239 }
1240 }
1241} 1258}
1242 1259
1243module_init(ivtvfb_init); 1260module_init(ivtvfb_init);
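
ivtvfb no longer walks the ivtv_cards[]/ivtv_cards_active globals; it asks the driver core for every device bound to the "ivtv" PCI driver and runs a callback per device. A minimal sketch of that pattern follows; my_count_cb and count_ivtv_devices are illustrative names only, not part of the patch:

	/* Sketch of the driver-model iteration used by ivtvfb_init()/cleanup().
	 * The callback sees each struct device bound to the "ivtv" driver. */
	static int my_count_cb(struct device *dev, void *p)
	{
		struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);

		if (v4l2_dev)
			(*(int *)p)++;
		return 0;	/* a non-zero return would stop the iteration */
	}

	static int count_ivtv_devices(void)
	{
		struct device_driver *drv = driver_find("ivtv", &pci_bus_type);
		int count = 0;

		if (drv) {
			driver_for_each_device(drv, NULL, &count, my_count_cb);
			put_driver(drv);  /* drop the reference from driver_find() */
		}
		return count;
	}

driver_for_each_device() stops as soon as the callback returns non-zero, which is why ivtvfb_callback_init() and ivtvfb_callback_cleanup() always return 0 and report per-card results through the counter or log messages instead.
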
diff --git a/drivers/media/video/m52790.c b/drivers/media/video/m52790.c
index 89a781c6929d..07be14a9fe7b 100644
--- a/drivers/media/video/m52790.c
+++ b/drivers/media/video/m52790.c
@@ -28,7 +28,7 @@
28#include <linux/i2c-id.h> 28#include <linux/i2c-id.h>
29#include <linux/videodev2.h> 29#include <linux/videodev2.h>
30#include <media/m52790.h> 30#include <media/m52790.h>
31#include <media/v4l2-common.h> 31#include <media/v4l2-device.h>
32#include <media/v4l2-chip-ident.h> 32#include <media/v4l2-chip-ident.h>
33#include <media/v4l2-i2c-drv.h> 33#include <media/v4l2-i2c-drv.h>
34 34
@@ -38,89 +38,130 @@ MODULE_LICENSE("GPL");
38 38
39 39
40struct m52790_state { 40struct m52790_state {
41 struct v4l2_subdev sd;
41 u16 input; 42 u16 input;
42 u16 output; 43 u16 output;
43}; 44};
44 45
46static inline struct m52790_state *to_state(struct v4l2_subdev *sd)
47{
48 return container_of(sd, struct m52790_state, sd);
49}
50
45/* ----------------------------------------------------------------------- */ 51/* ----------------------------------------------------------------------- */
46 52
47static int m52790_write(struct i2c_client *client) 53static int m52790_write(struct v4l2_subdev *sd)
48{ 54{
49 struct m52790_state *state = i2c_get_clientdata(client); 55 struct m52790_state *state = to_state(sd);
56 struct i2c_client *client = v4l2_get_subdevdata(sd);
57
50 u8 sw1 = (state->input | state->output) & 0xff; 58 u8 sw1 = (state->input | state->output) & 0xff;
51 u8 sw2 = (state->input | state->output) >> 8; 59 u8 sw2 = (state->input | state->output) >> 8;
52 60
53 return i2c_smbus_write_byte_data(client, sw1, sw2); 61 return i2c_smbus_write_byte_data(client, sw1, sw2);
54} 62}
55 63
56static int m52790_command(struct i2c_client *client, unsigned int cmd, 64/* Note: audio and video are linked and cannot be switched separately.
57 void *arg) 65 So audio and video routing commands are identical for this chip.
66 In theory the video amplifier and audio modes could be handled
67 separately for the output, but that seems to be overkill right now.
68 The same holds for implementing an audio mute control, this is now
69 part of the audio output routing. The normal case is that another
70 chip takes care of the actual muting so making it part of the
71 output routing seems to be the right thing to do for now. */
72static int m52790_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
58{ 73{
59 struct m52790_state *state = i2c_get_clientdata(client); 74 struct m52790_state *state = to_state(sd);
60 struct v4l2_routing *route = arg; 75
61 76 state->input = route->input;
62 /* Note: audio and video are linked and cannot be switched separately. 77 state->output = route->output;
63 So audio and video routing commands are identical for this chip. 78 m52790_write(sd);
64 In theory the video amplifier and audio modes could be handled 79 return 0;
65 separately for the output, but that seems to be overkill right now. 80}
66 The same holds for implementing an audio mute control, this is now
67 part of the audio output routing. The normal case is that another
68 chip takes care of the actual muting so making it part of the
69 output routing seems to be the right thing to do for now. */
70 switch (cmd) {
71 case VIDIOC_INT_G_AUDIO_ROUTING:
72 case VIDIOC_INT_G_VIDEO_ROUTING:
73 route->input = state->input;
74 route->output = state->output;
75 break;
76
77 case VIDIOC_INT_S_AUDIO_ROUTING:
78 case VIDIOC_INT_S_VIDEO_ROUTING:
79 state->input = route->input;
80 state->output = route->output;
81 m52790_write(client);
82 break;
83 81
84#ifdef CONFIG_VIDEO_ADV_DEBUG 82#ifdef CONFIG_VIDEO_ADV_DEBUG
85 case VIDIOC_DBG_G_REGISTER: 83static int m52790_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
86 case VIDIOC_DBG_S_REGISTER: 84{
87 { 85 struct m52790_state *state = to_state(sd);
88 struct v4l2_register *reg = arg; 86 struct i2c_client *client = v4l2_get_subdevdata(sd);
89
90 if (!v4l2_chip_match_i2c_client(client,
91 reg->match_type, reg->match_chip))
92 return -EINVAL;
93 if (!capable(CAP_SYS_ADMIN))
94 return -EPERM;
95 if (reg->reg != 0)
96 return -EINVAL;
97 if (cmd == VIDIOC_DBG_G_REGISTER)
98 reg->val = state->input | state->output;
99 else {
100 state->input = reg->val & 0x0303;
101 state->output = reg->val & ~0x0303;
102 m52790_write(client);
103 }
104 break;
105 }
106#endif
107 87
108 case VIDIOC_G_CHIP_IDENT: 88 if (!v4l2_chip_match_i2c_client(client,
109 return v4l2_chip_ident_i2c_client(client, arg, 89 reg->match_type, reg->match_chip))
110 V4L2_IDENT_M52790, 0); 90 return -EINVAL;
91 if (!capable(CAP_SYS_ADMIN))
92 return -EPERM;
93 if (reg->reg != 0)
94 return -EINVAL;
95 reg->val = state->input | state->output;
96 return 0;
97}
111 98
112 case VIDIOC_LOG_STATUS: 99static int m52790_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
113 v4l_info(client, "Switch 1: %02x\n", 100{
114 (state->input | state->output) & 0xff); 101 struct m52790_state *state = to_state(sd);
115 v4l_info(client, "Switch 2: %02x\n", 102 struct i2c_client *client = v4l2_get_subdevdata(sd);
116 (state->input | state->output) >> 8);
117 break;
118 103
119 default: 104 if (!v4l2_chip_match_i2c_client(client,
105 reg->match_type, reg->match_chip))
120 return -EINVAL; 106 return -EINVAL;
121 } 107 if (!capable(CAP_SYS_ADMIN))
108 return -EPERM;
109 if (reg->reg != 0)
110 return -EINVAL;
111 state->input = reg->val & 0x0303;
112 state->output = reg->val & ~0x0303;
113 m52790_write(sd);
122 return 0; 114 return 0;
123} 115}
116#endif
117
118static int m52790_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
119{
120 struct i2c_client *client = v4l2_get_subdevdata(sd);
121
122 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_M52790, 0);
123}
124
125static int m52790_log_status(struct v4l2_subdev *sd)
126{
127 struct m52790_state *state = to_state(sd);
128
129 v4l2_info(sd, "Switch 1: %02x\n",
130 (state->input | state->output) & 0xff);
131 v4l2_info(sd, "Switch 2: %02x\n",
132 (state->input | state->output) >> 8);
133 return 0;
134}
135
136static int m52790_command(struct i2c_client *client, unsigned cmd, void *arg)
137{
138 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
139}
140
141/* ----------------------------------------------------------------------- */
142
143static const struct v4l2_subdev_core_ops m52790_core_ops = {
144 .log_status = m52790_log_status,
145 .g_chip_ident = m52790_g_chip_ident,
146#ifdef CONFIG_VIDEO_ADV_DEBUG
147 .g_register = m52790_g_register,
148 .s_register = m52790_s_register,
149#endif
150};
151
152static const struct v4l2_subdev_audio_ops m52790_audio_ops = {
153 .s_routing = m52790_s_routing,
154};
155
156static const struct v4l2_subdev_video_ops m52790_video_ops = {
157 .s_routing = m52790_s_routing,
158};
159
160static const struct v4l2_subdev_ops m52790_ops = {
161 .core = &m52790_core_ops,
162 .audio = &m52790_audio_ops,
163 .video = &m52790_video_ops,
164};
124 165
125/* ----------------------------------------------------------------------- */ 166/* ----------------------------------------------------------------------- */
126 167
@@ -130,6 +171,7 @@ static int m52790_probe(struct i2c_client *client,
130 const struct i2c_device_id *id) 171 const struct i2c_device_id *id)
131{ 172{
132 struct m52790_state *state; 173 struct m52790_state *state;
174 struct v4l2_subdev *sd;
133 175
134 /* Check if the adapter supports the needed features */ 176 /* Check if the adapter supports the needed features */
135 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 177 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -142,16 +184,20 @@ static int m52790_probe(struct i2c_client *client,
142 if (state == NULL) 184 if (state == NULL)
143 return -ENOMEM; 185 return -ENOMEM;
144 186
187 sd = &state->sd;
188 v4l2_i2c_subdev_init(sd, client, &m52790_ops);
145 state->input = M52790_IN_TUNER; 189 state->input = M52790_IN_TUNER;
146 state->output = M52790_OUT_STEREO; 190 state->output = M52790_OUT_STEREO;
147 i2c_set_clientdata(client, state); 191 m52790_write(sd);
148 m52790_write(client);
149 return 0; 192 return 0;
150} 193}
151 194
152static int m52790_remove(struct i2c_client *client) 195static int m52790_remove(struct i2c_client *client)
153{ 196{
154 kfree(i2c_get_clientdata(client)); 197 struct v4l2_subdev *sd = i2c_get_clientdata(client);
198
199 v4l2_device_unregister_subdev(sd);
200 kfree(to_state(sd));
155 return 0; 201 return 0;
156} 202}
157 203
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index 3da74dcee902..a622dbb72ed8 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -51,14 +51,14 @@
51#include <linux/module.h> 51#include <linux/module.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/i2c.h> 53#include <linux/i2c.h>
54#include <linux/kthread.h>
55#include <linux/freezer.h>
54#include <linux/videodev2.h> 56#include <linux/videodev2.h>
55#include <media/v4l2-common.h> 57#include <media/v4l2-device.h>
56#include <media/v4l2-ioctl.h> 58#include <media/v4l2-ioctl.h>
57#include <media/v4l2-i2c-drv-legacy.h> 59#include <media/v4l2-i2c-drv-legacy.h>
58#include <media/tvaudio.h>
59#include <media/msp3400.h> 60#include <media/msp3400.h>
60#include <linux/kthread.h> 61#include <media/tvaudio.h>
61#include <linux/freezer.h>
62#include "msp3400-driver.h" 62#include "msp3400-driver.h"
63 63
64/* ---------------------------------------------------------------------- */ 64/* ---------------------------------------------------------------------- */
@@ -265,7 +265,7 @@ static char *scart_names[] = {
265 265
266void msp_set_scart(struct i2c_client *client, int in, int out) 266void msp_set_scart(struct i2c_client *client, int in, int out)
267{ 267{
268 struct msp_state *state = i2c_get_clientdata(client); 268 struct msp_state *state = to_state(i2c_get_clientdata(client));
269 269
270 state->in_scart = in; 270 state->in_scart = in;
271 271
@@ -289,7 +289,7 @@ void msp_set_scart(struct i2c_client *client, int in, int out)
289 289
290void msp_set_audio(struct i2c_client *client) 290void msp_set_audio(struct i2c_client *client)
291{ 291{
292 struct msp_state *state = i2c_get_clientdata(client); 292 struct msp_state *state = to_state(i2c_get_clientdata(client));
293 int bal = 0, bass, treble, loudness; 293 int bal = 0, bass, treble, loudness;
294 int val = 0; 294 int val = 0;
295 int reallymuted = state->muted | state->scan_in_progress; 295 int reallymuted = state->muted | state->scan_in_progress;
@@ -336,7 +336,7 @@ void msp_set_audio(struct i2c_client *client)
336 336
337static void msp_wake_thread(struct i2c_client *client) 337static void msp_wake_thread(struct i2c_client *client)
338{ 338{
339 struct msp_state *state = i2c_get_clientdata(client); 339 struct msp_state *state = to_state(i2c_get_clientdata(client));
340 340
341 if (NULL == state->kthread) 341 if (NULL == state->kthread)
342 return; 342 return;
@@ -390,9 +390,9 @@ static int msp_mode_v4l1_to_v4l2(int mode)
390} 390}
391#endif 391#endif
392 392
393static int msp_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl) 393static int msp_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
394{ 394{
395 struct msp_state *state = i2c_get_clientdata(client); 395 struct msp_state *state = to_state(sd);
396 396
397 switch (ctrl->id) { 397 switch (ctrl->id) {
398 case V4L2_CID_AUDIO_VOLUME: 398 case V4L2_CID_AUDIO_VOLUME:
@@ -433,9 +433,10 @@ static int msp_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
433 return 0; 433 return 0;
434} 434}
435 435
436static int msp_set_ctrl(struct i2c_client *client, struct v4l2_control *ctrl) 436static int msp_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
437{ 437{
438 struct msp_state *state = i2c_get_clientdata(client); 438 struct msp_state *state = to_state(sd);
439 struct i2c_client *client = v4l2_get_subdevdata(sd);
439 440
440 switch (ctrl->id) { 441 switch (ctrl->id) {
441 case V4L2_CID_AUDIO_VOLUME: 442 case V4L2_CID_AUDIO_VOLUME:
@@ -481,40 +482,16 @@ static int msp_set_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
481 return 0; 482 return 0;
482} 483}
483 484
484static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg) 485#ifdef CONFIG_VIDEO_ALLOW_V4L1
486static int msp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
485{ 487{
486 struct msp_state *state = i2c_get_clientdata(client); 488 struct msp_state *state = to_state(sd);
487 489 struct i2c_client *client = v4l2_get_subdevdata(sd);
488 if (msp_debug >= 2)
489 v4l_i2c_print_ioctl(client, cmd);
490 490
491 switch (cmd) { 491 switch (cmd) {
492 case AUDC_SET_RADIO:
493 if (state->radio)
494 return 0;
495 state->radio = 1;
496 v4l_dbg(1, msp_debug, client, "switching to radio mode\n");
497 state->watch_stereo = 0;
498 switch (state->opmode) {
499 case OPMODE_MANUAL:
500 /* set msp3400 to FM radio mode */
501 msp3400c_set_mode(client, MSP_MODE_FM_RADIO);
502 msp3400c_set_carrier(client, MSP_CARRIER(10.7),
503 MSP_CARRIER(10.7));
504 msp_set_audio(client);
505 break;
506 case OPMODE_AUTODETECT:
507 case OPMODE_AUTOSELECT:
508 /* the thread will do for us */
509 msp_wake_thread(client);
510 break;
511 }
512 break;
513
514 /* --- v4l ioctls --- */ 492 /* --- v4l ioctls --- */
515 /* take care: bttv does userspace copying, we'll get a 493 /* take care: bttv does userspace copying, we'll get a
516 kernel pointer here... */ 494 kernel pointer here... */
517#ifdef CONFIG_VIDEO_ALLOW_V4L1
518 case VIDIOCGAUDIO: 495 case VIDIOCGAUDIO:
519 { 496 {
520 struct video_audio *va = arg; 497 struct video_audio *va = arg;
@@ -588,105 +565,137 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
588 msp_wake_thread(client); 565 msp_wake_thread(client);
589 break; 566 break;
590 } 567 }
591#endif 568 default:
592 case VIDIOC_S_FREQUENCY: 569 return -ENOIOCTLCMD;
593 {
594 /* new channel -- kick audio carrier scan */
595 msp_wake_thread(client);
596 break;
597 } 570 }
571 return 0;
572}
573#endif
598 574
599 /* --- v4l2 ioctls --- */ 575/* --- v4l2 ioctls --- */
600 case VIDIOC_S_STD: 576static int msp_s_radio(struct v4l2_subdev *sd)
601 { 577{
602 v4l2_std_id *id = arg; 578 struct msp_state *state = to_state(sd);
603 int update = state->radio || state->v4l2_std != *id; 579 struct i2c_client *client = v4l2_get_subdevdata(sd);
604 580
605 state->v4l2_std = *id; 581 if (state->radio)
606 state->radio = 0;
607 if (update)
608 msp_wake_thread(client);
609 return 0; 582 return 0;
583 state->radio = 1;
584 v4l_dbg(1, msp_debug, client, "switching to radio mode\n");
585 state->watch_stereo = 0;
586 switch (state->opmode) {
587 case OPMODE_MANUAL:
588 /* set msp3400 to FM radio mode */
589 msp3400c_set_mode(client, MSP_MODE_FM_RADIO);
590 msp3400c_set_carrier(client, MSP_CARRIER(10.7),
591 MSP_CARRIER(10.7));
592 msp_set_audio(client);
593 break;
594 case OPMODE_AUTODETECT:
595 case OPMODE_AUTOSELECT:
596 /* the thread will do for us */
597 msp_wake_thread(client);
598 break;
610 } 599 }
600 return 0;
601}
611 602
612 case VIDIOC_INT_G_AUDIO_ROUTING: 603static int msp_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
613 { 604{
614 struct v4l2_routing *rt = arg; 605 struct i2c_client *client = v4l2_get_subdevdata(sd);
615 606
616 *rt = state->routing; 607 /* new channel -- kick audio carrier scan */
617 break; 608 msp_wake_thread(client);
618 } 609 return 0;
610}
619 611
620 case VIDIOC_INT_S_AUDIO_ROUTING: 612static int msp_s_std(struct v4l2_subdev *sd, v4l2_std_id id)
621 { 613{
622 struct v4l2_routing *rt = arg; 614 struct msp_state *state = to_state(sd);
623 int tuner = (rt->input >> 3) & 1; 615 struct i2c_client *client = v4l2_get_subdevdata(sd);
624 int sc_in = rt->input & 0x7; 616 int update = state->radio || state->v4l2_std != id;
625 int sc1_out = rt->output & 0xf; 617
626 int sc2_out = (rt->output >> 4) & 0xf; 618 state->v4l2_std = id;
627 u16 val, reg; 619 state->radio = 0;
628 int i; 620 if (update)
629 int extern_input = 1;
630
631 if (state->routing.input == rt->input &&
632 state->routing.output == rt->output)
633 break;
634 state->routing = *rt;
635 /* check if the tuner input is used */
636 for (i = 0; i < 5; i++) {
637 if (((rt->input >> (4 + i * 4)) & 0xf) == 0)
638 extern_input = 0;
639 }
640 state->mode = extern_input ? MSP_MODE_EXTERN : MSP_MODE_AM_DETECT;
641 state->rxsubchans = V4L2_TUNER_SUB_STEREO;
642 msp_set_scart(client, sc_in, 0);
643 msp_set_scart(client, sc1_out, 1);
644 msp_set_scart(client, sc2_out, 2);
645 msp_set_audmode(client);
646 reg = (state->opmode == OPMODE_AUTOSELECT) ? 0x30 : 0xbb;
647 val = msp_read_dem(client, reg);
648 msp_write_dem(client, reg, (val & ~0x100) | (tuner << 8));
649 /* wake thread when a new input is chosen */
650 msp_wake_thread(client); 621 msp_wake_thread(client);
651 break; 622 return 0;
623}
624
625static int msp_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *rt)
626{
627 struct msp_state *state = to_state(sd);
628 struct i2c_client *client = v4l2_get_subdevdata(sd);
629 int tuner = (rt->input >> 3) & 1;
630 int sc_in = rt->input & 0x7;
631 int sc1_out = rt->output & 0xf;
632 int sc2_out = (rt->output >> 4) & 0xf;
633 u16 val, reg;
634 int i;
635 int extern_input = 1;
636
637 if (state->routing.input == rt->input &&
638 state->routing.output == rt->output)
639 return 0;
640 state->routing = *rt;
641 /* check if the tuner input is used */
642 for (i = 0; i < 5; i++) {
643 if (((rt->input >> (4 + i * 4)) & 0xf) == 0)
644 extern_input = 0;
652 } 645 }
646 state->mode = extern_input ? MSP_MODE_EXTERN : MSP_MODE_AM_DETECT;
647 state->rxsubchans = V4L2_TUNER_SUB_STEREO;
648 msp_set_scart(client, sc_in, 0);
649 msp_set_scart(client, sc1_out, 1);
650 msp_set_scart(client, sc2_out, 2);
651 msp_set_audmode(client);
652 reg = (state->opmode == OPMODE_AUTOSELECT) ? 0x30 : 0xbb;
653 val = msp_read_dem(client, reg);
654 msp_write_dem(client, reg, (val & ~0x100) | (tuner << 8));
655 /* wake thread when a new input is chosen */
656 msp_wake_thread(client);
657 return 0;
658}
653 659
654 case VIDIOC_G_TUNER: 660static int msp_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
655 { 661{
656 struct v4l2_tuner *vt = arg; 662 struct msp_state *state = to_state(sd);
663 struct i2c_client *client = v4l2_get_subdevdata(sd);
657 664
658 if (state->radio) 665 if (state->radio)
659 break; 666 return 0;
660 if (state->opmode == OPMODE_AUTOSELECT) 667 if (state->opmode == OPMODE_AUTOSELECT)
661 msp_detect_stereo(client); 668 msp_detect_stereo(client);
662 vt->audmode = state->audmode; 669 vt->audmode = state->audmode;
663 vt->rxsubchans = state->rxsubchans; 670 vt->rxsubchans = state->rxsubchans;
664 vt->capability |= V4L2_TUNER_CAP_STEREO | 671 vt->capability |= V4L2_TUNER_CAP_STEREO |
665 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; 672 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
666 break; 673 return 0;
667 } 674}
668 675
669 case VIDIOC_S_TUNER: 676static int msp_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
670 { 677{
671 struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; 678 struct msp_state *state = to_state(sd);
679 struct i2c_client *client = v4l2_get_subdevdata(sd);
672 680
673 if (state->radio) /* TODO: add mono/stereo support for radio */ 681 if (state->radio) /* TODO: add mono/stereo support for radio */
674 break; 682 return 0;
675 if (state->audmode == vt->audmode) 683 if (state->audmode == vt->audmode)
676 break; 684 return 0;
677 state->audmode = vt->audmode; 685 state->audmode = vt->audmode;
678 /* only set audmode */ 686 /* only set audmode */
679 msp_set_audmode(client); 687 msp_set_audmode(client);
680 break; 688 return 0;
681 } 689}
682 690
683 case VIDIOC_INT_I2S_CLOCK_FREQ: 691static int msp_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
684 { 692{
685 u32 *a = (u32 *)arg; 693 struct msp_state *state = to_state(sd);
694 struct i2c_client *client = v4l2_get_subdevdata(sd);
686 695
687 v4l_dbg(1, msp_debug, client, "Setting I2S speed to %d\n", *a); 696 v4l_dbg(1, msp_debug, client, "Setting I2S speed to %d\n", freq);
688 697
689 switch (*a) { 698 switch (freq) {
690 case 1024000: 699 case 1024000:
691 state->i2s_mode = 0; 700 state->i2s_mode = 0;
692 break; 701 break;
@@ -695,24 +704,24 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
695 break; 704 break;
696 default: 705 default:
697 return -EINVAL; 706 return -EINVAL;
698 }
699 break;
700 } 707 }
708 return 0;
709}
701 710
702 case VIDIOC_QUERYCTRL: 711static int msp_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
703 { 712{
704 struct v4l2_queryctrl *qc = arg; 713 struct msp_state *state = to_state(sd);
705 714
706 switch (qc->id) { 715 switch (qc->id) {
707 case V4L2_CID_AUDIO_VOLUME: 716 case V4L2_CID_AUDIO_VOLUME:
708 case V4L2_CID_AUDIO_MUTE: 717 case V4L2_CID_AUDIO_MUTE:
709 return v4l2_ctrl_query_fill_std(qc); 718 return v4l2_ctrl_query_fill_std(qc);
710 default: 719 default:
711 break; 720 break;
712 } 721 }
713 if (!state->has_sound_processing) 722 if (!state->has_sound_processing)
714 return -EINVAL; 723 return -EINVAL;
715 switch (qc->id) { 724 switch (qc->id) {
716 case V4L2_CID_AUDIO_LOUDNESS: 725 case V4L2_CID_AUDIO_LOUDNESS:
717 case V4L2_CID_AUDIO_BALANCE: 726 case V4L2_CID_AUDIO_BALANCE:
718 case V4L2_CID_AUDIO_BASS: 727 case V4L2_CID_AUDIO_BASS:
@@ -720,32 +729,38 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
720 return v4l2_ctrl_query_fill_std(qc); 729 return v4l2_ctrl_query_fill_std(qc);
721 default: 730 default:
722 return -EINVAL; 731 return -EINVAL;
723 }
724 } 732 }
733 return 0;
734}
725 735
726 case VIDIOC_G_CTRL: 736static int msp_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
727 return msp_get_ctrl(client, arg); 737{
728 738 struct msp_state *state = to_state(sd);
729 case VIDIOC_S_CTRL: 739 struct i2c_client *client = v4l2_get_subdevdata(sd);
730 return msp_set_ctrl(client, arg);
731 740
732 case VIDIOC_LOG_STATUS: 741 return v4l2_chip_ident_i2c_client(client, chip, state->ident,
733 { 742 (state->rev1 << 16) | state->rev2);
734 const char *p; 743}
735 744
736 if (state->opmode == OPMODE_AUTOSELECT) 745static int msp_log_status(struct v4l2_subdev *sd)
737 msp_detect_stereo(client); 746{
738 v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n", 747 struct msp_state *state = to_state(sd);
739 client->name, state->rev1, state->rev2); 748 struct i2c_client *client = v4l2_get_subdevdata(sd);
740 v4l_info(client, "Audio: volume %d%s\n", 749 const char *p;
741 state->volume, state->muted ? " (muted)" : ""); 750
742 if (state->has_sound_processing) { 751 if (state->opmode == OPMODE_AUTOSELECT)
743 v4l_info(client, "Audio: balance %d bass %d treble %d loudness %s\n", 752 msp_detect_stereo(client);
744 state->balance, state->bass, 753 v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
745 state->treble, 754 client->name, state->rev1, state->rev2);
746 state->loudness ? "on" : "off"); 755 v4l_info(client, "Audio: volume %d%s\n",
747 } 756 state->volume, state->muted ? " (muted)" : "");
748 switch (state->mode) { 757 if (state->has_sound_processing) {
758 v4l_info(client, "Audio: balance %d bass %d treble %d loudness %s\n",
759 state->balance, state->bass,
760 state->treble,
761 state->loudness ? "on" : "off");
762 }
763 switch (state->mode) {
749 case MSP_MODE_AM_DETECT: p = "AM (for carrier detect)"; break; 764 case MSP_MODE_AM_DETECT: p = "AM (for carrier detect)"; break;
750 case MSP_MODE_FM_RADIO: p = "FM Radio"; break; 765 case MSP_MODE_FM_RADIO: p = "FM Radio"; break;
751 case MSP_MODE_FM_TERRA: p = "Terrestial FM-mono/stereo"; break; 766 case MSP_MODE_FM_TERRA: p = "Terrestial FM-mono/stereo"; break;
@@ -756,36 +771,25 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
756 case MSP_MODE_BTSC: p = "BTSC"; break; 771 case MSP_MODE_BTSC: p = "BTSC"; break;
757 case MSP_MODE_EXTERN: p = "External input"; break; 772 case MSP_MODE_EXTERN: p = "External input"; break;
758 default: p = "unknown"; break; 773 default: p = "unknown"; break;
759 } 774 }
760 if (state->mode == MSP_MODE_EXTERN) { 775 if (state->mode == MSP_MODE_EXTERN) {
761 v4l_info(client, "Mode: %s\n", p); 776 v4l_info(client, "Mode: %s\n", p);
762 } else if (state->opmode == OPMODE_MANUAL) { 777 } else if (state->opmode == OPMODE_MANUAL) {
763 v4l_info(client, "Mode: %s (%s%s)\n", p, 778 v4l_info(client, "Mode: %s (%s%s)\n", p,
764 (state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono", 779 (state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
765 (state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : ""); 780 (state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
766 } else { 781 } else {
767 if (state->opmode == OPMODE_AUTODETECT) 782 if (state->opmode == OPMODE_AUTODETECT)
768 v4l_info(client, "Mode: %s\n", p); 783 v4l_info(client, "Mode: %s\n", p);
769 v4l_info(client, "Standard: %s (%s%s)\n", 784 v4l_info(client, "Standard: %s (%s%s)\n",
770 msp_standard_std_name(state->std), 785 msp_standard_std_name(state->std),
771 (state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono", 786 (state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
772 (state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : ""); 787 (state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
773 }
774 v4l_info(client, "Audmode: 0x%04x\n", state->audmode);
775 v4l_info(client, "Routing: 0x%08x (input) 0x%08x (output)\n",
776 state->routing.input, state->routing.output);
777 v4l_info(client, "ACB: 0x%04x\n", state->acb);
778 break;
779 }
780
781 case VIDIOC_G_CHIP_IDENT:
782 return v4l2_chip_ident_i2c_client(client, arg, state->ident,
783 (state->rev1 << 16) | state->rev2);
784
785 default:
786 /* unknown */
787 return -EINVAL;
788 } 788 }
789 v4l_info(client, "Audmode: 0x%04x\n", state->audmode);
790 v4l_info(client, "Routing: 0x%08x (input) 0x%08x (output)\n",
791 state->routing.input, state->routing.output);
792 v4l_info(client, "ACB: 0x%04x\n", state->acb);
789 return 0; 793 return 0;
790} 794}
791 795
@@ -803,11 +807,49 @@ static int msp_resume(struct i2c_client *client)
803 return 0; 807 return 0;
804} 808}
805 809
810static int msp_command(struct i2c_client *client, unsigned cmd, void *arg)
811{
812 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
813}
814
815/* ----------------------------------------------------------------------- */
816
817static const struct v4l2_subdev_core_ops msp_core_ops = {
818 .log_status = msp_log_status,
819 .g_chip_ident = msp_g_chip_ident,
820 .g_ctrl = msp_g_ctrl,
821 .s_ctrl = msp_s_ctrl,
822 .queryctrl = msp_queryctrl,
823#ifdef CONFIG_VIDEO_ALLOW_V4L1
824 .ioctl = msp_ioctl,
825#endif
826};
827
828static const struct v4l2_subdev_tuner_ops msp_tuner_ops = {
829 .s_frequency = msp_s_frequency,
830 .g_tuner = msp_g_tuner,
831 .s_tuner = msp_s_tuner,
832 .s_radio = msp_s_radio,
833 .s_std = msp_s_std,
834};
835
836static const struct v4l2_subdev_audio_ops msp_audio_ops = {
837 .s_routing = msp_s_routing,
838 .s_i2s_clock_freq = msp_s_i2s_clock_freq,
839};
840
841static const struct v4l2_subdev_ops msp_ops = {
842 .core = &msp_core_ops,
843 .tuner = &msp_tuner_ops,
844 .audio = &msp_audio_ops,
845};
846
806/* ----------------------------------------------------------------------- */ 847/* ----------------------------------------------------------------------- */
807 848
808static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id) 849static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
809{ 850{
810 struct msp_state *state; 851 struct msp_state *state;
852 struct v4l2_subdev *sd;
811 int (*thread_func)(void *data) = NULL; 853 int (*thread_func)(void *data) = NULL;
812 int msp_hard; 854 int msp_hard;
813 int msp_family; 855 int msp_family;
@@ -827,7 +869,8 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
827 if (!state) 869 if (!state)
828 return -ENOMEM; 870 return -ENOMEM;
829 871
830 i2c_set_clientdata(client, state); 872 sd = &state->sd;
873 v4l2_i2c_subdev_init(sd, client, &msp_ops);
831 874
832 state->v4l2_std = V4L2_STD_NTSC; 875 state->v4l2_std = V4L2_STD_NTSC;
833 state->audmode = V4L2_TUNER_MODE_STEREO; 876 state->audmode = V4L2_TUNER_MODE_STEREO;
@@ -972,8 +1015,9 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
972 1015
973static int msp_remove(struct i2c_client *client) 1016static int msp_remove(struct i2c_client *client)
974{ 1017{
975 struct msp_state *state = i2c_get_clientdata(client); 1018 struct msp_state *state = to_state(i2c_get_clientdata(client));
976 1019
1020 v4l2_device_unregister_subdev(&state->sd);
977 /* shutdown control thread */ 1021 /* shutdown control thread */
978 if (state->kthread) { 1022 if (state->kthread) {
979 state->restart = 1; 1023 state->restart = 1;
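
In msp3400-driver.c the single msp_command() switch is split into one handler per v4l2_subdev operation, and msp_s_routing() keeps the exact bit-slicing the old VIDIOC_INT_S_AUDIO_ROUTING branch used. For clarity, the sketch below spells out that decode; struct msp_routing_sketch and the function name are made up for illustration:

	/* Illustrative decode of the packed routing words, mirroring the shifts
	 * and masks in msp_s_routing() above. */
	struct msp_routing_sketch {
		int tuner, sc_in, sc1_out, sc2_out;
	};

	static void msp_decode_routing(u32 input, u32 output,
				       struct msp_routing_sketch *r)
	{
		r->sc_in   = input & 0x7;		/* bits 2:0 */
		r->tuner   = (input >> 3) & 1;		/* bit  3   */
		r->sc1_out = output & 0xf;		/* bits 3:0 */
		r->sc2_out = (output >> 4) & 0xf;	/* bits 7:4 */
	}

msp_s_routing() additionally scans five 4-bit source fields starting at bit 4 of the input word; if any of them is zero, the tuner input is considered in use (per the "check if the tuner input is used" comment) and state->mode falls back from MSP_MODE_EXTERN to MSP_MODE_AM_DETECT, exactly as in the code above.
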
diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h
index ab69a290e5dc..3fe1c1b10f53 100644
--- a/drivers/media/video/msp3400-driver.h
+++ b/drivers/media/video/msp3400-driver.h
@@ -5,6 +5,7 @@
5#define MSP3400_DRIVER_H 5#define MSP3400_DRIVER_H
6 6
7#include <media/msp3400.h> 7#include <media/msp3400.h>
8#include <media/v4l2-device.h>
8 9
9/* ---------------------------------------------------------------------- */ 10/* ---------------------------------------------------------------------- */
10 11
@@ -49,6 +50,7 @@ extern int msp_dolby;
49extern int msp_stereo_thresh; 50extern int msp_stereo_thresh;
50 51
51struct msp_state { 52struct msp_state {
53 struct v4l2_subdev sd;
52 int rev1, rev2; 54 int rev1, rev2;
53 int ident; 55 int ident;
54 u8 has_nicam; 56 u8 has_nicam;
@@ -96,6 +98,11 @@ struct msp_state {
96 unsigned int watch_stereo:1; 98 unsigned int watch_stereo:1;
97}; 99};
98 100
101static inline struct msp_state *to_state(struct v4l2_subdev *sd)
102{
103 return container_of(sd, struct msp_state, sd);
104}
105
99/* msp3400-driver.c */ 106/* msp3400-driver.c */
100int msp_write_dem(struct i2c_client *client, int addr, int val); 107int msp_write_dem(struct i2c_client *client, int addr, int val);
101int msp_write_dsp(struct i2c_client *client, int addr, int val); 108int msp_write_dsp(struct i2c_client *client, int addr, int val);
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index 846a14a61fd1..a655e9c30146 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -159,7 +159,7 @@ const char *msp_standard_std_name(int std)
159 159
160static void msp_set_source(struct i2c_client *client, u16 src) 160static void msp_set_source(struct i2c_client *client, u16 src)
161{ 161{
162 struct msp_state *state = i2c_get_clientdata(client); 162 struct msp_state *state = to_state(i2c_get_clientdata(client));
163 163
164 if (msp_dolby) { 164 if (msp_dolby) {
165 msp_write_dsp(client, 0x0008, 0x0520); /* I2S1 */ 165 msp_write_dsp(client, 0x0008, 0x0520); /* I2S1 */
@@ -186,7 +186,7 @@ void msp3400c_set_carrier(struct i2c_client *client, int cdo1, int cdo2)
186 186
187void msp3400c_set_mode(struct i2c_client *client, int mode) 187void msp3400c_set_mode(struct i2c_client *client, int mode)
188{ 188{
189 struct msp_state *state = i2c_get_clientdata(client); 189 struct msp_state *state = to_state(i2c_get_clientdata(client));
190 struct msp3400c_init_data_dem *data = &msp3400c_init_data[mode]; 190 struct msp3400c_init_data_dem *data = &msp3400c_init_data[mode];
191 int tuner = (state->routing.input >> 3) & 1; 191 int tuner = (state->routing.input >> 3) & 1;
192 int i; 192 int i;
@@ -227,7 +227,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
227 static char *strmode[] = { 227 static char *strmode[] = {
228 "mono", "stereo", "lang2", "lang1", "lang1+lang2" 228 "mono", "stereo", "lang2", "lang1", "lang1+lang2"
229 }; 229 };
230 struct msp_state *state = i2c_get_clientdata(client); 230 struct msp_state *state = to_state(i2c_get_clientdata(client));
231 char *modestr = (state->audmode >= 0 && state->audmode < 5) ? 231 char *modestr = (state->audmode >= 0 && state->audmode < 5) ?
232 strmode[state->audmode] : "unknown"; 232 strmode[state->audmode] : "unknown";
233 int src = 0; /* channel source: FM/AM, nicam or SCART */ 233 int src = 0; /* channel source: FM/AM, nicam or SCART */
@@ -356,7 +356,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
356 356
357static void msp3400c_print_mode(struct i2c_client *client) 357static void msp3400c_print_mode(struct i2c_client *client)
358{ 358{
359 struct msp_state *state = i2c_get_clientdata(client); 359 struct msp_state *state = to_state(i2c_get_clientdata(client));
360 360
361 if (state->main == state->second) 361 if (state->main == state->second)
362 v4l_dbg(1, msp_debug, client, 362 v4l_dbg(1, msp_debug, client,
@@ -385,7 +385,7 @@ static void msp3400c_print_mode(struct i2c_client *client)
385 385
386static int msp3400c_detect_stereo(struct i2c_client *client) 386static int msp3400c_detect_stereo(struct i2c_client *client)
387{ 387{
388 struct msp_state *state = i2c_get_clientdata(client); 388 struct msp_state *state = to_state(i2c_get_clientdata(client));
389 int val; 389 int val;
390 int rxsubchans = state->rxsubchans; 390 int rxsubchans = state->rxsubchans;
391 int newnicam = state->nicam_on; 391 int newnicam = state->nicam_on;
@@ -463,7 +463,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
463/* stereo/multilang monitoring */ 463/* stereo/multilang monitoring */
464static void watch_stereo(struct i2c_client *client) 464static void watch_stereo(struct i2c_client *client)
465{ 465{
466 struct msp_state *state = i2c_get_clientdata(client); 466 struct msp_state *state = to_state(i2c_get_clientdata(client));
467 467
468 if (msp_detect_stereo(client)) 468 if (msp_detect_stereo(client))
469 msp_set_audmode(client); 469 msp_set_audmode(client);
@@ -475,7 +475,7 @@ static void watch_stereo(struct i2c_client *client)
475int msp3400c_thread(void *data) 475int msp3400c_thread(void *data)
476{ 476{
477 struct i2c_client *client = data; 477 struct i2c_client *client = data;
478 struct msp_state *state = i2c_get_clientdata(client); 478 struct msp_state *state = to_state(i2c_get_clientdata(client));
479 struct msp3400c_carrier_detect *cd; 479 struct msp3400c_carrier_detect *cd;
480 int count, max1, max2, val1, val2, val, i; 480 int count, max1, max2, val1, val2, val, i;
481 481
@@ -659,7 +659,7 @@ no_second:
659int msp3410d_thread(void *data) 659int msp3410d_thread(void *data)
660{ 660{
661 struct i2c_client *client = data; 661 struct i2c_client *client = data;
662 struct msp_state *state = i2c_get_clientdata(client); 662 struct msp_state *state = to_state(i2c_get_clientdata(client));
663 int val, i, std, count; 663 int val, i, std, count;
664 664
665 v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n"); 665 v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
@@ -825,7 +825,7 @@ restart:
825 825
826static int msp34xxg_modus(struct i2c_client *client) 826static int msp34xxg_modus(struct i2c_client *client)
827{ 827{
828 struct msp_state *state = i2c_get_clientdata(client); 828 struct msp_state *state = to_state(i2c_get_clientdata(client));
829 829
830 if (state->radio) { 830 if (state->radio) {
831 v4l_dbg(1, msp_debug, client, "selected radio modus\n"); 831 v4l_dbg(1, msp_debug, client, "selected radio modus\n");
@@ -852,7 +852,7 @@ static int msp34xxg_modus(struct i2c_client *client)
852 852
853static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in) 853static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in)
854 { 854 {
855 struct msp_state *state = i2c_get_clientdata(client); 855 struct msp_state *state = to_state(i2c_get_clientdata(client));
856 int source, matrix; 856 int source, matrix;
857 857
858 switch (state->audmode) { 858 switch (state->audmode) {
@@ -895,7 +895,7 @@ static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in)
895 895
896static void msp34xxg_set_sources(struct i2c_client *client) 896static void msp34xxg_set_sources(struct i2c_client *client)
897{ 897{
898 struct msp_state *state = i2c_get_clientdata(client); 898 struct msp_state *state = to_state(i2c_get_clientdata(client));
899 u32 in = state->routing.input; 899 u32 in = state->routing.input;
900 900
901 msp34xxg_set_source(client, 0x0008, (in >> 4) & 0xf); 901 msp34xxg_set_source(client, 0x0008, (in >> 4) & 0xf);
@@ -911,7 +911,7 @@ static void msp34xxg_set_sources(struct i2c_client *client)
911/* (re-)initialize the msp34xxg */ 911/* (re-)initialize the msp34xxg */
912static void msp34xxg_reset(struct i2c_client *client) 912static void msp34xxg_reset(struct i2c_client *client)
913{ 913{
914 struct msp_state *state = i2c_get_clientdata(client); 914 struct msp_state *state = to_state(i2c_get_clientdata(client));
915 int tuner = (state->routing.input >> 3) & 1; 915 int tuner = (state->routing.input >> 3) & 1;
916 int modus; 916 int modus;
917 917
@@ -954,7 +954,7 @@ static void msp34xxg_reset(struct i2c_client *client)
954int msp34xxg_thread(void *data) 954int msp34xxg_thread(void *data)
955{ 955{
956 struct i2c_client *client = data; 956 struct i2c_client *client = data;
957 struct msp_state *state = i2c_get_clientdata(client); 957 struct msp_state *state = to_state(i2c_get_clientdata(client));
958 int val, i; 958 int val, i;
959 959
960 v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n"); 960 v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
@@ -1049,7 +1049,7 @@ unmute:
1049 1049
1050static int msp34xxg_detect_stereo(struct i2c_client *client) 1050static int msp34xxg_detect_stereo(struct i2c_client *client)
1051{ 1051{
1052 struct msp_state *state = i2c_get_clientdata(client); 1052 struct msp_state *state = to_state(i2c_get_clientdata(client));
1053 int status = msp_read_dem(client, 0x0200); 1053 int status = msp_read_dem(client, 0x0200);
1054 int is_bilingual = status & 0x100; 1054 int is_bilingual = status & 0x100;
1055 int is_stereo = status & 0x40; 1055 int is_stereo = status & 0x40;
@@ -1078,7 +1078,7 @@ static int msp34xxg_detect_stereo(struct i2c_client *client)
1078 1078
1079static void msp34xxg_set_audmode(struct i2c_client *client) 1079static void msp34xxg_set_audmode(struct i2c_client *client)
1080{ 1080{
1081 struct msp_state *state = i2c_get_clientdata(client); 1081 struct msp_state *state = to_state(i2c_get_clientdata(client));
1082 1082
1083 if (state->std == 0x20) { 1083 if (state->std == 0x20) {
1084 if ((state->rxsubchans & V4L2_TUNER_SUB_SAP) && 1084 if ((state->rxsubchans & V4L2_TUNER_SUB_SAP) &&
@@ -1095,7 +1095,7 @@ static void msp34xxg_set_audmode(struct i2c_client *client)
1095 1095
1096void msp_set_audmode(struct i2c_client *client) 1096void msp_set_audmode(struct i2c_client *client)
1097{ 1097{
1098 struct msp_state *state = i2c_get_clientdata(client); 1098 struct msp_state *state = to_state(i2c_get_clientdata(client));
1099 1099
1100 switch (state->opmode) { 1100 switch (state->opmode) {
1101 case OPMODE_MANUAL: 1101 case OPMODE_MANUAL:
@@ -1110,7 +1110,7 @@ void msp_set_audmode(struct i2c_client *client)
1110 1110
1111int msp_detect_stereo(struct i2c_client *client) 1111int msp_detect_stereo(struct i2c_client *client)
1112{ 1112{
1113 struct msp_state *state = i2c_get_clientdata(client); 1113 struct msp_state *state = to_state(i2c_get_clientdata(client));
1114 1114
1115 switch (state->opmode) { 1115 switch (state->opmode) {
1116 case OPMODE_MANUAL: 1116 case OPMODE_MANUAL:
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 0c524376b67e..1a1a12453672 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -272,21 +272,20 @@ static int mt9m001_set_bus_param(struct soc_camera_device *icd,
272static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd) 272static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd)
273{ 273{
274 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 274 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
275 unsigned int width_flag = SOCAM_DATAWIDTH_10; 275 struct soc_camera_link *icl = mt9m001->client->dev.platform_data;
276 /* MT9M001 has all capture_format parameters fixed */
277 unsigned long flags = SOCAM_DATAWIDTH_10 | SOCAM_PCLK_SAMPLE_RISING |
278 SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
279 SOCAM_MASTER;
276 280
277 if (bus_switch_possible(mt9m001)) 281 if (bus_switch_possible(mt9m001))
278 width_flag |= SOCAM_DATAWIDTH_8; 282 flags |= SOCAM_DATAWIDTH_8;
279 283
280 /* MT9M001 has all capture_format parameters fixed */ 284 return soc_camera_apply_sensor_flags(icl, flags);
281 return SOCAM_PCLK_SAMPLE_RISING |
282 SOCAM_HSYNC_ACTIVE_HIGH |
283 SOCAM_VSYNC_ACTIVE_HIGH |
284 SOCAM_MASTER |
285 width_flag;
286} 285}
287 286
288static int mt9m001_set_fmt_cap(struct soc_camera_device *icd, 287static int mt9m001_set_fmt(struct soc_camera_device *icd,
289 __u32 pixfmt, struct v4l2_rect *rect) 288 __u32 pixfmt, struct v4l2_rect *rect)
290{ 289{
291 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 290 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
292 int ret; 291 int ret;
@@ -298,7 +297,7 @@ static int mt9m001_set_fmt_cap(struct soc_camera_device *icd,
298 ret = reg_write(icd, MT9M001_VERTICAL_BLANKING, vblank); 297 ret = reg_write(icd, MT9M001_VERTICAL_BLANKING, vblank);
299 298
300 /* The caller provides a supported format, as verified per 299 /* The caller provides a supported format, as verified per
301 * call to icd->try_fmt_cap() */ 300 * call to icd->try_fmt() */
302 if (!ret) 301 if (!ret)
303 ret = reg_write(icd, MT9M001_COLUMN_START, rect->left); 302 ret = reg_write(icd, MT9M001_COLUMN_START, rect->left);
304 if (!ret) 303 if (!ret)
@@ -325,18 +324,20 @@ static int mt9m001_set_fmt_cap(struct soc_camera_device *icd,
325 return ret; 324 return ret;
326} 325}
327 326
328static int mt9m001_try_fmt_cap(struct soc_camera_device *icd, 327static int mt9m001_try_fmt(struct soc_camera_device *icd,
329 struct v4l2_format *f) 328 struct v4l2_format *f)
330{ 329{
331 if (f->fmt.pix.height < 32 + icd->y_skip_top) 330 struct v4l2_pix_format *pix = &f->fmt.pix;
332 f->fmt.pix.height = 32 + icd->y_skip_top; 331
333 if (f->fmt.pix.height > 1024 + icd->y_skip_top) 332 if (pix->height < 32 + icd->y_skip_top)
334 f->fmt.pix.height = 1024 + icd->y_skip_top; 333 pix->height = 32 + icd->y_skip_top;
335 if (f->fmt.pix.width < 48) 334 if (pix->height > 1024 + icd->y_skip_top)
336 f->fmt.pix.width = 48; 335 pix->height = 1024 + icd->y_skip_top;
337 if (f->fmt.pix.width > 1280) 336 if (pix->width < 48)
338 f->fmt.pix.width = 1280; 337 pix->width = 48;
339 f->fmt.pix.width &= ~0x01; /* has to be even, unsure why was ~3 */ 338 if (pix->width > 1280)
339 pix->width = 1280;
340 pix->width &= ~0x01; /* has to be even, unsure why was ~3 */
340 341
341 return 0; 342 return 0;
342} 343}
@@ -447,8 +448,8 @@ static struct soc_camera_ops mt9m001_ops = {
447 .release = mt9m001_release, 448 .release = mt9m001_release,
448 .start_capture = mt9m001_start_capture, 449 .start_capture = mt9m001_start_capture,
449 .stop_capture = mt9m001_stop_capture, 450 .stop_capture = mt9m001_stop_capture,
450 .set_fmt_cap = mt9m001_set_fmt_cap, 451 .set_fmt = mt9m001_set_fmt,
451 .try_fmt_cap = mt9m001_try_fmt_cap, 452 .try_fmt = mt9m001_try_fmt,
452 .set_bus_param = mt9m001_set_bus_param, 453 .set_bus_param = mt9m001_set_bus_param,
453 .query_bus_param = mt9m001_query_bus_param, 454 .query_bus_param = mt9m001_query_bus_param,
454 .controls = mt9m001_controls, 455 .controls = mt9m001_controls,
@@ -578,6 +579,7 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
578static int mt9m001_video_probe(struct soc_camera_device *icd) 579static int mt9m001_video_probe(struct soc_camera_device *icd)
579{ 580{
580 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 581 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
582 struct soc_camera_link *icl = mt9m001->client->dev.platform_data;
581 s32 data; 583 s32 data;
582 int ret; 584 int ret;
583 585
@@ -588,7 +590,7 @@ static int mt9m001_video_probe(struct soc_camera_device *icd)
588 return -ENODEV; 590 return -ENODEV;
589 591
590 /* Enable the chip */ 592 /* Enable the chip */
591 data = reg_write(&mt9m001->icd, MT9M001_CHIP_ENABLE, 1); 593 data = reg_write(icd, MT9M001_CHIP_ENABLE, 1);
592 dev_dbg(&icd->dev, "write: %d\n", data); 594 dev_dbg(&icd->dev, "write: %d\n", data);
593 595
594 /* Read out the chip version register */ 596 /* Read out the chip version register */
@@ -600,7 +602,7 @@ static int mt9m001_video_probe(struct soc_camera_device *icd)
600 case 0x8421: 602 case 0x8421:
601 mt9m001->model = V4L2_IDENT_MT9M001C12ST; 603 mt9m001->model = V4L2_IDENT_MT9M001C12ST;
602 icd->formats = mt9m001_colour_formats; 604 icd->formats = mt9m001_colour_formats;
603 if (mt9m001->client->dev.platform_data) 605 if (gpio_is_valid(icl->gpio))
604 icd->num_formats = ARRAY_SIZE(mt9m001_colour_formats); 606 icd->num_formats = ARRAY_SIZE(mt9m001_colour_formats);
605 else 607 else
606 icd->num_formats = 1; 608 icd->num_formats = 1;
@@ -608,7 +610,7 @@ static int mt9m001_video_probe(struct soc_camera_device *icd)
608 case 0x8431: 610 case 0x8431:
609 mt9m001->model = V4L2_IDENT_MT9M001C12STM; 611 mt9m001->model = V4L2_IDENT_MT9M001C12STM;
610 icd->formats = mt9m001_monochrome_formats; 612 icd->formats = mt9m001_monochrome_formats;
611 if (mt9m001->client->dev.platform_data) 613 if (gpio_is_valid(icl->gpio))
612 icd->num_formats = ARRAY_SIZE(mt9m001_monochrome_formats); 614 icd->num_formats = ARRAY_SIZE(mt9m001_monochrome_formats);
613 else 615 else
614 icd->num_formats = 1; 616 icd->num_formats = 1;
@@ -640,8 +642,8 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
640 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 642 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
641 643
642 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9m001->client->addr, 644 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9m001->client->addr,
643 mt9m001->icd.dev.parent, mt9m001->icd.vdev); 645 icd->dev.parent, icd->vdev);
644 soc_camera_video_stop(&mt9m001->icd); 646 soc_camera_video_stop(icd);
645} 647}
646 648
647static int mt9m001_probe(struct i2c_client *client, 649static int mt9m001_probe(struct i2c_client *client,
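
mt9m001_try_fmt() (renamed from mt9m001_try_fmt_cap) clamps the requested geometry to the sensor window and forces an even width. The same checks can be written with the kernel's clamp_t() helper; the sketch below is an illustrative restatement with the limits taken from the diff, not a proposed change:

	/* Sketch of the geometry clamping in mt9m001_try_fmt(): width 48..1280,
	 * height 32..1024 plus the per-board y_skip_top, even width required. */
	static void mt9m001_clamp_sketch(struct v4l2_pix_format *pix, u32 y_skip_top)
	{
		pix->height = clamp_t(u32, pix->height,
				      32 + y_skip_top, 1024 + y_skip_top);
		pix->width  = clamp_t(u32, pix->width, 48, 1280);
		pix->width &= ~0x01;	/* sensor needs an even width */
	}

mt9m111_try_fmt() further down applies only the upper bounds (MT9M111_MAX_WIDTH/HEIGHT), as its hunk shows.
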
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index da0b2d553fd0..c89ea41fe259 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Driver for MT9M111 CMOS Image Sensor from Micron 2 * Driver for MT9M111/MT9M112 CMOS Image Sensor from Micron
3 * 3 *
4 * Copyright (C) 2008, Robert Jarzmik <robert.jarzmik@free.fr> 4 * Copyright (C) 2008, Robert Jarzmik <robert.jarzmik@free.fr>
5 * 5 *
@@ -19,7 +19,7 @@
19#include <media/soc_camera.h> 19#include <media/soc_camera.h>
20 20
21/* 21/*
22 * mt9m111 i2c address is 0x5d or 0x48 (depending on SAddr pin) 22 * mt9m111 and mt9m112 i2c address is 0x5d or 0x48 (depending on SAddr pin)
23 * The platform has to define i2c_board_info and call i2c_register_board_info() 23 * The platform has to define i2c_board_info and call i2c_register_board_info()
24 */ 24 */
25 25
@@ -90,7 +90,7 @@
90#define MT9M111_OUTPUT_FORMAT_CTRL2_B 0x19b 90#define MT9M111_OUTPUT_FORMAT_CTRL2_B 0x19b
91 91
92#define MT9M111_OPMODE_AUTOEXPO_EN (1 << 14) 92#define MT9M111_OPMODE_AUTOEXPO_EN (1 << 14)
93 93#define MT9M111_OPMODE_AUTOWHITEBAL_EN (1 << 1)
94 94
95#define MT9M111_OUTFMT_PROCESSED_BAYER (1 << 14) 95#define MT9M111_OUTFMT_PROCESSED_BAYER (1 << 14)
96#define MT9M111_OUTFMT_BYPASS_IFP (1 << 10) 96#define MT9M111_OUTFMT_BYPASS_IFP (1 << 10)
@@ -128,9 +128,14 @@
128 .colorspace = _colorspace } 128 .colorspace = _colorspace }
129#define RGB_FMT(_name, _depth, _fourcc) \ 129#define RGB_FMT(_name, _depth, _fourcc) \
130 COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_SRGB) 130 COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_SRGB)
131#define JPG_FMT(_name, _depth, _fourcc) \
132 COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_JPEG)
131 133
132static const struct soc_camera_data_format mt9m111_colour_formats[] = { 134static const struct soc_camera_data_format mt9m111_colour_formats[] = {
133 COL_FMT("YCrYCb 8 bit", 8, V4L2_PIX_FMT_YUYV, V4L2_COLORSPACE_JPEG), 135 JPG_FMT("CbYCrY 16 bit", 16, V4L2_PIX_FMT_UYVY),
136 JPG_FMT("CrYCbY 16 bit", 16, V4L2_PIX_FMT_VYUY),
137 JPG_FMT("YCbYCr 16 bit", 16, V4L2_PIX_FMT_YUYV),
138 JPG_FMT("YCrYCb 16 bit", 16, V4L2_PIX_FMT_YVYU),
134 RGB_FMT("RGB 565", 16, V4L2_PIX_FMT_RGB565), 139 RGB_FMT("RGB 565", 16, V4L2_PIX_FMT_RGB565),
135 RGB_FMT("RGB 555", 16, V4L2_PIX_FMT_RGB555), 140 RGB_FMT("RGB 555", 16, V4L2_PIX_FMT_RGB555),
136 RGB_FMT("Bayer (sRGB) 10 bit", 10, V4L2_PIX_FMT_SBGGR16), 141 RGB_FMT("Bayer (sRGB) 10 bit", 10, V4L2_PIX_FMT_SBGGR16),
@@ -145,7 +150,7 @@ enum mt9m111_context {
145struct mt9m111 { 150struct mt9m111 {
146 struct i2c_client *client; 151 struct i2c_client *client;
147 struct soc_camera_device icd; 152 struct soc_camera_device icd;
148 int model; /* V4L2_IDENT_MT9M111* codes from v4l2-chip-ident.h */ 153 int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */
149 enum mt9m111_context context; 154 enum mt9m111_context context;
150 unsigned int left, top, width, height; 155 unsigned int left, top, width, height;
151 u32 pixfmt; 156 u32 pixfmt;
@@ -158,6 +163,7 @@ struct mt9m111 {
158 unsigned int swap_rgb_red_blue:1; 163 unsigned int swap_rgb_red_blue:1;
159 unsigned int swap_yuv_y_chromas:1; 164 unsigned int swap_yuv_y_chromas:1;
160 unsigned int swap_yuv_cb_cr:1; 165 unsigned int swap_yuv_cb_cr:1;
166 unsigned int autowhitebalance:1;
161}; 167};
162 168
163static int reg_page_map_set(struct i2c_client *client, const u16 reg) 169static int reg_page_map_set(struct i2c_client *client, const u16 reg)
@@ -410,9 +416,13 @@ static int mt9m111_stop_capture(struct soc_camera_device *icd)
410 416
411static unsigned long mt9m111_query_bus_param(struct soc_camera_device *icd) 417static unsigned long mt9m111_query_bus_param(struct soc_camera_device *icd)
412{ 418{
413 return SOCAM_MASTER | SOCAM_PCLK_SAMPLE_RISING | 419 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
420 struct soc_camera_link *icl = mt9m111->client->dev.platform_data;
421 unsigned long flags = SOCAM_MASTER | SOCAM_PCLK_SAMPLE_RISING |
414 SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | 422 SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
415 SOCAM_DATAWIDTH_8; 423 SOCAM_DATAWIDTH_8;
424
425 return soc_camera_apply_sensor_flags(icl, flags);
416} 426}
417 427
418static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f) 428static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f)
@@ -438,7 +448,24 @@ static int mt9m111_set_pixfmt(struct soc_camera_device *icd, u32 pixfmt)
438 case V4L2_PIX_FMT_RGB565: 448 case V4L2_PIX_FMT_RGB565:
439 ret = mt9m111_setfmt_rgb565(icd); 449 ret = mt9m111_setfmt_rgb565(icd);
440 break; 450 break;
451 case V4L2_PIX_FMT_UYVY:
452 mt9m111->swap_yuv_y_chromas = 0;
453 mt9m111->swap_yuv_cb_cr = 0;
454 ret = mt9m111_setfmt_yuv(icd);
455 break;
456 case V4L2_PIX_FMT_VYUY:
457 mt9m111->swap_yuv_y_chromas = 0;
458 mt9m111->swap_yuv_cb_cr = 1;
459 ret = mt9m111_setfmt_yuv(icd);
460 break;
441 case V4L2_PIX_FMT_YUYV: 461 case V4L2_PIX_FMT_YUYV:
462 mt9m111->swap_yuv_y_chromas = 1;
463 mt9m111->swap_yuv_cb_cr = 0;
464 ret = mt9m111_setfmt_yuv(icd);
465 break;
466 case V4L2_PIX_FMT_YVYU:
467 mt9m111->swap_yuv_y_chromas = 1;
468 mt9m111->swap_yuv_cb_cr = 1;
442 ret = mt9m111_setfmt_yuv(icd); 469 ret = mt9m111_setfmt_yuv(icd);
443 break; 470 break;
444 default: 471 default:
@@ -452,8 +479,8 @@ static int mt9m111_set_pixfmt(struct soc_camera_device *icd, u32 pixfmt)
452 return ret; 479 return ret;
453} 480}
454 481
455static int mt9m111_set_fmt_cap(struct soc_camera_device *icd, 482static int mt9m111_set_fmt(struct soc_camera_device *icd,
456 __u32 pixfmt, struct v4l2_rect *rect) 483 __u32 pixfmt, struct v4l2_rect *rect)
457{ 484{
458 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 485 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
459 int ret; 486 int ret;
@@ -473,13 +500,15 @@ static int mt9m111_set_fmt_cap(struct soc_camera_device *icd,
473 return ret; 500 return ret;
474} 501}
475 502
476static int mt9m111_try_fmt_cap(struct soc_camera_device *icd, 503static int mt9m111_try_fmt(struct soc_camera_device *icd,
477 struct v4l2_format *f) 504 struct v4l2_format *f)
478{ 505{
479 if (f->fmt.pix.height > MT9M111_MAX_HEIGHT) 506 struct v4l2_pix_format *pix = &f->fmt.pix;
480 f->fmt.pix.height = MT9M111_MAX_HEIGHT; 507
481 if (f->fmt.pix.width > MT9M111_MAX_WIDTH) 508 if (pix->height > MT9M111_MAX_HEIGHT)
482 f->fmt.pix.width = MT9M111_MAX_WIDTH; 509 pix->height = MT9M111_MAX_HEIGHT;
510 if (pix->width > MT9M111_MAX_WIDTH)
511 pix->width = MT9M111_MAX_WIDTH;
483 512
484 return 0; 513 return 0;
485} 514}
@@ -597,8 +626,8 @@ static struct soc_camera_ops mt9m111_ops = {
597 .release = mt9m111_release, 626 .release = mt9m111_release,
598 .start_capture = mt9m111_start_capture, 627 .start_capture = mt9m111_start_capture,
599 .stop_capture = mt9m111_stop_capture, 628 .stop_capture = mt9m111_stop_capture,
600 .set_fmt_cap = mt9m111_set_fmt_cap, 629 .set_fmt = mt9m111_set_fmt,
601 .try_fmt_cap = mt9m111_try_fmt_cap, 630 .try_fmt = mt9m111_try_fmt,
602 .query_bus_param = mt9m111_query_bus_param, 631 .query_bus_param = mt9m111_query_bus_param,
603 .set_bus_param = mt9m111_set_bus_param, 632 .set_bus_param = mt9m111_set_bus_param,
604 .controls = mt9m111_controls, 633 .controls = mt9m111_controls,
@@ -634,18 +663,15 @@ static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask)
634 663
635static int mt9m111_get_global_gain(struct soc_camera_device *icd) 664static int mt9m111_get_global_gain(struct soc_camera_device *icd)
636{ 665{
637 unsigned int data, gain; 666 int data;
638 667
639 data = reg_read(GLOBAL_GAIN); 668 data = reg_read(GLOBAL_GAIN);
640 if (data >= 0) 669 if (data >= 0)
641 gain = ((data & (1 << 10)) * 2) 670 return (data & 0x2f) * (1 << ((data >> 10) & 1)) *
642 | ((data & (1 << 9)) * 2) 671 (1 << ((data >> 9) & 1));
643 | (data & 0x2f); 672 return data;
644 else
645 gain = data;
646
647 return gain;
648} 673}
674
649static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain) 675static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain)
650{ 676{
651 u16 val; 677 u16 val;
@@ -679,6 +705,23 @@ static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on)
679 705
680 return ret; 706 return ret;
681} 707}
708
709static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on)
710{
711 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
712 int ret;
713
714 if (on)
715 ret = reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
716 else
717 ret = reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN);
718
719 if (!ret)
720 mt9m111->autowhitebalance = on;
721
722 return ret;
723}
724
682static int mt9m111_get_control(struct soc_camera_device *icd, 725static int mt9m111_get_control(struct soc_camera_device *icd,
683 struct v4l2_control *ctrl) 726 struct v4l2_control *ctrl)
684{ 727{
@@ -715,6 +758,9 @@ static int mt9m111_get_control(struct soc_camera_device *icd,
715 case V4L2_CID_EXPOSURE_AUTO: 758 case V4L2_CID_EXPOSURE_AUTO:
716 ctrl->value = mt9m111->autoexposure; 759 ctrl->value = mt9m111->autoexposure;
717 break; 760 break;
761 case V4L2_CID_AUTO_WHITE_BALANCE:
762 ctrl->value = mt9m111->autowhitebalance;
763 break;
718 } 764 }
719 return 0; 765 return 0;
720} 766}
@@ -748,6 +794,9 @@ static int mt9m111_set_control(struct soc_camera_device *icd,
748 case V4L2_CID_EXPOSURE_AUTO: 794 case V4L2_CID_EXPOSURE_AUTO:
749 ret = mt9m111_set_autoexposure(icd, ctrl->value); 795 ret = mt9m111_set_autoexposure(icd, ctrl->value);
750 break; 796 break;
797 case V4L2_CID_AUTO_WHITE_BALANCE:
798 ret = mt9m111_set_autowhitebalance(icd, ctrl->value);
799 break;
751 default: 800 default:
752 ret = -EINVAL; 801 ret = -EINVAL;
753 } 802 }
@@ -766,6 +815,7 @@ static int mt9m111_restore_state(struct soc_camera_device *icd)
766 mt9m111_set_flip(icd, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS); 815 mt9m111_set_flip(icd, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
767 mt9m111_set_global_gain(icd, icd->gain); 816 mt9m111_set_global_gain(icd, icd->gain);
768 mt9m111_set_autoexposure(icd, mt9m111->autoexposure); 817 mt9m111_set_autoexposure(icd, mt9m111->autoexposure);
818 mt9m111_set_autowhitebalance(icd, mt9m111->autowhitebalance);
769 return 0; 819 return 0;
770} 820}
771 821
@@ -798,7 +848,7 @@ static int mt9m111_init(struct soc_camera_device *icd)
798 if (!ret) 848 if (!ret)
799 ret = mt9m111_set_autoexposure(icd, mt9m111->autoexposure); 849 ret = mt9m111_set_autoexposure(icd, mt9m111->autoexposure);
800 if (ret) 850 if (ret)
801 dev_err(&icd->dev, "mt9m111 init failed: %d\n", ret); 851 dev_err(&icd->dev, "mt9m11x init failed: %d\n", ret);
802 return ret; 852 return ret;
803} 853}
804 854
@@ -808,7 +858,7 @@ static int mt9m111_release(struct soc_camera_device *icd)
808 858
809 ret = mt9m111_disable(icd); 859 ret = mt9m111_disable(icd);
810 if (ret < 0) 860 if (ret < 0)
811 dev_err(&icd->dev, "mt9m111 release failed: %d\n", ret); 861 dev_err(&icd->dev, "mt9m11x release failed: %d\n", ret);
812 862
813 return ret; 863 return ret;
814} 864}
@@ -841,25 +891,30 @@ static int mt9m111_video_probe(struct soc_camera_device *icd)
841 data = reg_read(CHIP_VERSION); 891 data = reg_read(CHIP_VERSION);
842 892
843 switch (data) { 893 switch (data) {
844 case 0x143a: 894 case 0x143a: /* MT9M111 */
845 mt9m111->model = V4L2_IDENT_MT9M111; 895 mt9m111->model = V4L2_IDENT_MT9M111;
846 icd->formats = mt9m111_colour_formats; 896 break;
847 icd->num_formats = ARRAY_SIZE(mt9m111_colour_formats); 897 case 0x148c: /* MT9M112 */
898 mt9m111->model = V4L2_IDENT_MT9M112;
848 break; 899 break;
849 default: 900 default:
850 ret = -ENODEV; 901 ret = -ENODEV;
851 dev_err(&icd->dev, 902 dev_err(&icd->dev,
852 "No MT9M111 chip detected, register read %x\n", data); 903 "No MT9M11x chip detected, register read %x\n", data);
853 goto ei2c; 904 goto ei2c;
854 } 905 }
855 906
856 dev_info(&icd->dev, "Detected a MT9M111 chip ID 0x143a\n"); 907 icd->formats = mt9m111_colour_formats;
908 icd->num_formats = ARRAY_SIZE(mt9m111_colour_formats);
909
910 dev_info(&icd->dev, "Detected a MT9M11x chip ID %x\n", data);
857 911
858 ret = soc_camera_video_start(icd); 912 ret = soc_camera_video_start(icd);
859 if (ret) 913 if (ret)
860 goto eisis; 914 goto eisis;
861 915
862 mt9m111->autoexposure = 1; 916 mt9m111->autoexposure = 1;
917 mt9m111->autowhitebalance = 1;
863 918
864 mt9m111->swap_rgb_even_odd = 1; 919 mt9m111->swap_rgb_even_odd = 1;
865 mt9m111->swap_rgb_red_blue = 1; 920 mt9m111->swap_rgb_red_blue = 1;
@@ -889,7 +944,7 @@ static int mt9m111_probe(struct i2c_client *client,
889 int ret; 944 int ret;
890 945
891 if (!icl) { 946 if (!icl) {
892 dev_err(&client->dev, "MT9M111 driver needs platform data\n"); 947 dev_err(&client->dev, "MT9M11x driver needs platform data\n");
893 return -EINVAL; 948 return -EINVAL;
894 } 949 }
895 950
@@ -968,6 +1023,6 @@ static void __exit mt9m111_mod_exit(void)
968module_init(mt9m111_mod_init); 1023module_init(mt9m111_mod_init);
969module_exit(mt9m111_mod_exit); 1024module_exit(mt9m111_mod_exit);
970 1025
971MODULE_DESCRIPTION("Micron MT9M111 Camera driver"); 1026MODULE_DESCRIPTION("Micron MT9M111/MT9M112 Camera driver");
972MODULE_AUTHOR("Robert Jarzmik"); 1027MODULE_AUTHOR("Robert Jarzmik");
973MODULE_LICENSE("GPL"); 1028MODULE_LICENSE("GPL");
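The four new packed-YUV cases in mt9m111_set_pixfmt() differ only in the two swap flags they program before calling mt9m111_setfmt_yuv(). A minimal sketch of that mapping as a table (hypothetical helper structure, not part of the patch):

/* fourcc -> (swap_yuv_y_chromas, swap_yuv_cb_cr), mirroring the switch
 * in mt9m111_set_pixfmt(); illustrative only */
static const struct {
	u32 fourcc;
	unsigned int swap_y_chromas:1;
	unsigned int swap_cb_cr:1;
} mt9m111_yuv_order[] = {
	{ V4L2_PIX_FMT_UYVY, 0, 0 },	/* Cb Y  Cr Y  */
	{ V4L2_PIX_FMT_VYUY, 0, 1 },	/* Cr Y  Cb Y  */
	{ V4L2_PIX_FMT_YUYV, 1, 0 },	/* Y  Cb Y  Cr */
	{ V4L2_PIX_FMT_YVYU, 1, 1 },	/* Y  Cr Y  Cb */
};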
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
new file mode 100644
index 000000000000..1a9d53966d06
--- /dev/null
+++ b/drivers/media/video/mt9t031.c
@@ -0,0 +1,736 @@
1/*
2 * Driver for MT9T031 CMOS Image Sensor from Micron
3 *
4 * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering <lg@denx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/videodev2.h>
12#include <linux/slab.h>
13#include <linux/i2c.h>
14#include <linux/log2.h>
15
16#include <media/v4l2-common.h>
17#include <media/v4l2-chip-ident.h>
18#include <media/soc_camera.h>
19
20/* mt9t031 i2c address 0x5d
21 * The platform has to define i2c_board_info
22 * and call i2c_register_board_info() */
23
24/* mt9t031 selected register addresses */
25#define MT9T031_CHIP_VERSION 0x00
26#define MT9T031_ROW_START 0x01
27#define MT9T031_COLUMN_START 0x02
28#define MT9T031_WINDOW_HEIGHT 0x03
29#define MT9T031_WINDOW_WIDTH 0x04
30#define MT9T031_HORIZONTAL_BLANKING 0x05
31#define MT9T031_VERTICAL_BLANKING 0x06
32#define MT9T031_OUTPUT_CONTROL 0x07
33#define MT9T031_SHUTTER_WIDTH_UPPER 0x08
34#define MT9T031_SHUTTER_WIDTH 0x09
35#define MT9T031_PIXEL_CLOCK_CONTROL 0x0a
36#define MT9T031_FRAME_RESTART 0x0b
37#define MT9T031_SHUTTER_DELAY 0x0c
38#define MT9T031_RESET 0x0d
39#define MT9T031_READ_MODE_1 0x1e
40#define MT9T031_READ_MODE_2 0x20
41#define MT9T031_READ_MODE_3 0x21
42#define MT9T031_ROW_ADDRESS_MODE 0x22
43#define MT9T031_COLUMN_ADDRESS_MODE 0x23
44#define MT9T031_GLOBAL_GAIN 0x35
45#define MT9T031_CHIP_ENABLE 0xF8
46
47#define MT9T031_MAX_HEIGHT 1536
48#define MT9T031_MAX_WIDTH 2048
49#define MT9T031_MIN_HEIGHT 2
50#define MT9T031_MIN_WIDTH 2
51#define MT9T031_HORIZONTAL_BLANK 142
52#define MT9T031_VERTICAL_BLANK 25
53#define MT9T031_COLUMN_SKIP 32
54#define MT9T031_ROW_SKIP 20
55
56#define MT9T031_BUS_PARAM (SOCAM_PCLK_SAMPLE_RISING | \
57 SOCAM_PCLK_SAMPLE_FALLING | SOCAM_HSYNC_ACTIVE_HIGH | \
58 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | \
59 SOCAM_MASTER | SOCAM_DATAWIDTH_10)
60
61static const struct soc_camera_data_format mt9t031_colour_formats[] = {
62 {
63 .name = "Bayer (sRGB) 10 bit",
64 .depth = 10,
65 .fourcc = V4L2_PIX_FMT_SGRBG10,
66 .colorspace = V4L2_COLORSPACE_SRGB,
67 }
68};
69
70struct mt9t031 {
71 struct i2c_client *client;
72 struct soc_camera_device icd;
73 int model; /* V4L2_IDENT_MT9T031* codes from v4l2-chip-ident.h */
74 unsigned char autoexposure;
75 u16 xskip;
76 u16 yskip;
77};
78
79static int reg_read(struct soc_camera_device *icd, const u8 reg)
80{
81 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
82 struct i2c_client *client = mt9t031->client;
83 s32 data = i2c_smbus_read_word_data(client, reg);
84 return data < 0 ? data : swab16(data);
85}
86
87static int reg_write(struct soc_camera_device *icd, const u8 reg,
88 const u16 data)
89{
90 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
91 return i2c_smbus_write_word_data(mt9t031->client, reg, swab16(data));
92}
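The swab16() in both helpers is needed because the sensor transfers each 16-bit register MSB first, while the SMBus word calls treat the first byte on the wire as the low byte. A worked example for the chip version register (the value comes from mt9t031_video_probe() below):

/*
 * MT9T031_CHIP_VERSION holds 0x1621.  The sensor sends 0x16 then 0x21,
 * so i2c_smbus_read_word_data() returns 0x2116; swab16() restores
 * 0x1621, the value the probe routine compares against.  reg_write()
 * performs the same swap in the opposite direction before sending.
 */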
93
94static int reg_set(struct soc_camera_device *icd, const u8 reg,
95 const u16 data)
96{
97 int ret;
98
99 ret = reg_read(icd, reg);
100 if (ret < 0)
101 return ret;
102 return reg_write(icd, reg, ret | data);
103}
104
105static int reg_clear(struct soc_camera_device *icd, const u8 reg,
106 const u16 data)
107{
108 int ret;
109
110 ret = reg_read(icd, reg);
111 if (ret < 0)
112 return ret;
113 return reg_write(icd, reg, ret & ~data);
114}
115
116static int set_shutter(struct soc_camera_device *icd, const u32 data)
117{
118 int ret;
119
120 ret = reg_write(icd, MT9T031_SHUTTER_WIDTH_UPPER, data >> 16);
121
122 if (ret >= 0)
123 ret = reg_write(icd, MT9T031_SHUTTER_WIDTH, data & 0xffff);
124
125 return ret;
126}
127
128static int get_shutter(struct soc_camera_device *icd, u32 *data)
129{
130 int ret;
131
132 ret = reg_read(icd, MT9T031_SHUTTER_WIDTH_UPPER);
133 *data = ret << 16;
134
135 if (ret >= 0)
136 ret = reg_read(icd, MT9T031_SHUTTER_WIDTH);
137 *data |= ret & 0xffff;
138
139 return ret < 0 ? ret : 0;
140}
141
142static int mt9t031_init(struct soc_camera_device *icd)
143{
144 int ret;
145
146 /* Disable chip output, synchronous option update */
147 dev_dbg(icd->vdev->parent, "%s\n", __func__);
148
149 ret = reg_write(icd, MT9T031_RESET, 1);
150 if (ret >= 0)
151 ret = reg_write(icd, MT9T031_RESET, 0);
152 if (ret >= 0)
153 ret = reg_clear(icd, MT9T031_OUTPUT_CONTROL, 3);
154
155 return ret >= 0 ? 0 : -EIO;
156}
157
158static int mt9t031_release(struct soc_camera_device *icd)
159{
160 /* Disable the chip */
161 reg_clear(icd, MT9T031_OUTPUT_CONTROL, 3);
162 return 0;
163}
164
165static int mt9t031_start_capture(struct soc_camera_device *icd)
166{
167 /* Switch to master "normal" mode */
168 if (reg_set(icd, MT9T031_OUTPUT_CONTROL, 3) < 0)
169 return -EIO;
170 return 0;
171}
172
173static int mt9t031_stop_capture(struct soc_camera_device *icd)
174{
175 /* Stop sensor readout */
176 if (reg_clear(icd, MT9T031_OUTPUT_CONTROL, 3) < 0)
177 return -EIO;
178 return 0;
179}
180
181static int mt9t031_set_bus_param(struct soc_camera_device *icd,
182 unsigned long flags)
183{
184 /* The caller should have queried our parameters, check anyway */
185 if (flags & ~MT9T031_BUS_PARAM)
186 return -EINVAL;
187
188 if (flags & SOCAM_PCLK_SAMPLE_FALLING)
189 reg_set(icd, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
190 else
191 reg_clear(icd, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
192
193 return 0;
194}
195
196static unsigned long mt9t031_query_bus_param(struct soc_camera_device *icd)
197{
198 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
199 struct soc_camera_link *icl = mt9t031->client->dev.platform_data;
200
201 return soc_camera_apply_sensor_flags(icl, MT9T031_BUS_PARAM);
202}
203
204static int mt9t031_set_fmt(struct soc_camera_device *icd,
205 __u32 pixfmt, struct v4l2_rect *rect)
206{
207 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
208 int ret;
209 const u16 hblank = MT9T031_HORIZONTAL_BLANK,
210 vblank = MT9T031_VERTICAL_BLANK;
211 u16 xbin, xskip = mt9t031->xskip, ybin, yskip = mt9t031->yskip,
212 width = rect->width * xskip, height = rect->height * yskip;
213
214 if (pixfmt) {
215 /* S_FMT - use binning and skipping for scaling, recalculate */
216 /* Is this more optimal than just a division? */
217 for (xskip = 8; xskip > 1; xskip--)
218 if (rect->width * xskip <= icd->width_max)
219 break;
220
221 for (yskip = 8; yskip > 1; yskip--)
222 if (rect->height * yskip <= icd->height_max)
223 break;
224
225 width = rect->width * xskip;
226 height = rect->height * yskip;
227
228 dev_dbg(&icd->dev, "xskip %u, width %u, yskip %u, height %u\n",
229 xskip, width, yskip, height);
230 }
231
232 xbin = min(xskip, (u16)3);
233 ybin = min(yskip, (u16)3);
234
235 /* Make sure we don't exceed frame limits */
236 if (rect->left + width > icd->width_max)
237 rect->left = (icd->width_max - width) / 2;
238
239 if (rect->top + height > icd->height_max)
240 rect->top = (icd->height_max - height) / 2;
241
242 /* Could just do roundup(rect->left, [xy]bin); but this is cheaper */
243 switch (xbin) {
244 case 2:
245 rect->left = (rect->left + 1) & ~1;
246 break;
247 case 3:
248 rect->left = roundup(rect->left, 3);
249 }
250
251 switch (ybin) {
252 case 2:
253 rect->top = (rect->top + 1) & ~1;
254 break;
255 case 3:
256 rect->top = roundup(rect->top, 3);
257 }
258
259 /* Blanking and start values - default... */
260 ret = reg_write(icd, MT9T031_HORIZONTAL_BLANKING, hblank);
261 if (ret >= 0)
262 ret = reg_write(icd, MT9T031_VERTICAL_BLANKING, vblank);
263
264 if (pixfmt) {
265 /* Binning, skipping */
266 if (ret >= 0)
267 ret = reg_write(icd, MT9T031_COLUMN_ADDRESS_MODE,
268 ((xbin - 1) << 4) | (xskip - 1));
269 if (ret >= 0)
270 ret = reg_write(icd, MT9T031_ROW_ADDRESS_MODE,
271 ((ybin - 1) << 4) | (yskip - 1));
272 }
273 dev_dbg(&icd->dev, "new left %u, top %u\n", rect->left, rect->top);
274
275 /* The caller provides a supported format, as guaranteed by
276 * icd->try_fmt(), soc_camera_s_crop() and soc_camera_cropcap() */
277 if (ret >= 0)
278 ret = reg_write(icd, MT9T031_COLUMN_START, rect->left);
279 if (ret >= 0)
280 ret = reg_write(icd, MT9T031_ROW_START, rect->top);
281 if (ret >= 0)
282 ret = reg_write(icd, MT9T031_WINDOW_WIDTH, width - 1);
283 if (ret >= 0)
284 ret = reg_write(icd, MT9T031_WINDOW_HEIGHT,
285 height + icd->y_skip_top - 1);
286 if (ret >= 0 && mt9t031->autoexposure) {
287 ret = set_shutter(icd, height + icd->y_skip_top + vblank);
288 if (ret >= 0) {
289 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
290 const struct v4l2_queryctrl *qctrl =
291 soc_camera_find_qctrl(icd->ops,
292 V4L2_CID_EXPOSURE);
293 icd->exposure = (shutter_max / 2 + (height +
294 icd->y_skip_top + vblank - 1) *
295 (qctrl->maximum - qctrl->minimum)) /
296 shutter_max + qctrl->minimum;
297 }
298 }
299
300 if (!ret && pixfmt) {
301 mt9t031->xskip = xskip;
302 mt9t031->yskip = yskip;
303 }
304
305 return ret < 0 ? ret : 0;
306}
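The two skip loops above pick the largest decimation factor (8 down to 1) whose resulting sensor window still fits the pixel array, and binning is then capped at 3. A standalone sketch of the selection with a few worked values (hypothetical helper name, limit taken from MT9T031_MAX_WIDTH):

/* Illustration only: pick the largest skip whose window still fits */
static u16 mt9t031_pick_skip(u32 requested, u32 max)
{
	u16 skip;

	for (skip = 8; skip > 1; skip--)
		if (requested * skip <= max)
			break;
	return skip;		/* ends up as 1 if nothing larger fits */
}

/*
 * Against MT9T031_MAX_WIDTH (2048):
 *   requested 2048 -> skip 1, window 2048 (no decimation)
 *   requested 1024 -> skip 2, window 2048
 *   requested  640 -> skip 3, window 1920
 * xbin/ybin are then min(skip, 3), so larger skips still bin by 3 at most.
 */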
307
308static int mt9t031_try_fmt(struct soc_camera_device *icd,
309 struct v4l2_format *f)
310{
311 struct v4l2_pix_format *pix = &f->fmt.pix;
312
313 if (pix->height < icd->height_min)
314 pix->height = icd->height_min;
315 if (pix->height > icd->height_max)
316 pix->height = icd->height_max;
317 if (pix->width < icd->width_min)
318 pix->width = icd->width_min;
319 if (pix->width > icd->width_max)
320 pix->width = icd->width_max;
321
322 pix->width &= ~0x01; /* has to be even */
323 pix->height &= ~0x01; /* has to be even */
324
325 return 0;
326}
327
328static int mt9t031_get_chip_id(struct soc_camera_device *icd,
329 struct v4l2_chip_ident *id)
330{
331 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
332
333 if (id->match_type != V4L2_CHIP_MATCH_I2C_ADDR)
334 return -EINVAL;
335
336 if (id->match_chip != mt9t031->client->addr)
337 return -ENODEV;
338
339 id->ident = mt9t031->model;
340 id->revision = 0;
341
342 return 0;
343}
344
345#ifdef CONFIG_VIDEO_ADV_DEBUG
346static int mt9t031_get_register(struct soc_camera_device *icd,
347 struct v4l2_register *reg)
348{
349 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
350
351 if (reg->match_type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
352 return -EINVAL;
353
354 if (reg->match_chip != mt9t031->client->addr)
355 return -ENODEV;
356
357 reg->val = reg_read(icd, reg->reg);
358
359 if (reg->val > 0xffff)
360 return -EIO;
361
362 return 0;
363}
364
365static int mt9t031_set_register(struct soc_camera_device *icd,
366 struct v4l2_register *reg)
367{
368 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
369
370 if (reg->match_type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
371 return -EINVAL;
372
373 if (reg->match_chip != mt9t031->client->addr)
374 return -ENODEV;
375
376 if (reg_write(icd, reg->reg, reg->val) < 0)
377 return -EIO;
378
379 return 0;
380}
381#endif
382
383static const struct v4l2_queryctrl mt9t031_controls[] = {
384 {
385 .id = V4L2_CID_VFLIP,
386 .type = V4L2_CTRL_TYPE_BOOLEAN,
387 .name = "Flip Vertically",
388 .minimum = 0,
389 .maximum = 1,
390 .step = 1,
391 .default_value = 0,
392 }, {
393 .id = V4L2_CID_GAIN,
394 .type = V4L2_CTRL_TYPE_INTEGER,
395 .name = "Gain",
396 .minimum = 0,
397 .maximum = 127,
398 .step = 1,
399 .default_value = 64,
400 .flags = V4L2_CTRL_FLAG_SLIDER,
401 }, {
402 .id = V4L2_CID_EXPOSURE,
403 .type = V4L2_CTRL_TYPE_INTEGER,
404 .name = "Exposure",
405 .minimum = 1,
406 .maximum = 255,
407 .step = 1,
408 .default_value = 255,
409 .flags = V4L2_CTRL_FLAG_SLIDER,
410 }, {
411 .id = V4L2_CID_EXPOSURE_AUTO,
412 .type = V4L2_CTRL_TYPE_BOOLEAN,
413 .name = "Automatic Exposure",
414 .minimum = 0,
415 .maximum = 1,
416 .step = 1,
417 .default_value = 1,
418 }
419};
420
421static int mt9t031_video_probe(struct soc_camera_device *);
422static void mt9t031_video_remove(struct soc_camera_device *);
423static int mt9t031_get_control(struct soc_camera_device *, struct v4l2_control *);
424static int mt9t031_set_control(struct soc_camera_device *, struct v4l2_control *);
425
426static struct soc_camera_ops mt9t031_ops = {
427 .owner = THIS_MODULE,
428 .probe = mt9t031_video_probe,
429 .remove = mt9t031_video_remove,
430 .init = mt9t031_init,
431 .release = mt9t031_release,
432 .start_capture = mt9t031_start_capture,
433 .stop_capture = mt9t031_stop_capture,
434 .set_fmt = mt9t031_set_fmt,
435 .try_fmt = mt9t031_try_fmt,
436 .set_bus_param = mt9t031_set_bus_param,
437 .query_bus_param = mt9t031_query_bus_param,
438 .controls = mt9t031_controls,
439 .num_controls = ARRAY_SIZE(mt9t031_controls),
440 .get_control = mt9t031_get_control,
441 .set_control = mt9t031_set_control,
442 .get_chip_id = mt9t031_get_chip_id,
443#ifdef CONFIG_VIDEO_ADV_DEBUG
444 .get_register = mt9t031_get_register,
445 .set_register = mt9t031_set_register,
446#endif
447};
448
449static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
450{
451 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
452 int data;
453
454 switch (ctrl->id) {
455 case V4L2_CID_VFLIP:
456 data = reg_read(icd, MT9T031_READ_MODE_2);
457 if (data < 0)
458 return -EIO;
459 ctrl->value = !!(data & 0x8000);
460 break;
461 case V4L2_CID_HFLIP:
462 data = reg_read(icd, MT9T031_READ_MODE_2);
463 if (data < 0)
464 return -EIO;
465 ctrl->value = !!(data & 0x4000);
466 break;
467 case V4L2_CID_EXPOSURE_AUTO:
468 ctrl->value = mt9t031->autoexposure;
469 break;
470 }
471 return 0;
472}
473
474static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
475{
476 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
477 const struct v4l2_queryctrl *qctrl;
478 int data;
479
480 qctrl = soc_camera_find_qctrl(&mt9t031_ops, ctrl->id);
481
482 if (!qctrl)
483 return -EINVAL;
484
485 switch (ctrl->id) {
486 case V4L2_CID_VFLIP:
487 if (ctrl->value)
488 data = reg_set(icd, MT9T031_READ_MODE_2, 0x8000);
489 else
490 data = reg_clear(icd, MT9T031_READ_MODE_2, 0x8000);
491 if (data < 0)
492 return -EIO;
493 break;
494 case V4L2_CID_HFLIP:
495 if (ctrl->value)
496 data = reg_set(icd, MT9T031_READ_MODE_2, 0x4000);
497 else
498 data = reg_clear(icd, MT9T031_READ_MODE_2, 0x4000);
499 if (data < 0)
500 return -EIO;
501 break;
502 case V4L2_CID_GAIN:
503 if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
504 return -EINVAL;
505 /* See Datasheet Table 7, Gain settings. */
506 if (ctrl->value <= qctrl->default_value) {
507 /* Pack it into 0..1 step 0.125, register values 0..8 */
508 unsigned long range = qctrl->default_value - qctrl->minimum;
509 data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
510
511 dev_dbg(&icd->dev, "Setting gain %d\n", data);
512 data = reg_write(icd, MT9T031_GLOBAL_GAIN, data);
513 if (data < 0)
514 return -EIO;
515 } else {
516 /* Pack it into 1.125..15 variable step, register values 9..67 */
517 /* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */
518 unsigned long range = qctrl->maximum - qctrl->default_value - 1;
519 unsigned long gain = ((ctrl->value - qctrl->default_value - 1) *
520 111 + range / 2) / range + 9;
521
522 if (gain <= 32)
523 data = gain;
524 else if (gain <= 64)
525 data = ((gain - 32) * 16 + 16) / 32 + 80;
526 else
527 data = ((gain - 64) * 7 + 28) / 56 + 96;
528
529 dev_dbg(&icd->dev, "Setting gain from %d to %d\n",
530 reg_read(icd, MT9T031_GLOBAL_GAIN), data);
531 data = reg_write(icd, MT9T031_GLOBAL_GAIN, data);
532 if (data < 0)
533 return -EIO;
534 }
535
536 /* Success */
537 icd->gain = ctrl->value;
538 break;
539 case V4L2_CID_EXPOSURE:
540 /* mt9t031 has maximum == default */
541 if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
542 return -EINVAL;
543 else {
544 const unsigned long range = qctrl->maximum - qctrl->minimum;
545 const u32 shutter = ((ctrl->value - qctrl->minimum) * 1048 +
546 range / 2) / range + 1;
547 u32 old;
548
549 get_shutter(icd, &old);
550 dev_dbg(&icd->dev, "Setting shutter width from %u to %u\n",
551 old, shutter);
552 if (set_shutter(icd, shutter) < 0)
553 return -EIO;
554 icd->exposure = ctrl->value;
555 mt9t031->autoexposure = 0;
556 }
557 break;
558 case V4L2_CID_EXPOSURE_AUTO:
559 if (ctrl->value) {
560 const u16 vblank = MT9T031_VERTICAL_BLANK;
561 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
562 if (set_shutter(icd, icd->height +
563 icd->y_skip_top + vblank) < 0)
564 return -EIO;
565 qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE);
566 icd->exposure = (shutter_max / 2 + (icd->height +
567 icd->y_skip_top + vblank - 1) *
568 (qctrl->maximum - qctrl->minimum)) /
569 shutter_max + qctrl->minimum;
570 mt9t031->autoexposure = 1;
571 } else
572 mt9t031->autoexposure = 0;
573 break;
574 }
575 return 0;
576}
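The manual-exposure branch above maps the 1..255 control range linearly, with rounding, onto shutter-width values 1..1049. A minimal sketch using the limits declared in mt9t031_controls:

/* Sketch, not part of the driver; qctrl limits assumed to be 1..255 */
static u32 exposure_to_shutter(int value)
{
	const unsigned long range = 255 - 1;	/* qctrl->maximum - qctrl->minimum */

	return ((value - 1) * 1048 + range / 2) / range + 1;
}

/* exposure_to_shutter(1) == 1, (128) == 525, (255) == 1049 */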
577
578/* Interface active, can use i2c. If it fails, it can indeed mean that
579 * this wasn't our capture interface, so we wait for the right one */
580static int mt9t031_video_probe(struct soc_camera_device *icd)
581{
582 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
583 s32 data;
584 int ret;
585
586 /* We must have a parent by now. And it cannot be a wrong one.
587 * So this entire test is completely redundant. */
588 if (!icd->dev.parent ||
589 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
590 return -ENODEV;
591
592 /* Enable the chip */
593 data = reg_write(icd, MT9T031_CHIP_ENABLE, 1);
594 dev_dbg(&icd->dev, "write: %d\n", data);
595
596 /* Read out the chip version register */
597 data = reg_read(icd, MT9T031_CHIP_VERSION);
598
599 switch (data) {
600 case 0x1621:
601 mt9t031->model = V4L2_IDENT_MT9T031;
602 icd->formats = mt9t031_colour_formats;
603 icd->num_formats = ARRAY_SIZE(mt9t031_colour_formats);
604 break;
605 default:
606 ret = -ENODEV;
607 dev_err(&icd->dev,
608 "No MT9T031 chip detected, register read %x\n", data);
609 goto ei2c;
610 }
611
612 dev_info(&icd->dev, "Detected a MT9T031 chip ID %x\n", data);
613
614 /* Now that we know the model, we can start video */
615 ret = soc_camera_video_start(icd);
616 if (ret)
617 goto evstart;
618
619 return 0;
620
621evstart:
622ei2c:
623 return ret;
624}
625
626static void mt9t031_video_remove(struct soc_camera_device *icd)
627{
628 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
629
630 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9t031->client->addr,
631 icd->dev.parent, icd->vdev);
632 soc_camera_video_stop(icd);
633}
634
635static int mt9t031_probe(struct i2c_client *client,
636 const struct i2c_device_id *did)
637{
638 struct mt9t031 *mt9t031;
639 struct soc_camera_device *icd;
640 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
641 struct soc_camera_link *icl = client->dev.platform_data;
642 int ret;
643
644 if (!icl) {
645 dev_err(&client->dev, "MT9T031 driver needs platform data\n");
646 return -EINVAL;
647 }
648
649 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
650 dev_warn(&adapter->dev,
651 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
652 return -EIO;
653 }
654
655 mt9t031 = kzalloc(sizeof(struct mt9t031), GFP_KERNEL);
656 if (!mt9t031)
657 return -ENOMEM;
658
659 mt9t031->client = client;
660 i2c_set_clientdata(client, mt9t031);
661
662 /* Second stage probe - when a capture adapter is there */
663 icd = &mt9t031->icd;
664 icd->ops = &mt9t031_ops;
665 icd->control = &client->dev;
666 icd->x_min = MT9T031_COLUMN_SKIP;
667 icd->y_min = MT9T031_ROW_SKIP;
668 icd->x_current = icd->x_min;
669 icd->y_current = icd->y_min;
670 icd->width_min = MT9T031_MIN_WIDTH;
671 icd->width_max = MT9T031_MAX_WIDTH;
672 icd->height_min = MT9T031_MIN_HEIGHT;
673 icd->height_max = MT9T031_MAX_HEIGHT;
674 icd->y_skip_top = 0;
675 icd->iface = icl->bus_id;
676 /* Simulated autoexposure. If enabled, we calculate shutter width
677 * ourselves in the driver based on vertical blanking and frame height */
678 mt9t031->autoexposure = 1;
679
680 mt9t031->xskip = 1;
681 mt9t031->yskip = 1;
682
683 ret = soc_camera_device_register(icd);
684 if (ret)
685 goto eisdr;
686
687 return 0;
688
689eisdr:
690 i2c_set_clientdata(client, NULL);
691 kfree(mt9t031);
692 return ret;
693}
694
695static int mt9t031_remove(struct i2c_client *client)
696{
697 struct mt9t031 *mt9t031 = i2c_get_clientdata(client);
698
699 soc_camera_device_unregister(&mt9t031->icd);
700 i2c_set_clientdata(client, NULL);
701 kfree(mt9t031);
702
703 return 0;
704}
705
706static const struct i2c_device_id mt9t031_id[] = {
707 { "mt9t031", 0 },
708 { }
709};
710MODULE_DEVICE_TABLE(i2c, mt9t031_id);
711
712static struct i2c_driver mt9t031_i2c_driver = {
713 .driver = {
714 .name = "mt9t031",
715 },
716 .probe = mt9t031_probe,
717 .remove = mt9t031_remove,
718 .id_table = mt9t031_id,
719};
720
721static int __init mt9t031_mod_init(void)
722{
723 return i2c_add_driver(&mt9t031_i2c_driver);
724}
725
726static void __exit mt9t031_mod_exit(void)
727{
728 i2c_del_driver(&mt9t031_i2c_driver);
729}
730
731module_init(mt9t031_mod_init);
732module_exit(mt9t031_mod_exit);
733
734MODULE_DESCRIPTION("Micron MT9T031 Camera driver");
735MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
736MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 2584201059d8..14a5f9c21ffa 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -273,6 +273,7 @@ static int mt9v022_set_bus_param(struct soc_camera_device *icd,
273 unsigned long flags) 273 unsigned long flags)
274{ 274{
275 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 275 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
276 struct soc_camera_link *icl = mt9v022->client->dev.platform_data;
276 unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK; 277 unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK;
277 int ret; 278 int ret;
278 u16 pixclk = 0; 279 u16 pixclk = 0;
@@ -296,6 +297,8 @@ static int mt9v022_set_bus_param(struct soc_camera_device *icd,
296 mt9v022->datawidth = width_flag == SOCAM_DATAWIDTH_8 ? 8 : 10; 297 mt9v022->datawidth = width_flag == SOCAM_DATAWIDTH_8 ? 8 : 10;
297 } 298 }
298 299
300 flags = soc_camera_apply_sensor_flags(icl, flags);
301
299 if (flags & SOCAM_PCLK_SAMPLE_RISING) 302 if (flags & SOCAM_PCLK_SAMPLE_RISING)
300 pixclk |= 0x10; 303 pixclk |= 0x10;
301 304
@@ -337,14 +340,14 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd)
337 width_flag; 340 width_flag;
338} 341}
339 342
340static int mt9v022_set_fmt_cap(struct soc_camera_device *icd, 343static int mt9v022_set_fmt(struct soc_camera_device *icd,
341 __u32 pixfmt, struct v4l2_rect *rect) 344 __u32 pixfmt, struct v4l2_rect *rect)
342{ 345{
343 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 346 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
344 int ret; 347 int ret;
345 348
346 /* The caller provides a supported format, as verified per call to 349 /* The caller provides a supported format, as verified per call to
347 * icd->try_fmt_cap(), datawidth is from our supported format list */ 350 * icd->try_fmt(), datawidth is from our supported format list */
348 switch (pixfmt) { 351 switch (pixfmt) {
349 case V4L2_PIX_FMT_GREY: 352 case V4L2_PIX_FMT_GREY:
350 case V4L2_PIX_FMT_Y16: 353 case V4L2_PIX_FMT_Y16:
@@ -400,18 +403,20 @@ static int mt9v022_set_fmt_cap(struct soc_camera_device *icd,
400 return 0; 403 return 0;
401} 404}
402 405
403static int mt9v022_try_fmt_cap(struct soc_camera_device *icd, 406static int mt9v022_try_fmt(struct soc_camera_device *icd,
404 struct v4l2_format *f) 407 struct v4l2_format *f)
405{ 408{
406 if (f->fmt.pix.height < 32 + icd->y_skip_top) 409 struct v4l2_pix_format *pix = &f->fmt.pix;
407 f->fmt.pix.height = 32 + icd->y_skip_top; 410
408 if (f->fmt.pix.height > 480 + icd->y_skip_top) 411 if (pix->height < 32 + icd->y_skip_top)
409 f->fmt.pix.height = 480 + icd->y_skip_top; 412 pix->height = 32 + icd->y_skip_top;
410 if (f->fmt.pix.width < 48) 413 if (pix->height > 480 + icd->y_skip_top)
411 f->fmt.pix.width = 48; 414 pix->height = 480 + icd->y_skip_top;
412 if (f->fmt.pix.width > 752) 415 if (pix->width < 48)
413 f->fmt.pix.width = 752; 416 pix->width = 48;
414 f->fmt.pix.width &= ~0x03; /* ? */ 417 if (pix->width > 752)
418 pix->width = 752;
419 pix->width &= ~0x03; /* ? */
415 420
416 return 0; 421 return 0;
417} 422}
@@ -538,8 +543,8 @@ static struct soc_camera_ops mt9v022_ops = {
538 .release = mt9v022_release, 543 .release = mt9v022_release,
539 .start_capture = mt9v022_start_capture, 544 .start_capture = mt9v022_start_capture,
540 .stop_capture = mt9v022_stop_capture, 545 .stop_capture = mt9v022_stop_capture,
541 .set_fmt_cap = mt9v022_set_fmt_cap, 546 .set_fmt = mt9v022_set_fmt,
542 .try_fmt_cap = mt9v022_try_fmt_cap, 547 .try_fmt = mt9v022_try_fmt,
543 .set_bus_param = mt9v022_set_bus_param, 548 .set_bus_param = mt9v022_set_bus_param,
544 .query_bus_param = mt9v022_query_bus_param, 549 .query_bus_param = mt9v022_query_bus_param,
545 .controls = mt9v022_controls, 550 .controls = mt9v022_controls,
@@ -690,6 +695,7 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
690static int mt9v022_video_probe(struct soc_camera_device *icd) 695static int mt9v022_video_probe(struct soc_camera_device *icd)
691{ 696{
692 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 697 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
698 struct soc_camera_link *icl = mt9v022->client->dev.platform_data;
693 s32 data; 699 s32 data;
694 int ret; 700 int ret;
695 701
@@ -725,7 +731,7 @@ static int mt9v022_video_probe(struct soc_camera_device *icd)
725 ret = reg_write(icd, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11); 731 ret = reg_write(icd, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11);
726 mt9v022->model = V4L2_IDENT_MT9V022IX7ATC; 732 mt9v022->model = V4L2_IDENT_MT9V022IX7ATC;
727 icd->formats = mt9v022_colour_formats; 733 icd->formats = mt9v022_colour_formats;
728 if (mt9v022->client->dev.platform_data) 734 if (gpio_is_valid(icl->gpio))
729 icd->num_formats = ARRAY_SIZE(mt9v022_colour_formats); 735 icd->num_formats = ARRAY_SIZE(mt9v022_colour_formats);
730 else 736 else
731 icd->num_formats = 1; 737 icd->num_formats = 1;
@@ -733,7 +739,7 @@ static int mt9v022_video_probe(struct soc_camera_device *icd)
733 ret = reg_write(icd, MT9V022_PIXEL_OPERATION_MODE, 0x11); 739 ret = reg_write(icd, MT9V022_PIXEL_OPERATION_MODE, 0x11);
734 mt9v022->model = V4L2_IDENT_MT9V022IX7ATM; 740 mt9v022->model = V4L2_IDENT_MT9V022IX7ATM;
735 icd->formats = mt9v022_monochrome_formats; 741 icd->formats = mt9v022_monochrome_formats;
736 if (mt9v022->client->dev.platform_data) 742 if (gpio_is_valid(icl->gpio))
737 icd->num_formats = ARRAY_SIZE(mt9v022_monochrome_formats); 743 icd->num_formats = ARRAY_SIZE(mt9v022_monochrome_formats);
738 else 744 else
739 icd->num_formats = 1; 745 icd->num_formats = 1;
@@ -760,8 +766,8 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
760 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 766 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
761 767
762 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9v022->client->addr, 768 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9v022->client->addr,
763 mt9v022->icd.dev.parent, mt9v022->icd.vdev); 769 icd->dev.parent, icd->vdev);
764 soc_camera_video_stop(&mt9v022->icd); 770 soc_camera_video_stop(icd);
765} 771}
766 772
767static int mt9v022_probe(struct i2c_client *client, 773static int mt9v022_probe(struct i2c_client *client,
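Both changes above assume the board supplies a struct soc_camera_link as the client's platform data: the negotiated bus flags are now passed through soc_camera_apply_sensor_flags(), and whether the full format list or a single format is advertised keys off gpio_is_valid(icl->gpio) rather than the mere presence of platform data. A hedged sketch of the board-side piece, showing only the fields these diffs actually dereference (the i2c address is an assumption):

static struct soc_camera_link mt9v022_iclink = {
	.bus_id	= 0,		/* must match the camera host's nr */
	.gpio	= -ENXIO,	/* no datawidth GPIO: only one format is advertised */
};

static struct i2c_board_info camera_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("mt9v022", 0x48),	/* address is an assumption */
		.platform_data = &mt9v022_iclink,
	},
};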
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
new file mode 100644
index 000000000000..1d54b86c936b
--- /dev/null
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -0,0 +1,601 @@
1/*
2 * drivers/media/video/omap24xxcam-dma.c
3 *
4 * Copyright (C) 2004 MontaVista Software, Inc.
5 * Copyright (C) 2004 Texas Instruments.
6 * Copyright (C) 2007 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
9 *
10 * Based on code from Andy Lowe <source@mvista.com> and
11 * David Cohen <david.cohen@indt.org.br>.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * version 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 * 02110-1301 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/io.h>
30#include <linux/scatterlist.h>
31
32#include "omap24xxcam.h"
33
34/*
35 *
36 * DMA hardware.
37 *
38 */
39
40/* Ack all interrupt on CSR and IRQSTATUS_L0 */
41static void omap24xxcam_dmahw_ack_all(unsigned long base)
42{
43 u32 csr;
44 int i;
45
46 for (i = 0; i < NUM_CAMDMA_CHANNELS; ++i) {
47 csr = omap24xxcam_reg_in(base, CAMDMA_CSR(i));
48 /* ack interrupt in CSR */
49 omap24xxcam_reg_out(base, CAMDMA_CSR(i), csr);
50 }
51 omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, 0xf);
52}
53
54/* Ack dmach on CSR and IRQSTATUS_L0 */
55static u32 omap24xxcam_dmahw_ack_ch(unsigned long base, int dmach)
56{
57 u32 csr;
58
59 csr = omap24xxcam_reg_in(base, CAMDMA_CSR(dmach));
60 /* ack interrupt in CSR */
61 omap24xxcam_reg_out(base, CAMDMA_CSR(dmach), csr);
62 /* ack interrupt in IRQSTATUS */
63 omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, (1 << dmach));
64
65 return csr;
66}
67
68static int omap24xxcam_dmahw_running(unsigned long base, int dmach)
69{
70 return omap24xxcam_reg_in(base, CAMDMA_CCR(dmach)) & CAMDMA_CCR_ENABLE;
71}
72
73static void omap24xxcam_dmahw_transfer_setup(unsigned long base, int dmach,
74 dma_addr_t start, u32 len)
75{
76 omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
77 CAMDMA_CCR_SEL_SRC_DST_SYNC
78 | CAMDMA_CCR_BS
79 | CAMDMA_CCR_DST_AMODE_POST_INC
80 | CAMDMA_CCR_SRC_AMODE_POST_INC
81 | CAMDMA_CCR_FS
82 | CAMDMA_CCR_WR_ACTIVE
83 | CAMDMA_CCR_RD_ACTIVE
84 | CAMDMA_CCR_SYNCHRO_CAMERA);
85 omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(dmach), 0);
86 omap24xxcam_reg_out(base, CAMDMA_CEN(dmach), len);
87 omap24xxcam_reg_out(base, CAMDMA_CFN(dmach), 1);
88 omap24xxcam_reg_out(base, CAMDMA_CSDP(dmach),
89 CAMDMA_CSDP_WRITE_MODE_POSTED
90 | CAMDMA_CSDP_DST_BURST_EN_32
91 | CAMDMA_CSDP_DST_PACKED
92 | CAMDMA_CSDP_SRC_BURST_EN_32
93 | CAMDMA_CSDP_SRC_PACKED
94 | CAMDMA_CSDP_DATA_TYPE_8BITS);
95 omap24xxcam_reg_out(base, CAMDMA_CSSA(dmach), 0);
96 omap24xxcam_reg_out(base, CAMDMA_CDSA(dmach), start);
97 omap24xxcam_reg_out(base, CAMDMA_CSEI(dmach), 0);
98 omap24xxcam_reg_out(base, CAMDMA_CSFI(dmach), DMA_THRESHOLD);
99 omap24xxcam_reg_out(base, CAMDMA_CDEI(dmach), 0);
100 omap24xxcam_reg_out(base, CAMDMA_CDFI(dmach), 0);
101 omap24xxcam_reg_out(base, CAMDMA_CSR(dmach),
102 CAMDMA_CSR_MISALIGNED_ERR
103 | CAMDMA_CSR_SECURE_ERR
104 | CAMDMA_CSR_TRANS_ERR
105 | CAMDMA_CSR_BLOCK
106 | CAMDMA_CSR_DROP);
107 omap24xxcam_reg_out(base, CAMDMA_CICR(dmach),
108 CAMDMA_CICR_MISALIGNED_ERR_IE
109 | CAMDMA_CICR_SECURE_ERR_IE
110 | CAMDMA_CICR_TRANS_ERR_IE
111 | CAMDMA_CICR_BLOCK_IE
112 | CAMDMA_CICR_DROP_IE);
113}
114
115static void omap24xxcam_dmahw_transfer_start(unsigned long base, int dmach)
116{
117 omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
118 CAMDMA_CCR_SEL_SRC_DST_SYNC
119 | CAMDMA_CCR_BS
120 | CAMDMA_CCR_DST_AMODE_POST_INC
121 | CAMDMA_CCR_SRC_AMODE_POST_INC
122 | CAMDMA_CCR_ENABLE
123 | CAMDMA_CCR_FS
124 | CAMDMA_CCR_SYNCHRO_CAMERA);
125}
126
127static void omap24xxcam_dmahw_transfer_chain(unsigned long base, int dmach,
128 int free_dmach)
129{
130 int prev_dmach, ch;
131
132 if (dmach == 0)
133 prev_dmach = NUM_CAMDMA_CHANNELS - 1;
134 else
135 prev_dmach = dmach - 1;
136 omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(prev_dmach),
137 CAMDMA_CLNK_CTRL_ENABLE_LNK | dmach);
138 /* Did we chain the DMA transfer before the previous one
139 * finished?
140 */
141 ch = (dmach + free_dmach) % NUM_CAMDMA_CHANNELS;
142 while (!(omap24xxcam_reg_in(base, CAMDMA_CCR(ch))
143 & CAMDMA_CCR_ENABLE)) {
144 if (ch == dmach) {
145 /* The previous transfer has ended and this one
146 * hasn't started, so we must not have chained
147 * to the previous one in time. We'll have to
148 * start it now.
149 */
150 omap24xxcam_dmahw_transfer_start(base, dmach);
151 break;
152 } else
153 ch = (ch + 1) % NUM_CAMDMA_CHANNELS;
154 }
155}
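The check that follows the link write only makes sense together with the channel bookkeeping kept in struct omap24xxcam_dma. A worked example of that invariant (the channel count is assumed to be 4, matching the 0xf interrupt masks programmed in omap24xxcam_dmahw_ack_all() and omap24xxcam_dmahw_init()):

/*
 *   oldest busy channel = (next_dmach + free_dmach) % NUM_CAMDMA_CHANNELS
 *
 * e.g. with next_dmach = 2 and free_dmach = 1, channels 3, 0 and 1 are
 * in flight (oldest first) and channel 2 is the one being set up.  The
 * while loop walks from the oldest busy channel towards dmach; if it
 * reaches dmach without finding an enabled channel, every earlier
 * transfer finished before the link took effect, so the new channel
 * has to be started by software instead of by hardware chaining.
 */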
156
157/* Abort all chained DMA transfers. After all transfers have been
158 * aborted and the DMA controller is idle, the completion routines for
159 * any aborted transfers will be called in sequence. The DMA
160 * controller may not be idle after this routine completes, because
161 * the completion routines might start new transfers.
162 */
163static void omap24xxcam_dmahw_abort_ch(unsigned long base, int dmach)
164{
165 /* mask all interrupts from this channel */
166 omap24xxcam_reg_out(base, CAMDMA_CICR(dmach), 0);
167 /* unlink this channel */
168 omap24xxcam_reg_merge(base, CAMDMA_CLNK_CTRL(dmach), 0,
169 CAMDMA_CLNK_CTRL_ENABLE_LNK);
170 /* disable this channel */
171 omap24xxcam_reg_merge(base, CAMDMA_CCR(dmach), 0, CAMDMA_CCR_ENABLE);
172}
173
174static void omap24xxcam_dmahw_init(unsigned long base)
175{
176 omap24xxcam_reg_out(base, CAMDMA_OCP_SYSCONFIG,
177 CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY
178 | CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE
179 | CAMDMA_OCP_SYSCONFIG_AUTOIDLE);
180
181 omap24xxcam_reg_merge(base, CAMDMA_GCR, 0x10,
182 CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH);
183
184 omap24xxcam_reg_out(base, CAMDMA_IRQENABLE_L0, 0xf);
185}
186
187/*
188 *
189 * Individual DMA channel handling.
190 *
191 */
192
193/* Start a DMA transfer from the camera to memory.
194 * Returns zero if the transfer was successfully started, or non-zero if all
195 * DMA channels are already in use or starting is currently inhibited.
196 */
197static int omap24xxcam_dma_start(struct omap24xxcam_dma *dma, dma_addr_t start,
198 u32 len, dma_callback_t callback, void *arg)
199{
200 unsigned long flags;
201 int dmach;
202
203 spin_lock_irqsave(&dma->lock, flags);
204
205 if (!dma->free_dmach || atomic_read(&dma->dma_stop)) {
206 spin_unlock_irqrestore(&dma->lock, flags);
207 return -EBUSY;
208 }
209
210 dmach = dma->next_dmach;
211
212 dma->ch_state[dmach].callback = callback;
213 dma->ch_state[dmach].arg = arg;
214
215 omap24xxcam_dmahw_transfer_setup(dma->base, dmach, start, len);
216
217 /* We're ready to start the DMA transfer. */
218
219 if (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
220 /* A transfer is already in progress, so try to chain to it. */
221 omap24xxcam_dmahw_transfer_chain(dma->base, dmach,
222 dma->free_dmach);
223 } else {
224 /* No transfer is in progress, so we'll just start this one
225 * now.
226 */
227 omap24xxcam_dmahw_transfer_start(dma->base, dmach);
228 }
229
230 dma->next_dmach = (dma->next_dmach + 1) % NUM_CAMDMA_CHANNELS;
231 dma->free_dmach--;
232
233 spin_unlock_irqrestore(&dma->lock, flags);
234
235 return 0;
236}
237
238/* Abort all chained DMA transfers. After all transfers have been
239 * aborted and the DMA controller is idle, the completion routines for
240 * any aborted transfers will be called in sequence. The DMA
241 * controller may not be idle after this routine completes, because
242 * the completion routines might start new transfers.
243 */
244static void omap24xxcam_dma_abort(struct omap24xxcam_dma *dma, u32 csr)
245{
246 unsigned long flags;
247 int dmach, i, free_dmach;
248 dma_callback_t callback;
249 void *arg;
250
251 spin_lock_irqsave(&dma->lock, flags);
252
253 /* stop any DMA transfers in progress */
254 dmach = (dma->next_dmach + dma->free_dmach) % NUM_CAMDMA_CHANNELS;
255 for (i = 0; i < NUM_CAMDMA_CHANNELS; i++) {
256 omap24xxcam_dmahw_abort_ch(dma->base, dmach);
257 dmach = (dmach + 1) % NUM_CAMDMA_CHANNELS;
258 }
259
260 /* We have to be careful here because the callback routine
261 * might start a new DMA transfer, and we only want to abort
262 * transfers that were started before this routine was called.
263 */
264 free_dmach = dma->free_dmach;
265 while ((dma->free_dmach < NUM_CAMDMA_CHANNELS) &&
266 (free_dmach < NUM_CAMDMA_CHANNELS)) {
267 dmach = (dma->next_dmach + dma->free_dmach)
268 % NUM_CAMDMA_CHANNELS;
269 callback = dma->ch_state[dmach].callback;
270 arg = dma->ch_state[dmach].arg;
271 dma->free_dmach++;
272 free_dmach++;
273 if (callback) {
274 /* leave interrupts disabled during callback */
275 spin_unlock(&dma->lock);
276 (*callback) (dma, csr, arg);
277 spin_lock(&dma->lock);
278 }
279 }
280
281 spin_unlock_irqrestore(&dma->lock, flags);
282}
283
284/* Abort all chained DMA transfers. After all transfers have been
285 * aborted and the DMA controller is idle, the completion routines for
286 * any aborted transfers will be called in sequence. If the completion
287 * routines attempt to start a new DMA transfer it will fail, so the
288 * DMA controller will be idle after this routine completes.
289 */
290static void omap24xxcam_dma_stop(struct omap24xxcam_dma *dma, u32 csr)
291{
292 atomic_inc(&dma->dma_stop);
293 omap24xxcam_dma_abort(dma, csr);
294 atomic_dec(&dma->dma_stop);
295}
296
297/* Camera DMA interrupt service routine. */
298void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma)
299{
300 int dmach;
301 dma_callback_t callback;
302 void *arg;
303 u32 csr;
304 const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
305 | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
306 | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
307
308 spin_lock(&dma->lock);
309
310 if (dma->free_dmach == NUM_CAMDMA_CHANNELS) {
311 /* A camera DMA interrupt occurred while all channels
312 * are idle, so we'll acknowledge the interrupt in the
313 * IRQSTATUS register and exit.
314 */
315 omap24xxcam_dmahw_ack_all(dma->base);
316 spin_unlock(&dma->lock);
317 return;
318 }
319
320 while (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
321 dmach = (dma->next_dmach + dma->free_dmach)
322 % NUM_CAMDMA_CHANNELS;
323 if (omap24xxcam_dmahw_running(dma->base, dmach)) {
324 /* This buffer hasn't finished yet, so we're done. */
325 break;
326 }
327 csr = omap24xxcam_dmahw_ack_ch(dma->base, dmach);
328 if (csr & csr_error) {
329 /* A DMA error occurred, so stop all DMA
330 * transfers in progress.
331 */
332 spin_unlock(&dma->lock);
333 omap24xxcam_dma_stop(dma, csr);
334 return;
335 } else {
336 callback = dma->ch_state[dmach].callback;
337 arg = dma->ch_state[dmach].arg;
338 dma->free_dmach++;
339 if (callback) {
340 spin_unlock(&dma->lock);
341 (*callback) (dma, csr, arg);
342 spin_lock(&dma->lock);
343 }
344 }
345 }
346
347 spin_unlock(&dma->lock);
348
349 omap24xxcam_sgdma_process(
350 container_of(dma, struct omap24xxcam_sgdma, dma));
351}
352
353void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma)
354{
355 unsigned long flags;
356
357 spin_lock_irqsave(&dma->lock, flags);
358
359 omap24xxcam_dmahw_init(dma->base);
360
361 spin_unlock_irqrestore(&dma->lock, flags);
362}
363
364static void omap24xxcam_dma_init(struct omap24xxcam_dma *dma,
365 unsigned long base)
366{
367 int ch;
368
369 /* group all channels on DMA IRQ0 and unmask irq */
370 spin_lock_init(&dma->lock);
371 dma->base = base;
372 dma->free_dmach = NUM_CAMDMA_CHANNELS;
373 dma->next_dmach = 0;
374 for (ch = 0; ch < NUM_CAMDMA_CHANNELS; ch++) {
375 dma->ch_state[ch].callback = NULL;
376 dma->ch_state[ch].arg = NULL;
377 }
378}
379
380/*
381 *
382 * Scatter-gather DMA.
383 *
384 * High-level DMA construct for transferring whole picture frames to
385 * memory that is discontinuous.
386 *
387 */
388
389/* DMA completion routine for the scatter-gather DMA fragments. */
390static void omap24xxcam_sgdma_callback(struct omap24xxcam_dma *dma, u32 csr,
391 void *arg)
392{
393 struct omap24xxcam_sgdma *sgdma =
394 container_of(dma, struct omap24xxcam_sgdma, dma);
395 int sgslot = (int)arg;
396 struct sgdma_state *sg_state;
397 const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
398 | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
399 | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
400
401 spin_lock(&sgdma->lock);
402
403 /* We got an interrupt, we can remove the timer */
404 del_timer(&sgdma->reset_timer);
405
406 sg_state = sgdma->sg_state + sgslot;
407 if (!sg_state->queued_sglist) {
408 spin_unlock(&sgdma->lock);
409 printk(KERN_ERR "%s: sgdma completed when none queued!\n",
410 __func__);
411 return;
412 }
413
414 sg_state->csr |= csr;
415 if (!--sg_state->queued_sglist) {
416 /* Queue for this sglist is empty, so check to see if we're
417 * done.
418 */
419 if ((sg_state->next_sglist == sg_state->sglen)
420 || (sg_state->csr & csr_error)) {
421 sgdma_callback_t callback = sg_state->callback;
422 void *arg = sg_state->arg;
423 u32 sg_csr = sg_state->csr;
424 /* All done with this sglist */
425 sgdma->free_sgdma++;
426 if (callback) {
427 spin_unlock(&sgdma->lock);
428 (*callback) (sgdma, sg_csr, arg);
429 return;
430 }
431 }
432 }
433
434 spin_unlock(&sgdma->lock);
435}
436
437/* Start queued scatter-gather DMA transfers. */
438void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma)
439{
440 unsigned long flags;
441 int queued_sgdma, sgslot;
442 struct sgdma_state *sg_state;
443 const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
444 | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
445 | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
446
447 spin_lock_irqsave(&sgdma->lock, flags);
448
449 queued_sgdma = NUM_SG_DMA - sgdma->free_sgdma;
450 sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
451 while (queued_sgdma > 0) {
452 sg_state = sgdma->sg_state + sgslot;
453 while ((sg_state->next_sglist < sg_state->sglen) &&
454 !(sg_state->csr & csr_error)) {
455 const struct scatterlist *sglist;
456 unsigned int len;
457
458 sglist = sg_state->sglist + sg_state->next_sglist;
459 /* try to start the next DMA transfer */
460 if (sg_state->next_sglist + 1 == sg_state->sglen) {
461 /*
462 * On the last sg, we handle the case where
463 * cam->img.pix.sizeimage % PAGE_SIZE != 0
464 */
465 len = sg_state->len - sg_state->bytes_read;
466 } else {
467 len = sg_dma_len(sglist);
468 }
469
470 if (omap24xxcam_dma_start(&sgdma->dma,
471 sg_dma_address(sglist),
472 len,
473 omap24xxcam_sgdma_callback,
474 (void *)sgslot)) {
475 /* DMA start failed */
476 spin_unlock_irqrestore(&sgdma->lock, flags);
477 return;
478 } else {
479 unsigned long expires;
480 /* DMA start was successful */
481 sg_state->next_sglist++;
482 sg_state->bytes_read += len;
483 sg_state->queued_sglist++;
484
485 /* We start the reset timer */
486 expires = jiffies + HZ;
487 mod_timer(&sgdma->reset_timer, expires);
488 }
489 }
490 queued_sgdma--;
491 sgslot = (sgslot + 1) % NUM_SG_DMA;
492 }
493
494 spin_unlock_irqrestore(&sgdma->lock, flags);
495}
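The special case for the last scatterlist entry keeps the DMA from reading past the end of a frame whose size does not fill its final scatterlist segment. A short worked example (the sizes are assumptions for illustration):

/*
 * A frame of len = 150000 bytes described by three 65536-byte entries:
 *   entry 0: programmed with sg_dma_len = 65536, bytes_read ->  65536
 *   entry 1: programmed with sg_dma_len = 65536, bytes_read -> 131072
 *   entry 2: last one, programmed with len - bytes_read = 18928 rather
 *            than the full 65536, so exactly 150000 bytes are moved.
 */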
496
497/*
498 * Queue a scatter-gather DMA transfer from the camera to memory.
499 * Returns zero if the transfer was successfully queued, or non-zero
500 * if all of the scatter-gather slots are already in use.
501 */
502int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
503 const struct scatterlist *sglist, int sglen,
504 int len, sgdma_callback_t callback, void *arg)
505{
506 unsigned long flags;
507 struct sgdma_state *sg_state;
508
509 if ((sglen < 0) || ((sglen > 0) & !sglist))
510 return -EINVAL;
511
512 spin_lock_irqsave(&sgdma->lock, flags);
513
514 if (!sgdma->free_sgdma) {
515 spin_unlock_irqrestore(&sgdma->lock, flags);
516 return -EBUSY;
517 }
518
519 sg_state = sgdma->sg_state + sgdma->next_sgdma;
520
521 sg_state->sglist = sglist;
522 sg_state->sglen = sglen;
523 sg_state->next_sglist = 0;
524 sg_state->bytes_read = 0;
525 sg_state->len = len;
526 sg_state->queued_sglist = 0;
527 sg_state->csr = 0;
528 sg_state->callback = callback;
529 sg_state->arg = arg;
530
531 sgdma->next_sgdma = (sgdma->next_sgdma + 1) % NUM_SG_DMA;
532 sgdma->free_sgdma--;
533
534 spin_unlock_irqrestore(&sgdma->lock, flags);
535
536 omap24xxcam_sgdma_process(sgdma);
537
538 return 0;
539}
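A hedged usage sketch for omap24xxcam_sgdma_queue(): the callback fires (typically from the DMA interrupt path) once the whole frame has completed or an error bit has accumulated in csr, and -EBUSY means all NUM_SG_DMA slots are taken so the caller should retry after a completion. The names frame_done and queue_frame below are illustrative, not part of the driver:

static void frame_done(struct omap24xxcam_sgdma *sgdma, u32 csr, void *arg)
{
	/* csr is the OR of the per-fragment status bits for this frame */
}

static int queue_frame(struct omap24xxcam_sgdma *sgdma,
		       struct scatterlist *sglist, int sglen,
		       int frame_bytes, void *frame)
{
	return omap24xxcam_sgdma_queue(sgdma, sglist, sglen, frame_bytes,
				       frame_done, frame);
}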
540
541/* Sync scatter-gather DMA by aborting any DMA transfers currently in progress.
542 * Any queued scatter-gather DMA transactions that have not yet been started
543 * will remain queued. The DMA controller will be idle after this routine
544 * completes. When the scatter-gather queue is restarted, the next
545 * scatter-gather DMA transfer will begin at the start of a new transaction.
546 */
547void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma)
548{
549 unsigned long flags;
550 int sgslot;
551 struct sgdma_state *sg_state;
552 u32 csr = CAMDMA_CSR_TRANS_ERR;
553
554 /* stop any DMA transfers in progress */
555 omap24xxcam_dma_stop(&sgdma->dma, csr);
556
557 spin_lock_irqsave(&sgdma->lock, flags);
558
559 if (sgdma->free_sgdma < NUM_SG_DMA) {
560 sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
561 sg_state = sgdma->sg_state + sgslot;
562 if (sg_state->next_sglist != 0) {
563 /* This DMA transfer was in progress, so abort it. */
564 sgdma_callback_t callback = sg_state->callback;
565 void *arg = sg_state->arg;
566 sgdma->free_sgdma++;
567 if (callback) {
568 /* leave interrupts masked */
569 spin_unlock(&sgdma->lock);
570 (*callback) (sgdma, csr, arg);
571 spin_lock(&sgdma->lock);
572 }
573 }
574 }
575
576 spin_unlock_irqrestore(&sgdma->lock, flags);
577}
578
579void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
580 unsigned long base,
581 void (*reset_callback)(unsigned long data),
582 unsigned long reset_callback_data)
583{
584 int sg;
585
586 spin_lock_init(&sgdma->lock);
587 sgdma->free_sgdma = NUM_SG_DMA;
588 sgdma->next_sgdma = 0;
589 for (sg = 0; sg < NUM_SG_DMA; sg++) {
590 sgdma->sg_state[sg].sglen = 0;
591 sgdma->sg_state[sg].next_sglist = 0;
592 sgdma->sg_state[sg].bytes_read = 0;
593 sgdma->sg_state[sg].queued_sglist = 0;
594 sgdma->sg_state[sg].csr = 0;
595 sgdma->sg_state[sg].callback = NULL;
596 sgdma->sg_state[sg].arg = NULL;
597 }
598
599 omap24xxcam_dma_init(&sgdma->dma, base);
600 setup_timer(&sgdma->reset_timer, reset_callback, reset_callback_data);
601}
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
new file mode 100644
index 000000000000..85c3c7c92af1
--- /dev/null
+++ b/drivers/media/video/omap24xxcam.c
@@ -0,0 +1,1908 @@
1/*
2 * drivers/media/video/omap24xxcam.c
3 *
4 * OMAP 2 camera block driver.
5 *
6 * Copyright (C) 2004 MontaVista Software, Inc.
7 * Copyright (C) 2004 Texas Instruments.
8 * Copyright (C) 2007-2008 Nokia Corporation.
9 *
10 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
11 *
12 * Based on code from Andy Lowe <source@mvista.com>
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * version 2 as published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 * 02110-1301 USA
27 */
28
29#include <linux/delay.h>
30#include <linux/kernel.h>
31#include <linux/interrupt.h>
32#include <linux/videodev2.h>
33#include <linux/pci.h> /* needed for videobufs */
34#include <linux/version.h>
35#include <linux/platform_device.h>
36#include <linux/clk.h>
37#include <linux/io.h>
38
39#include <media/v4l2-common.h>
40#include <media/v4l2-ioctl.h>
41
42#include "omap24xxcam.h"
43
44#define OMAP24XXCAM_VERSION KERNEL_VERSION(0, 0, 0)
45
46#define RESET_TIMEOUT_NS 10000
47
48static void omap24xxcam_reset(struct omap24xxcam_device *cam);
49static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam);
50static void omap24xxcam_device_unregister(struct v4l2_int_device *s);
51static int omap24xxcam_remove(struct platform_device *pdev);
52
53/* module parameters */
54static int video_nr = -1; /* video device minor (-1 ==> auto assign) */
55/*
56 * Maximum amount of memory to use for capture buffers.
 57 * Default is 4800 KiB, enough to double-buffer 1280x960 at 2 bytes/pixel.
58 */
59static int capture_mem = 1280 * 960 * 2 * 2;
60
61static struct v4l2_int_device omap24xxcam;
62
63/*
64 *
65 * Clocks.
66 *
67 */
68
69static void omap24xxcam_clock_put(struct omap24xxcam_device *cam)
70{
71 if (cam->ick != NULL && !IS_ERR(cam->ick))
72 clk_put(cam->ick);
73 if (cam->fck != NULL && !IS_ERR(cam->fck))
74 clk_put(cam->fck);
75
76 cam->ick = cam->fck = NULL;
77}
78
79static int omap24xxcam_clock_get(struct omap24xxcam_device *cam)
80{
81 int rval = 0;
82
83 cam->fck = clk_get(cam->dev, "cam_fck");
84 if (IS_ERR(cam->fck)) {
85 dev_err(cam->dev, "can't get cam_fck");
86 rval = PTR_ERR(cam->fck);
87 omap24xxcam_clock_put(cam);
88 return rval;
89 }
90
91 cam->ick = clk_get(cam->dev, "cam_ick");
92 if (IS_ERR(cam->ick)) {
93 dev_err(cam->dev, "can't get cam_ick");
94 rval = PTR_ERR(cam->ick);
95 omap24xxcam_clock_put(cam);
96 }
97
98 return rval;
99}
100
101static void omap24xxcam_clock_on(struct omap24xxcam_device *cam)
102{
103 clk_enable(cam->fck);
104 clk_enable(cam->ick);
105}
106
107static void omap24xxcam_clock_off(struct omap24xxcam_device *cam)
108{
109 clk_disable(cam->fck);
110 clk_disable(cam->ick);
111}
112
113/*
114 *
115 * Camera core
116 *
117 */
118
119/*
120 * Set xclk.
121 *
122 * To disable xclk, use value zero.
123 */
124static void omap24xxcam_core_xclk_set(const struct omap24xxcam_device *cam,
125 u32 xclk)
126{
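	/*
	 * xclk is derived from CAM_MCLK (96 MHz) by an integer divisor
	 * programmed into CC_CTRL_XCLK; a divisor of one has to be
	 * programmed as the bypass value.
	 */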
127 if (xclk) {
128 u32 divisor = CAM_MCLK / xclk;
129
130 if (divisor == 1)
131 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
132 CC_CTRL_XCLK,
133 CC_CTRL_XCLK_DIV_BYPASS);
134 else
135 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
136 CC_CTRL_XCLK, divisor);
137 } else
138 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
139 CC_CTRL_XCLK, CC_CTRL_XCLK_DIV_STABLE_LOW);
140}
141
142static void omap24xxcam_core_hwinit(const struct omap24xxcam_device *cam)
143{
144 /*
145 * Setting the camera core AUTOIDLE bit causes problems with frame
146 * synchronization, so we will clear the AUTOIDLE bit instead.
147 */
148 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_SYSCONFIG,
149 CC_SYSCONFIG_AUTOIDLE);
150
151 /* program the camera interface DMA packet size */
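	/*
	 * DMA_THRESHOLD is in bytes (32); the FIFO threshold field is
	 * presumably expressed in 32-bit words less one, hence
	 * DMA_THRESHOLD / 4 - 1 = 7 here.
	 */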
152 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_DMA,
153 CC_CTRL_DMA_EN | (DMA_THRESHOLD / 4 - 1));
154
155 /* enable camera core error interrupts */
156 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQENABLE,
157 CC_IRQENABLE_FW_ERR_IRQ
158 | CC_IRQENABLE_FSC_ERR_IRQ
159 | CC_IRQENABLE_SSC_ERR_IRQ
160 | CC_IRQENABLE_FIFO_OF_IRQ);
161}
162
163/*
164 * Enable the camera core.
165 *
 166 * Data transfer to the camera DMA starts at the next frame boundary.
167 */
168static void omap24xxcam_core_enable(const struct omap24xxcam_device *cam)
169{
170
171 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
172 cam->cc_ctrl);
173}
174
175/*
176 * Disable camera core.
177 *
178 * The data transfer will be stopped immediately (CC_CTRL_CC_RST). The
179 * core internal state machines will be reset. Use
180 * CC_CTRL_CC_FRAME_TRIG instead if you want to transfer the current
181 * frame completely.
182 */
183static void omap24xxcam_core_disable(const struct omap24xxcam_device *cam)
184{
185 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
186 CC_CTRL_CC_RST);
187}
188
189/* Interrupt service routine for camera core interrupts. */
190static void omap24xxcam_core_isr(struct omap24xxcam_device *cam)
191{
192 u32 cc_irqstatus;
193 const u32 cc_irqstatus_err =
194 CC_IRQSTATUS_FW_ERR_IRQ
195 | CC_IRQSTATUS_FSC_ERR_IRQ
196 | CC_IRQSTATUS_SSC_ERR_IRQ
197 | CC_IRQSTATUS_FIFO_UF_IRQ
198 | CC_IRQSTATUS_FIFO_OF_IRQ;
199
200 cc_irqstatus = omap24xxcam_reg_in(cam->mmio_base + CC_REG_OFFSET,
201 CC_IRQSTATUS);
202 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQSTATUS,
203 cc_irqstatus);
204
205 if (cc_irqstatus & cc_irqstatus_err
206 && !atomic_read(&cam->in_reset)) {
207 dev_dbg(cam->dev, "resetting camera, cc_irqstatus 0x%x\n",
208 cc_irqstatus);
209 omap24xxcam_reset(cam);
210 }
211}
212
213/*
214 *
215 * videobuf_buffer handling.
216 *
217 * Memory for mmapped videobuf_buffers is not allocated
 218 * conventionally, but from several alloc_pages() chunks; the
 219 * scatterlist is then built by the driver itself. User-space buffers
 220 * are handled normally.
221 *
222 */
223
224/*
225 * Free the memory-mapped buffer memory allocated for a
226 * videobuf_buffer and the associated scatterlist.
227 */
228static void omap24xxcam_vbq_free_mmap_buffer(struct videobuf_buffer *vb)
229{
230 struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
231 size_t alloc_size;
232 struct page *page;
233 int i;
234
235 if (dma->sglist == NULL)
236 return;
237
238 i = dma->sglen;
239 while (i) {
240 i--;
241 alloc_size = sg_dma_len(&dma->sglist[i]);
242 page = sg_page(&dma->sglist[i]);
243 do {
244 ClearPageReserved(page++);
245 } while (alloc_size -= PAGE_SIZE);
246 __free_pages(sg_page(&dma->sglist[i]),
247 get_order(sg_dma_len(&dma->sglist[i])));
248 }
249
250 kfree(dma->sglist);
251 dma->sglist = NULL;
252}
253
254/* Release all memory related to the videobuf_queue. */
255static void omap24xxcam_vbq_free_mmap_buffers(struct videobuf_queue *vbq)
256{
257 int i;
258
259 mutex_lock(&vbq->vb_lock);
260
261 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
262 if (NULL == vbq->bufs[i])
263 continue;
264 if (V4L2_MEMORY_MMAP != vbq->bufs[i]->memory)
265 continue;
266 vbq->ops->buf_release(vbq, vbq->bufs[i]);
267 omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
268 kfree(vbq->bufs[i]);
269 vbq->bufs[i] = NULL;
270 }
271
272 mutex_unlock(&vbq->vb_lock);
273
274 videobuf_mmap_free(vbq);
275}
276
277/*
 278 * Allocate a buffer for the video frame that is as physically
 279 * contiguous as possible, and build a DMA scatter-gather list for it.
280 */
281static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
282{
283 unsigned int order;
284 size_t alloc_size, size = vb->bsize; /* vb->bsize is page aligned */
285 struct page *page;
286 int max_pages, err = 0, i = 0;
287 struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
288
289 /*
 290	 * Allocate a maximum-size scatter-gather list. Note this is
 291	 * overhead; we may not use as many entries as we allocate.
292 */
293 max_pages = vb->bsize >> PAGE_SHIFT;
294 dma->sglist = kcalloc(max_pages, sizeof(*dma->sglist), GFP_KERNEL);
295 if (dma->sglist == NULL) {
296 err = -ENOMEM;
297 goto out;
298 }
299
300 while (size) {
301 order = get_order(size);
302 /*
303 * do not over-allocate even if we would get larger
304 * contiguous chunk that way
305 */
306 if ((PAGE_SIZE << order) > size)
307 order--;
308
309 /* try to allocate as many contiguous pages as possible */
310 page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
311 /* if allocation fails, try to allocate smaller amount */
312 while (page == NULL) {
313 order--;
314 page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
315 if (page == NULL && !order) {
316 err = -ENOMEM;
317 goto out;
318 }
319 }
320 size -= (PAGE_SIZE << order);
321
322 /* append allocated chunk of pages into scatter-gather list */
323 sg_set_page(&dma->sglist[i], page, PAGE_SIZE << order, 0);
324 dma->sglen++;
325 i++;
326
327 alloc_size = (PAGE_SIZE << order);
328
329 /* clear pages before giving them to user space */
330 memset(page_address(page), 0, alloc_size);
331
332 /* mark allocated pages reserved */
333 do {
334 SetPageReserved(page++);
335 } while (alloc_size -= PAGE_SIZE);
336 }
337 /*
 338	 * REVISIT: it is not strictly correct to assign nr_pages == sglen,
 339	 * but videobuf passes nr_pages to e.g. unmap_sg calls.
340 */
341 dma->nr_pages = dma->sglen;
342 dma->direction = PCI_DMA_FROMDEVICE;
343
344 return 0;
345
346out:
347 omap24xxcam_vbq_free_mmap_buffer(vb);
348 return err;
349}
350
351static int omap24xxcam_vbq_alloc_mmap_buffers(struct videobuf_queue *vbq,
352 unsigned int count)
353{
354 int i, err = 0;
355 struct omap24xxcam_fh *fh =
356 container_of(vbq, struct omap24xxcam_fh, vbq);
357
358 mutex_lock(&vbq->vb_lock);
359
360 for (i = 0; i < count; i++) {
361 err = omap24xxcam_vbq_alloc_mmap_buffer(vbq->bufs[i]);
362 if (err)
363 goto out;
364 dev_dbg(fh->cam->dev, "sglen is %d for buffer %d\n",
365 videobuf_to_dma(vbq->bufs[i])->sglen, i);
366 }
367
368 mutex_unlock(&vbq->vb_lock);
369
370 return 0;
371out:
372 while (i) {
373 i--;
374 omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
375 }
376
377 mutex_unlock(&vbq->vb_lock);
378
379 return err;
380}
381
382/*
383 * This routine is called from interrupt context when a scatter-gather DMA
384 * transfer of a videobuf_buffer completes.
385 */
386static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
387 u32 csr, void *arg)
388{
389 struct omap24xxcam_device *cam =
390 container_of(sgdma, struct omap24xxcam_device, sgdma);
391 struct omap24xxcam_fh *fh = cam->streaming->private_data;
392 struct videobuf_buffer *vb = (struct videobuf_buffer *)arg;
393 const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
394 | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
395 | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
396 unsigned long flags;
397
398 spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
399 if (--cam->sgdma_in_queue == 0)
400 omap24xxcam_core_disable(cam);
401 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
402
403 do_gettimeofday(&vb->ts);
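	/*
	 * field_count is kept in fields, so a completed progressive frame
	 * advances it by two; videobuf presumably reports the sequence
	 * number to user space as field_count / 2.
	 */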
404 vb->field_count = atomic_add_return(2, &fh->field_count);
405 if (csr & csr_error) {
406 vb->state = VIDEOBUF_ERROR;
407 if (!atomic_read(&fh->cam->in_reset)) {
408 dev_dbg(cam->dev, "resetting camera, csr 0x%x\n", csr);
409 omap24xxcam_reset(cam);
410 }
411 } else
412 vb->state = VIDEOBUF_DONE;
413 wake_up(&vb->done);
414}
415
416static void omap24xxcam_vbq_release(struct videobuf_queue *vbq,
417 struct videobuf_buffer *vb)
418{
419 struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
420
421 /* wait for buffer, especially to get out of the sgdma queue */
422 videobuf_waiton(vb, 0, 0);
423 if (vb->memory == V4L2_MEMORY_MMAP) {
424 dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen,
425 dma->direction);
426 dma->direction = DMA_NONE;
427 } else {
428 videobuf_dma_unmap(vbq, videobuf_to_dma(vb));
429 videobuf_dma_free(videobuf_to_dma(vb));
430 }
431
432 vb->state = VIDEOBUF_NEEDS_INIT;
433}
434
435/*
436 * Limit the number of available kernel image capture buffers based on the
437 * number requested, the currently selected image size, and the maximum
438 * amount of memory permitted for kernel capture buffers.
439 */
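/*
 * For example (hypothetical format): with the default capture_mem of
 * 4800 KiB and a 640x480 YUYV image (sizeimage = 614400 bytes), the
 * buffer count is capped at 8.
 */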
440static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
441 unsigned int *size)
442{
443 struct omap24xxcam_fh *fh = vbq->priv_data;
444
445 if (*cnt <= 0)
446 *cnt = VIDEO_MAX_FRAME; /* supply a default number of buffers */
447
448 if (*cnt > VIDEO_MAX_FRAME)
449 *cnt = VIDEO_MAX_FRAME;
450
451 *size = fh->pix.sizeimage;
452
453 /* accessing fh->cam->capture_mem is ok, it's constant */
454 while (*size * *cnt > fh->cam->capture_mem)
455 (*cnt)--;
456
457 return 0;
458}
459
460static int omap24xxcam_dma_iolock(struct videobuf_queue *vbq,
461 struct videobuf_dmabuf *dma)
462{
463 int err = 0;
464
465 dma->direction = PCI_DMA_FROMDEVICE;
466 if (!dma_map_sg(vbq->dev, dma->sglist, dma->sglen, dma->direction)) {
467 kfree(dma->sglist);
468 dma->sglist = NULL;
469 dma->sglen = 0;
470 err = -EIO;
471 }
472
473 return err;
474}
475
476static int omap24xxcam_vbq_prepare(struct videobuf_queue *vbq,
477 struct videobuf_buffer *vb,
478 enum v4l2_field field)
479{
480 struct omap24xxcam_fh *fh = vbq->priv_data;
481 int err = 0;
482
483 /*
484 * Accessing pix here is okay since it's constant while
485 * streaming is on (and we only get called then).
486 */
487 if (vb->baddr) {
488 /* This is a userspace buffer. */
489 if (fh->pix.sizeimage > vb->bsize) {
490 /* The buffer isn't big enough. */
491 err = -EINVAL;
492 } else
493 vb->size = fh->pix.sizeimage;
494 } else {
495 if (vb->state != VIDEOBUF_NEEDS_INIT) {
496 /*
497 * We have a kernel bounce buffer that has
498 * already been allocated.
499 */
500 if (fh->pix.sizeimage > vb->size) {
501 /*
502 * The image size has been changed to
503 * a larger size since this buffer was
504 * allocated, so we need to free and
505 * reallocate it.
506 */
507 omap24xxcam_vbq_release(vbq, vb);
508 vb->size = fh->pix.sizeimage;
509 }
510 } else {
511 /* We need to allocate a new kernel bounce buffer. */
512 vb->size = fh->pix.sizeimage;
513 }
514 }
515
516 if (err)
517 return err;
518
519 vb->width = fh->pix.width;
520 vb->height = fh->pix.height;
521 vb->field = field;
522
523 if (vb->state == VIDEOBUF_NEEDS_INIT) {
524 if (vb->memory == V4L2_MEMORY_MMAP)
525 /*
526 * we have built the scatter-gather list by ourself so
527 * do the scatter-gather mapping as well
528 */
529 err = omap24xxcam_dma_iolock(vbq, videobuf_to_dma(vb));
530 else
531 err = videobuf_iolock(vbq, vb, NULL);
532 }
533
534 if (!err)
535 vb->state = VIDEOBUF_PREPARED;
536 else
537 omap24xxcam_vbq_release(vbq, vb);
538
539 return err;
540}
541
542static void omap24xxcam_vbq_queue(struct videobuf_queue *vbq,
543 struct videobuf_buffer *vb)
544{
545 struct omap24xxcam_fh *fh = vbq->priv_data;
546 struct omap24xxcam_device *cam = fh->cam;
547 enum videobuf_state state = vb->state;
548 unsigned long flags;
549 int err;
550
551 /*
552 * FIXME: We're marking the buffer active since we have no
553 * pretty way of marking it active exactly when the
554 * scatter-gather transfer starts.
555 */
556 vb->state = VIDEOBUF_ACTIVE;
557
558 err = omap24xxcam_sgdma_queue(&fh->cam->sgdma,
559 videobuf_to_dma(vb)->sglist,
560 videobuf_to_dma(vb)->sglen, vb->size,
561 omap24xxcam_vbq_complete, vb);
562
563 if (!err) {
564 spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
565 if (++cam->sgdma_in_queue == 1
566 && !atomic_read(&cam->in_reset))
567 omap24xxcam_core_enable(cam);
568 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
569 } else {
570 /*
571 * Oops. We're not supposed to get any errors here.
572 * The only way we could get an error is if we ran out
573 * of scatter-gather DMA slots, but we are supposed to
574 * have at least as many scatter-gather DMA slots as
 575		 * video buffers, so that can't happen.
576 */
577 dev_err(cam->dev, "failed to queue a video buffer for dma!\n");
578 dev_err(cam->dev, "likely a bug in the driver!\n");
579 vb->state = state;
580 }
581}
582
583static struct videobuf_queue_ops omap24xxcam_vbq_ops = {
584 .buf_setup = omap24xxcam_vbq_setup,
585 .buf_prepare = omap24xxcam_vbq_prepare,
586 .buf_queue = omap24xxcam_vbq_queue,
587 .buf_release = omap24xxcam_vbq_release,
588};
589
590/*
591 *
592 * OMAP main camera system
593 *
594 */
595
596/*
597 * Reset camera block to power-on state.
598 */
599static void omap24xxcam_poweron_reset(struct omap24xxcam_device *cam)
600{
601 int max_loop = RESET_TIMEOUT_NS;
602
603 /* Reset whole camera subsystem */
604 omap24xxcam_reg_out(cam->mmio_base,
605 CAM_SYSCONFIG,
606 CAM_SYSCONFIG_SOFTRESET);
607
608 /* Wait till it's finished */
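	/*
	 * RESET_TIMEOUT_NS (10000) iterations of ndelay(1) bound the
	 * busy-wait to roughly 10 microseconds plus register access time.
	 */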
609 while (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
610 & CAM_SYSSTATUS_RESETDONE)
611 && --max_loop) {
612 ndelay(1);
613 }
614
615 if (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
616 & CAM_SYSSTATUS_RESETDONE))
617 dev_err(cam->dev, "camera soft reset timeout\n");
618}
619
620/*
621 * (Re)initialise the camera block.
622 */
623static void omap24xxcam_hwinit(struct omap24xxcam_device *cam)
624{
625 omap24xxcam_poweron_reset(cam);
626
627 /* set the camera subsystem autoidle bit */
628 omap24xxcam_reg_out(cam->mmio_base, CAM_SYSCONFIG,
629 CAM_SYSCONFIG_AUTOIDLE);
630
631 /* set the camera MMU autoidle bit */
632 omap24xxcam_reg_out(cam->mmio_base,
633 CAMMMU_REG_OFFSET + CAMMMU_SYSCONFIG,
634 CAMMMU_SYSCONFIG_AUTOIDLE);
635
636 omap24xxcam_core_hwinit(cam);
637
638 omap24xxcam_dma_hwinit(&cam->sgdma.dma);
639}
640
641/*
642 * Callback for dma transfer stalling.
643 */
644static void omap24xxcam_stalled_dma_reset(unsigned long data)
645{
646 struct omap24xxcam_device *cam = (struct omap24xxcam_device *)data;
647
648 if (!atomic_read(&cam->in_reset)) {
649 dev_dbg(cam->dev, "dma stalled, resetting camera\n");
650 omap24xxcam_reset(cam);
651 }
652}
653
654/*
 655 * Stop capture. Mark that we're doing a reset, then stop DMA transfers
 656 * and the core. (No new scatter-gather transfers will be queued whilst
657 * in_reset is non-zero.)
658 *
659 * If omap24xxcam_capture_stop is called from several places at
 660 * once, only the first call will have an effect. Similarly, only the
 661 * last call to omap24xxcam_capture_cont will have an effect.
662 *
663 * Serialisation is ensured by using cam->core_enable_disable_lock.
664 */
665static void omap24xxcam_capture_stop(struct omap24xxcam_device *cam)
666{
667 unsigned long flags;
668
669 spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
670
671 if (atomic_inc_return(&cam->in_reset) != 1) {
672 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
673 return;
674 }
675
676 omap24xxcam_core_disable(cam);
677
678 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
679
680 omap24xxcam_sgdma_sync(&cam->sgdma);
681}
682
683/*
684 * Reset and continue streaming.
685 *
686 * Note: Resetting the camera FIFO via the CC_RST bit in the CC_CTRL
687 * register is supposed to be sufficient to recover from a camera
688 * interface error, but it doesn't seem to be enough. If we only do
689 * that then subsequent image captures are out of sync by either one
690 * or two times DMA_THRESHOLD bytes. Resetting and re-initializing the
691 * entire camera subsystem prevents the problem with frame
692 * synchronization.
693 */
694static void omap24xxcam_capture_cont(struct omap24xxcam_device *cam)
695{
696 unsigned long flags;
697
698 spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
699
700 if (atomic_read(&cam->in_reset) != 1)
701 goto out;
702
703 omap24xxcam_hwinit(cam);
704
705 omap24xxcam_sensor_if_enable(cam);
706
707 omap24xxcam_sgdma_process(&cam->sgdma);
708
709 if (cam->sgdma_in_queue)
710 omap24xxcam_core_enable(cam);
711
712out:
713 atomic_dec(&cam->in_reset);
714 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
715}
716
717static ssize_t
718omap24xxcam_streaming_show(struct device *dev, struct device_attribute *attr,
719 char *buf)
720{
721 struct omap24xxcam_device *cam = dev_get_drvdata(dev);
722
723 return sprintf(buf, "%s\n", cam->streaming ? "active" : "inactive");
724}
725static DEVICE_ATTR(streaming, S_IRUGO, omap24xxcam_streaming_show, NULL);
726
727/*
728 * Stop capture and restart it. I.e. reset the camera during use.
729 */
730static void omap24xxcam_reset(struct omap24xxcam_device *cam)
731{
732 omap24xxcam_capture_stop(cam);
733 omap24xxcam_capture_cont(cam);
734}
735
736/*
737 * The main interrupt handler.
738 */
739static irqreturn_t omap24xxcam_isr(int irq, void *arg)
740{
741 struct omap24xxcam_device *cam = (struct omap24xxcam_device *)arg;
742 u32 irqstatus;
743 unsigned int irqhandled = 0;
744
745 irqstatus = omap24xxcam_reg_in(cam->mmio_base, CAM_IRQSTATUS);
746
747 if (irqstatus &
748 (CAM_IRQSTATUS_DMA_IRQ2 | CAM_IRQSTATUS_DMA_IRQ1
749 | CAM_IRQSTATUS_DMA_IRQ0)) {
750 omap24xxcam_dma_isr(&cam->sgdma.dma);
751 irqhandled = 1;
752 }
753 if (irqstatus & CAM_IRQSTATUS_CC_IRQ) {
754 omap24xxcam_core_isr(cam);
755 irqhandled = 1;
756 }
757 if (irqstatus & CAM_IRQSTATUS_MMU_IRQ)
758 dev_err(cam->dev, "unhandled camera MMU interrupt!\n");
759
760 return IRQ_RETVAL(irqhandled);
761}
762
763/*
764 *
765 * Sensor handling.
766 *
767 */
768
769/*
770 * Enable the external sensor interface. Try to negotiate interface
771 * parameters with the sensor and start using the new ones. The calls
 772 * to sensor_if_enable and sensor_if_disable need not be balanced.
773 */
774static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam)
775{
776 int rval;
777 struct v4l2_ifparm p;
778
779 rval = vidioc_int_g_ifparm(cam->sdev, &p);
780 if (rval) {
781 dev_err(cam->dev, "vidioc_int_g_ifparm failed with %d\n", rval);
782 return rval;
783 }
784
785 cam->if_type = p.if_type;
786
787 cam->cc_ctrl = CC_CTRL_CC_EN;
788
789 switch (p.if_type) {
790 case V4L2_IF_TYPE_BT656:
791 if (p.u.bt656.frame_start_on_rising_vs)
792 cam->cc_ctrl |= CC_CTRL_NOBT_SYNCHRO;
793 if (p.u.bt656.bt_sync_correct)
794 cam->cc_ctrl |= CC_CTRL_BT_CORRECT;
795 if (p.u.bt656.swap)
796 cam->cc_ctrl |= CC_CTRL_PAR_ORDERCAM;
797 if (p.u.bt656.latch_clk_inv)
798 cam->cc_ctrl |= CC_CTRL_PAR_CLK_POL;
799 if (p.u.bt656.nobt_hs_inv)
800 cam->cc_ctrl |= CC_CTRL_NOBT_HS_POL;
801 if (p.u.bt656.nobt_vs_inv)
802 cam->cc_ctrl |= CC_CTRL_NOBT_VS_POL;
803
804 switch (p.u.bt656.mode) {
805 case V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT:
806 cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT8;
807 break;
808 case V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT:
809 cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT10;
810 break;
811 case V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT:
812 cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT12;
813 break;
814 case V4L2_IF_TYPE_BT656_MODE_BT_8BIT:
815 cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT8;
816 break;
817 case V4L2_IF_TYPE_BT656_MODE_BT_10BIT:
818 cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT10;
819 break;
820 default:
821 dev_err(cam->dev,
822 "bt656 interface mode %d not supported\n",
823 p.u.bt656.mode);
824 return -EINVAL;
825 }
826 /*
827 * The clock rate that the sensor wants has changed.
 828		 * We have to adjust the xclk on the OMAP 2 side to
 829		 * match the sensor's request as closely as possible.
830 */
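		/*
		 * For example (hypothetical sensor rates): a request for
		 * 13 MHz rounds the divisor up to 8, giving xclk =
		 * 96 MHz / 8 = 12 MHz; the divisor is relaxed by one step
		 * only if the result would otherwise fall below clock_min.
		 */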
831 if (p.u.bt656.clock_curr != cam->if_u.bt656.xclk) {
832 u32 xclk = p.u.bt656.clock_curr;
833 u32 divisor;
834
835 if (xclk == 0)
836 return -EINVAL;
837
838 if (xclk > CAM_MCLK)
839 xclk = CAM_MCLK;
840
841 divisor = CAM_MCLK / xclk;
842 if (divisor * xclk < CAM_MCLK)
843 divisor++;
844 if (CAM_MCLK / divisor < p.u.bt656.clock_min
845 && divisor > 1)
846 divisor--;
847 if (divisor > 30)
848 divisor = 30;
849
850 xclk = CAM_MCLK / divisor;
851
852 if (xclk < p.u.bt656.clock_min
853 || xclk > p.u.bt656.clock_max)
854 return -EINVAL;
855
856 cam->if_u.bt656.xclk = xclk;
857 }
858 omap24xxcam_core_xclk_set(cam, cam->if_u.bt656.xclk);
859 break;
860 default:
861 /* FIXME: how about other interfaces? */
862 dev_err(cam->dev, "interface type %d not supported\n",
863 p.if_type);
864 return -EINVAL;
865 }
866
867 return 0;
868}
869
870static void omap24xxcam_sensor_if_disable(const struct omap24xxcam_device *cam)
871{
872 switch (cam->if_type) {
873 case V4L2_IF_TYPE_BT656:
874 omap24xxcam_core_xclk_set(cam, 0);
875 break;
876 }
877}
878
879/*
880 * Initialise the sensor hardware.
881 */
882static int omap24xxcam_sensor_init(struct omap24xxcam_device *cam)
883{
884 int err = 0;
885 struct v4l2_int_device *sdev = cam->sdev;
886
887 omap24xxcam_clock_on(cam);
888 err = omap24xxcam_sensor_if_enable(cam);
889 if (err) {
890 dev_err(cam->dev, "sensor interface could not be enabled at "
891 "initialisation, %d\n", err);
892 cam->sdev = NULL;
893 goto out;
894 }
895
896 /* power up sensor during sensor initialization */
897 vidioc_int_s_power(sdev, 1);
898
899 err = vidioc_int_dev_init(sdev);
900 if (err) {
901 dev_err(cam->dev, "cannot initialize sensor, error %d\n", err);
902 /* Sensor init failed --- it's nonexistent to us! */
903 cam->sdev = NULL;
904 goto out;
905 }
906
907 dev_info(cam->dev, "sensor is %s\n", sdev->name);
908
909out:
910 omap24xxcam_sensor_if_disable(cam);
911 omap24xxcam_clock_off(cam);
912
913 vidioc_int_s_power(sdev, 0);
914
915 return err;
916}
917
918static void omap24xxcam_sensor_exit(struct omap24xxcam_device *cam)
919{
920 if (cam->sdev)
921 vidioc_int_dev_exit(cam->sdev);
922}
923
924static void omap24xxcam_sensor_disable(struct omap24xxcam_device *cam)
925{
926 omap24xxcam_sensor_if_disable(cam);
927 omap24xxcam_clock_off(cam);
928 vidioc_int_s_power(cam->sdev, 0);
929}
930
931/*
932 * Power-up and configure camera sensor. It's ready for capturing now.
933 */
934static int omap24xxcam_sensor_enable(struct omap24xxcam_device *cam)
935{
936 int rval;
937
938 omap24xxcam_clock_on(cam);
939
940 omap24xxcam_sensor_if_enable(cam);
941
942 rval = vidioc_int_s_power(cam->sdev, 1);
943 if (rval)
944 goto out;
945
946 rval = vidioc_int_init(cam->sdev);
947 if (rval)
948 goto out;
949
950 return 0;
951
952out:
953 omap24xxcam_sensor_disable(cam);
954
955 return rval;
956}
957
958static void omap24xxcam_sensor_reset_work(struct work_struct *work)
959{
960 struct omap24xxcam_device *cam =
961 container_of(work, struct omap24xxcam_device,
962 sensor_reset_work);
963
964 if (atomic_read(&cam->reset_disable))
965 return;
966
967 omap24xxcam_capture_stop(cam);
968
969 if (vidioc_int_reset(cam->sdev) == 0) {
970 vidioc_int_init(cam->sdev);
971 } else {
972 /* Can't reset it by vidioc_int_reset. */
973 omap24xxcam_sensor_disable(cam);
974 omap24xxcam_sensor_enable(cam);
975 }
976
977 omap24xxcam_capture_cont(cam);
978}
979
980/*
981 *
982 * IOCTL interface.
983 *
984 */
985
986static int vidioc_querycap(struct file *file, void *fh,
987 struct v4l2_capability *cap)
988{
989 struct omap24xxcam_fh *ofh = fh;
990 struct omap24xxcam_device *cam = ofh->cam;
991
992 strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
993 strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
994 cap->version = OMAP24XXCAM_VERSION;
995 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
996
997 return 0;
998}
999
1000static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh,
1001 struct v4l2_fmtdesc *f)
1002{
1003 struct omap24xxcam_fh *ofh = fh;
1004 struct omap24xxcam_device *cam = ofh->cam;
1005 int rval;
1006
1007 rval = vidioc_int_enum_fmt_cap(cam->sdev, f);
1008
1009 return rval;
1010}
1011
1012static int vidioc_g_fmt_vid_cap(struct file *file, void *fh,
1013 struct v4l2_format *f)
1014{
1015 struct omap24xxcam_fh *ofh = fh;
1016 struct omap24xxcam_device *cam = ofh->cam;
1017 int rval;
1018
1019 mutex_lock(&cam->mutex);
1020 rval = vidioc_int_g_fmt_cap(cam->sdev, f);
1021 mutex_unlock(&cam->mutex);
1022
1023 return rval;
1024}
1025
1026static int vidioc_s_fmt_vid_cap(struct file *file, void *fh,
1027 struct v4l2_format *f)
1028{
1029 struct omap24xxcam_fh *ofh = fh;
1030 struct omap24xxcam_device *cam = ofh->cam;
1031 int rval;
1032
1033 mutex_lock(&cam->mutex);
1034 if (cam->streaming) {
1035 rval = -EBUSY;
1036 goto out;
1037 }
1038
1039 rval = vidioc_int_s_fmt_cap(cam->sdev, f);
1040
1041out:
1042 mutex_unlock(&cam->mutex);
1043
1044 if (!rval) {
1045 mutex_lock(&ofh->vbq.vb_lock);
1046 ofh->pix = f->fmt.pix;
1047 mutex_unlock(&ofh->vbq.vb_lock);
1048 }
1049
1050 memset(f, 0, sizeof(*f));
1051 vidioc_g_fmt_vid_cap(file, fh, f);
1052
1053 return rval;
1054}
1055
1056static int vidioc_try_fmt_vid_cap(struct file *file, void *fh,
1057 struct v4l2_format *f)
1058{
1059 struct omap24xxcam_fh *ofh = fh;
1060 struct omap24xxcam_device *cam = ofh->cam;
1061 int rval;
1062
1063 mutex_lock(&cam->mutex);
1064 rval = vidioc_int_try_fmt_cap(cam->sdev, f);
1065 mutex_unlock(&cam->mutex);
1066
1067 return rval;
1068}
1069
1070static int vidioc_reqbufs(struct file *file, void *fh,
1071 struct v4l2_requestbuffers *b)
1072{
1073 struct omap24xxcam_fh *ofh = fh;
1074 struct omap24xxcam_device *cam = ofh->cam;
1075 int rval;
1076
1077 mutex_lock(&cam->mutex);
1078 if (cam->streaming) {
1079 mutex_unlock(&cam->mutex);
1080 return -EBUSY;
1081 }
1082
1083 omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
1084 mutex_unlock(&cam->mutex);
1085
1086 rval = videobuf_reqbufs(&ofh->vbq, b);
1087
1088 /*
 1089	 * Bail out if videobuf_reqbufs failed or the buffers are not memory-
 1090	 * mapped; only memory-mapped buffers need the special allocation below.
1091 */
1092 if (rval < 0 || b->memory != V4L2_MEMORY_MMAP)
1093 goto out;
1094
1095 rval = omap24xxcam_vbq_alloc_mmap_buffers(&ofh->vbq, rval);
1096 if (rval)
1097 omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
1098
1099out:
1100 return rval;
1101}
1102
1103static int vidioc_querybuf(struct file *file, void *fh,
1104 struct v4l2_buffer *b)
1105{
1106 struct omap24xxcam_fh *ofh = fh;
1107
1108 return videobuf_querybuf(&ofh->vbq, b);
1109}
1110
1111static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
1112{
1113 struct omap24xxcam_fh *ofh = fh;
1114
1115 return videobuf_qbuf(&ofh->vbq, b);
1116}
1117
1118static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
1119{
1120 struct omap24xxcam_fh *ofh = fh;
1121 struct omap24xxcam_device *cam = ofh->cam;
1122 struct videobuf_buffer *vb;
1123 int rval;
1124
1125videobuf_dqbuf_again:
1126 rval = videobuf_dqbuf(&ofh->vbq, b, file->f_flags & O_NONBLOCK);
1127 if (rval)
1128 goto out;
1129
1130 vb = ofh->vbq.bufs[b->index];
1131
1132 mutex_lock(&cam->mutex);
1133 /* _needs_reset returns -EIO if reset is required. */
1134 rval = vidioc_int_g_needs_reset(cam->sdev, (void *)vb->baddr);
1135 mutex_unlock(&cam->mutex);
1136 if (rval == -EIO)
1137 schedule_work(&cam->sensor_reset_work);
1138 else
1139 rval = 0;
1140
1141out:
1142 /*
 1143	 * This is a hack. We don't want to show -EIO to user
 1144	 * space. Requeue the buffer and try again unless we're in
 1145	 * non-blocking mode.
1146 */
1147 if (rval == -EIO) {
1148 videobuf_qbuf(&ofh->vbq, b);
1149 if (!(file->f_flags & O_NONBLOCK))
1150 goto videobuf_dqbuf_again;
1151 /*
1152 * We don't have a videobuf_buffer now --- maybe next
1153 * time...
1154 */
1155 rval = -EAGAIN;
1156 }
1157
1158 return rval;
1159}
1160
1161static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
1162{
1163 struct omap24xxcam_fh *ofh = fh;
1164 struct omap24xxcam_device *cam = ofh->cam;
1165 int rval;
1166
1167 mutex_lock(&cam->mutex);
1168 if (cam->streaming) {
1169 rval = -EBUSY;
1170 goto out;
1171 }
1172
1173 rval = omap24xxcam_sensor_if_enable(cam);
1174 if (rval) {
1175 dev_dbg(cam->dev, "vidioc_int_g_ifparm failed\n");
1176 goto out;
1177 }
1178
1179 rval = videobuf_streamon(&ofh->vbq);
1180 if (!rval) {
1181 cam->streaming = file;
1182 sysfs_notify(&cam->dev->kobj, NULL, "streaming");
1183 }
1184
1185out:
1186 mutex_unlock(&cam->mutex);
1187
1188 return rval;
1189}
1190
1191static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
1192{
1193 struct omap24xxcam_fh *ofh = fh;
1194 struct omap24xxcam_device *cam = ofh->cam;
1195 struct videobuf_queue *q = &ofh->vbq;
1196 int rval;
1197
1198 atomic_inc(&cam->reset_disable);
1199
1200 flush_scheduled_work();
1201
1202 rval = videobuf_streamoff(q);
1203 if (!rval) {
1204 mutex_lock(&cam->mutex);
1205 cam->streaming = NULL;
1206 mutex_unlock(&cam->mutex);
1207 sysfs_notify(&cam->dev->kobj, NULL, "streaming");
1208 }
1209
1210 atomic_dec(&cam->reset_disable);
1211
1212 return rval;
1213}
1214
1215static int vidioc_enum_input(struct file *file, void *fh,
1216 struct v4l2_input *inp)
1217{
1218 if (inp->index > 0)
1219 return -EINVAL;
1220
1221 strlcpy(inp->name, "camera", sizeof(inp->name));
1222 inp->type = V4L2_INPUT_TYPE_CAMERA;
1223
1224 return 0;
1225}
1226
1227static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
1228{
1229 *i = 0;
1230
1231 return 0;
1232}
1233
1234static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
1235{
1236 if (i > 0)
1237 return -EINVAL;
1238
1239 return 0;
1240}
1241
1242static int vidioc_queryctrl(struct file *file, void *fh,
1243 struct v4l2_queryctrl *a)
1244{
1245 struct omap24xxcam_fh *ofh = fh;
1246 struct omap24xxcam_device *cam = ofh->cam;
1247 int rval;
1248
1249 rval = vidioc_int_queryctrl(cam->sdev, a);
1250
1251 return rval;
1252}
1253
1254static int vidioc_g_ctrl(struct file *file, void *fh,
1255 struct v4l2_control *a)
1256{
1257 struct omap24xxcam_fh *ofh = fh;
1258 struct omap24xxcam_device *cam = ofh->cam;
1259 int rval;
1260
1261 mutex_lock(&cam->mutex);
1262 rval = vidioc_int_g_ctrl(cam->sdev, a);
1263 mutex_unlock(&cam->mutex);
1264
1265 return rval;
1266}
1267
1268static int vidioc_s_ctrl(struct file *file, void *fh,
1269 struct v4l2_control *a)
1270{
1271 struct omap24xxcam_fh *ofh = fh;
1272 struct omap24xxcam_device *cam = ofh->cam;
1273 int rval;
1274
1275 mutex_lock(&cam->mutex);
1276 rval = vidioc_int_s_ctrl(cam->sdev, a);
1277 mutex_unlock(&cam->mutex);
1278
1279 return rval;
1280}
1281
1282static int vidioc_g_parm(struct file *file, void *fh,
1283 struct v4l2_streamparm *a) {
1284 struct omap24xxcam_fh *ofh = fh;
1285 struct omap24xxcam_device *cam = ofh->cam;
1286 int rval;
1287
1288 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1289 return -EINVAL;
1290
1291 mutex_lock(&cam->mutex);
1292 rval = vidioc_int_g_parm(cam->sdev, a);
1293 mutex_unlock(&cam->mutex);
1294
1295 return rval;
1296}
1297
1298static int vidioc_s_parm(struct file *file, void *fh,
1299 struct v4l2_streamparm *a)
1300{
1301 struct omap24xxcam_fh *ofh = fh;
1302 struct omap24xxcam_device *cam = ofh->cam;
1303 struct v4l2_streamparm old_streamparm;
1304 int rval;
1305
1306 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1307 return -EINVAL;
1308
1309 mutex_lock(&cam->mutex);
1310 if (cam->streaming) {
1311 rval = -EBUSY;
1312 goto out;
1313 }
1314
1315 old_streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1316 rval = vidioc_int_g_parm(cam->sdev, &old_streamparm);
1317 if (rval)
1318 goto out;
1319
1320 rval = vidioc_int_s_parm(cam->sdev, a);
1321 if (rval)
1322 goto out;
1323
1324 rval = omap24xxcam_sensor_if_enable(cam);
1325 /*
1326 * Revert to old streaming parameters if enabling sensor
1327 * interface with the new ones failed.
1328 */
1329 if (rval)
1330 vidioc_int_s_parm(cam->sdev, &old_streamparm);
1331
1332out:
1333 mutex_unlock(&cam->mutex);
1334
1335 return rval;
1336}
1337
1338/*
1339 *
1340 * File operations.
1341 *
1342 */
1343
1344static unsigned int omap24xxcam_poll(struct file *file,
1345 struct poll_table_struct *wait)
1346{
1347 struct omap24xxcam_fh *fh = file->private_data;
1348 struct omap24xxcam_device *cam = fh->cam;
1349 struct videobuf_buffer *vb;
1350
1351 mutex_lock(&cam->mutex);
1352 if (cam->streaming != file) {
1353 mutex_unlock(&cam->mutex);
1354 return POLLERR;
1355 }
1356 mutex_unlock(&cam->mutex);
1357
1358 mutex_lock(&fh->vbq.vb_lock);
1359 if (list_empty(&fh->vbq.stream)) {
1360 mutex_unlock(&fh->vbq.vb_lock);
1361 return POLLERR;
1362 }
1363 vb = list_entry(fh->vbq.stream.next, struct videobuf_buffer, stream);
1364 mutex_unlock(&fh->vbq.vb_lock);
1365
1366 poll_wait(file, &vb->done, wait);
1367
1368 if (vb->state == VIDEOBUF_DONE || vb->state == VIDEOBUF_ERROR)
1369 return POLLIN | POLLRDNORM;
1370
1371 return 0;
1372}
1373
1374static int omap24xxcam_mmap_buffers(struct file *file,
1375 struct vm_area_struct *vma)
1376{
1377 struct omap24xxcam_fh *fh = file->private_data;
1378 struct omap24xxcam_device *cam = fh->cam;
1379 struct videobuf_queue *vbq = &fh->vbq;
1380 unsigned int first, last, size, i, j;
1381 int err = 0;
1382
1383 mutex_lock(&cam->mutex);
1384 if (cam->streaming) {
1385 mutex_unlock(&cam->mutex);
1386 return -EBUSY;
1387 }
1388 mutex_unlock(&cam->mutex);
1389 mutex_lock(&vbq->vb_lock);
1390
1391 /* look for first buffer to map */
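	/*
	 * The first buffer to map is the one whose videobuf offset (boff)
	 * matches the mmap offset requested by user space
	 * (vm_pgoff << PAGE_SHIFT).
	 */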
1392 for (first = 0; first < VIDEO_MAX_FRAME; first++) {
1393 if (NULL == vbq->bufs[first])
1394 continue;
1395 if (V4L2_MEMORY_MMAP != vbq->bufs[first]->memory)
1396 continue;
1397 if (vbq->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
1398 break;
1399 }
1400
1401 /* look for last buffer to map */
1402 for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
1403 if (NULL == vbq->bufs[last])
1404 continue;
1405 if (V4L2_MEMORY_MMAP != vbq->bufs[last]->memory)
1406 continue;
1407 size += vbq->bufs[last]->bsize;
1408 if (size == (vma->vm_end - vma->vm_start))
1409 break;
1410 }
1411
1412 size = 0;
1413 for (i = first; i <= last; i++) {
1414 struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]);
1415
1416 for (j = 0; j < dma->sglen; j++) {
1417 err = remap_pfn_range(
1418 vma, vma->vm_start + size,
1419 page_to_pfn(sg_page(&dma->sglist[j])),
1420 sg_dma_len(&dma->sglist[j]), vma->vm_page_prot);
1421 if (err)
1422 goto out;
1423 size += sg_dma_len(&dma->sglist[j]);
1424 }
1425 }
1426
1427out:
1428 mutex_unlock(&vbq->vb_lock);
1429
1430 return err;
1431}
1432
1433static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma)
1434{
1435 struct omap24xxcam_fh *fh = file->private_data;
1436 int rval;
1437
1438 /* let the video-buf mapper check arguments and set-up structures */
1439 rval = videobuf_mmap_mapper(&fh->vbq, vma);
1440 if (rval)
1441 return rval;
1442
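	/*
	 * Map the buffers non-cached, presumably so that frame data written
	 * by the camera DMA is visible to user space without explicit cache
	 * maintenance.
	 */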
1443 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1444
1445 /* do mapping to our allocated buffers */
1446 rval = omap24xxcam_mmap_buffers(file, vma);
1447 /*
1448 * In case of error, free vma->vm_private_data allocated by
1449 * videobuf_mmap_mapper.
1450 */
1451 if (rval)
1452 kfree(vma->vm_private_data);
1453
1454 return rval;
1455}
1456
1457static int omap24xxcam_open(struct inode *inode, struct file *file)
1458{
1459 int minor = iminor(inode);
1460 struct omap24xxcam_device *cam = omap24xxcam.priv;
1461 struct omap24xxcam_fh *fh;
1462 struct v4l2_format format;
1463
1464 if (!cam || !cam->vfd || (cam->vfd->minor != minor))
1465 return -ENODEV;
1466
1467 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
1468 if (fh == NULL)
1469 return -ENOMEM;
1470
1471 mutex_lock(&cam->mutex);
1472 if (cam->sdev == NULL || !try_module_get(cam->sdev->module)) {
1473 mutex_unlock(&cam->mutex);
1474 goto out_try_module_get;
1475 }
1476
1477 if (atomic_inc_return(&cam->users) == 1) {
1478 omap24xxcam_hwinit(cam);
1479 if (omap24xxcam_sensor_enable(cam)) {
1480 mutex_unlock(&cam->mutex);
1481 goto out_omap24xxcam_sensor_enable;
1482 }
1483 }
1484 mutex_unlock(&cam->mutex);
1485
1486 fh->cam = cam;
1487 mutex_lock(&cam->mutex);
1488 vidioc_int_g_fmt_cap(cam->sdev, &format);
1489 mutex_unlock(&cam->mutex);
1490 /* FIXME: how about fh->pix when there are more users? */
1491 fh->pix = format.fmt.pix;
1492
1493 file->private_data = fh;
1494
1495 spin_lock_init(&fh->vbq_lock);
1496
1497 videobuf_queue_sg_init(&fh->vbq, &omap24xxcam_vbq_ops, NULL,
1498 &fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
1499 V4L2_FIELD_NONE,
1500 sizeof(struct videobuf_buffer), fh);
1501
1502 return 0;
1503
1504out_omap24xxcam_sensor_enable:
1505 omap24xxcam_poweron_reset(cam);
1506 module_put(cam->sdev->module);
1507
1508out_try_module_get:
1509 kfree(fh);
1510
1511 return -ENODEV;
1512}
1513
1514static int omap24xxcam_release(struct inode *inode, struct file *file)
1515{
1516 struct omap24xxcam_fh *fh = file->private_data;
1517 struct omap24xxcam_device *cam = fh->cam;
1518
1519 atomic_inc(&cam->reset_disable);
1520
1521 flush_scheduled_work();
1522
1523 /* stop streaming capture */
1524 videobuf_streamoff(&fh->vbq);
1525
1526 mutex_lock(&cam->mutex);
1527 if (cam->streaming == file) {
1528 cam->streaming = NULL;
1529 mutex_unlock(&cam->mutex);
1530 sysfs_notify(&cam->dev->kobj, NULL, "streaming");
1531 } else {
1532 mutex_unlock(&cam->mutex);
1533 }
1534
1535 atomic_dec(&cam->reset_disable);
1536
1537 omap24xxcam_vbq_free_mmap_buffers(&fh->vbq);
1538
1539 /*
1540 * Make sure the reset work we might have scheduled is not
1541 * pending! It may be run *only* if we have users. (And it may
1542 * not be scheduled anymore since streaming is already
1543 * disabled.)
1544 */
1545 flush_scheduled_work();
1546
1547 mutex_lock(&cam->mutex);
1548 if (atomic_dec_return(&cam->users) == 0) {
1549 omap24xxcam_sensor_disable(cam);
1550 omap24xxcam_poweron_reset(cam);
1551 }
1552 mutex_unlock(&cam->mutex);
1553
1554 file->private_data = NULL;
1555
1556 module_put(cam->sdev->module);
1557 kfree(fh);
1558
1559 return 0;
1560}
1561
1562static struct file_operations omap24xxcam_fops = {
1563 .llseek = no_llseek,
1564 .ioctl = video_ioctl2,
1565 .poll = omap24xxcam_poll,
1566 .mmap = omap24xxcam_mmap,
1567 .open = omap24xxcam_open,
1568 .release = omap24xxcam_release,
1569};
1570
1571/*
1572 *
1573 * Power management.
1574 *
1575 */
1576
1577#ifdef CONFIG_PM
1578static int omap24xxcam_suspend(struct platform_device *pdev, pm_message_t state)
1579{
1580 struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
1581
1582 if (atomic_read(&cam->users) == 0)
1583 return 0;
1584
1585 if (!atomic_read(&cam->reset_disable))
1586 omap24xxcam_capture_stop(cam);
1587
1588 omap24xxcam_sensor_disable(cam);
1589 omap24xxcam_poweron_reset(cam);
1590
1591 return 0;
1592}
1593
1594static int omap24xxcam_resume(struct platform_device *pdev)
1595{
1596 struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
1597
1598 if (atomic_read(&cam->users) == 0)
1599 return 0;
1600
1601 omap24xxcam_hwinit(cam);
1602 omap24xxcam_sensor_enable(cam);
1603
1604 if (!atomic_read(&cam->reset_disable))
1605 omap24xxcam_capture_cont(cam);
1606
1607 return 0;
1608}
1609#endif /* CONFIG_PM */
1610
1611static const struct v4l2_ioctl_ops omap24xxcam_ioctl_fops = {
1612 .vidioc_querycap = vidioc_querycap,
1613 .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
1614 .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
1615 .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
1616 .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
1617 .vidioc_reqbufs = vidioc_reqbufs,
1618 .vidioc_querybuf = vidioc_querybuf,
1619 .vidioc_qbuf = vidioc_qbuf,
1620 .vidioc_dqbuf = vidioc_dqbuf,
1621 .vidioc_streamon = vidioc_streamon,
1622 .vidioc_streamoff = vidioc_streamoff,
1623 .vidioc_enum_input = vidioc_enum_input,
1624 .vidioc_g_input = vidioc_g_input,
1625 .vidioc_s_input = vidioc_s_input,
1626 .vidioc_queryctrl = vidioc_queryctrl,
1627 .vidioc_g_ctrl = vidioc_g_ctrl,
1628 .vidioc_s_ctrl = vidioc_s_ctrl,
1629 .vidioc_g_parm = vidioc_g_parm,
1630 .vidioc_s_parm = vidioc_s_parm,
1631};
1632
1633/*
1634 *
1635 * Camera device (i.e. /dev/video).
1636 *
1637 */
1638
1639static int omap24xxcam_device_register(struct v4l2_int_device *s)
1640{
1641 struct omap24xxcam_device *cam = s->u.slave->master->priv;
1642 struct video_device *vfd;
1643 int rval;
1644
1645 /* We already have a slave. */
1646 if (cam->sdev)
1647 return -EBUSY;
1648
1649 cam->sdev = s;
1650
1651 if (device_create_file(cam->dev, &dev_attr_streaming) != 0) {
1652 dev_err(cam->dev, "could not register sysfs entry\n");
1653 rval = -EBUSY;
1654 goto err;
1655 }
1656
1657 /* initialize the video_device struct */
1658 vfd = cam->vfd = video_device_alloc();
1659 if (!vfd) {
1660 dev_err(cam->dev, "could not allocate video device struct\n");
1661 rval = -ENOMEM;
1662 goto err;
1663 }
1664 vfd->release = video_device_release;
1665
1666 vfd->parent = cam->dev;
1667
1668 strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name));
1669 vfd->vfl_type = VID_TYPE_CAPTURE | VID_TYPE_CHROMAKEY;
1670 vfd->fops = &omap24xxcam_fops;
1671 vfd->minor = -1;
1672 vfd->ioctl_ops = &omap24xxcam_ioctl_fops;
1673
1674 omap24xxcam_hwinit(cam);
1675
1676 rval = omap24xxcam_sensor_init(cam);
1677 if (rval)
1678 goto err;
1679
1680 if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) {
1681 dev_err(cam->dev, "could not register V4L device\n");
1682 vfd->minor = -1;
1683 rval = -EBUSY;
1684 goto err;
1685 }
1686
1687 omap24xxcam_poweron_reset(cam);
1688
1689 dev_info(cam->dev, "registered device video%d\n", vfd->minor);
1690
1691 return 0;
1692
1693err:
1694 omap24xxcam_device_unregister(s);
1695
1696 return rval;
1697}
1698
1699static void omap24xxcam_device_unregister(struct v4l2_int_device *s)
1700{
1701 struct omap24xxcam_device *cam = s->u.slave->master->priv;
1702
1703 omap24xxcam_sensor_exit(cam);
1704
1705 if (cam->vfd) {
1706 if (cam->vfd->minor == -1) {
1707 /*
1708 * The device was never registered, so release the
1709 * video_device struct directly.
1710 */
1711 video_device_release(cam->vfd);
1712 } else {
1713 /*
1714 * The unregister function will release the
1715 * video_device struct as well as
1716 * unregistering it.
1717 */
1718 video_unregister_device(cam->vfd);
1719 }
1720 cam->vfd = NULL;
1721 }
1722
1723 device_remove_file(cam->dev, &dev_attr_streaming);
1724
1725 cam->sdev = NULL;
1726}
1727
1728static struct v4l2_int_master omap24xxcam_master = {
1729 .attach = omap24xxcam_device_register,
1730 .detach = omap24xxcam_device_unregister,
1731};
1732
1733static struct v4l2_int_device omap24xxcam = {
1734 .module = THIS_MODULE,
1735 .name = CAM_NAME,
1736 .type = v4l2_int_type_master,
1737 .u = {
1738 .master = &omap24xxcam_master
1739 },
1740};
1741
1742/*
1743 *
1744 * Driver initialisation and deinitialisation.
1745 *
1746 */
1747
1748static int __init omap24xxcam_probe(struct platform_device *pdev)
1749{
1750 struct omap24xxcam_device *cam;
1751 struct resource *mem;
1752 int irq;
1753
1754 cam = kzalloc(sizeof(*cam), GFP_KERNEL);
1755 if (!cam) {
1756 dev_err(&pdev->dev, "could not allocate memory\n");
1757 goto err;
1758 }
1759
1760 platform_set_drvdata(pdev, cam);
1761
1762 cam->dev = &pdev->dev;
1763
1764 /*
1765 * Impose a lower limit on the amount of memory allocated for
1766 * capture. We require at least enough memory to double-buffer
1767 * QVGA (300KB).
1768 */
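	/* 320 * 240 pixels * 2 bytes/pixel * 2 buffers = 307200 bytes, i.e. 300 KiB. */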
1769 if (capture_mem < 320 * 240 * 2 * 2)
1770 capture_mem = 320 * 240 * 2 * 2;
1771 cam->capture_mem = capture_mem;
1772
1773 /* request the mem region for the camera registers */
1774 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1775 if (!mem) {
1776 dev_err(cam->dev, "no mem resource?\n");
1777 goto err;
1778 }
1779 if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
1780 pdev->name)) {
1781 dev_err(cam->dev,
1782 "cannot reserve camera register I/O region\n");
1783 goto err;
1784 }
1785 cam->mmio_base_phys = mem->start;
1786 cam->mmio_size = (mem->end - mem->start) + 1;
1787
1788 /* map the region */
1789 cam->mmio_base = (unsigned long)
1790 ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
1791 if (!cam->mmio_base) {
1792 dev_err(cam->dev, "cannot map camera register I/O region\n");
1793 goto err;
1794 }
1795
1796 irq = platform_get_irq(pdev, 0);
1797 if (irq <= 0) {
1798 dev_err(cam->dev, "no irq for camera?\n");
1799 goto err;
1800 }
1801
1802 /* install the interrupt service routine */
1803 if (request_irq(irq, omap24xxcam_isr, 0, CAM_NAME, cam)) {
1804 dev_err(cam->dev,
1805 "could not install interrupt service routine\n");
1806 goto err;
1807 }
1808 cam->irq = irq;
1809
1810 if (omap24xxcam_clock_get(cam))
1811 goto err;
1812
1813 INIT_WORK(&cam->sensor_reset_work, omap24xxcam_sensor_reset_work);
1814
1815 mutex_init(&cam->mutex);
1816 spin_lock_init(&cam->core_enable_disable_lock);
1817
1818 omap24xxcam_sgdma_init(&cam->sgdma,
1819 cam->mmio_base + CAMDMA_REG_OFFSET,
1820 omap24xxcam_stalled_dma_reset,
1821 (unsigned long)cam);
1822
1823 omap24xxcam.priv = cam;
1824
1825 if (v4l2_int_device_register(&omap24xxcam))
1826 goto err;
1827
1828 return 0;
1829
1830err:
1831 omap24xxcam_remove(pdev);
1832 return -ENODEV;
1833}
1834
1835static int omap24xxcam_remove(struct platform_device *pdev)
1836{
1837 struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
1838
1839 if (!cam)
1840 return 0;
1841
1842 if (omap24xxcam.priv != NULL)
1843 v4l2_int_device_unregister(&omap24xxcam);
1844 omap24xxcam.priv = NULL;
1845
1846 omap24xxcam_clock_put(cam);
1847
1848 if (cam->irq) {
1849 free_irq(cam->irq, cam);
1850 cam->irq = 0;
1851 }
1852
1853 if (cam->mmio_base) {
1854 iounmap((void *)cam->mmio_base);
1855 cam->mmio_base = 0;
1856 }
1857
1858 if (cam->mmio_base_phys) {
1859 release_mem_region(cam->mmio_base_phys, cam->mmio_size);
1860 cam->mmio_base_phys = 0;
1861 }
1862
1863 kfree(cam);
1864
1865 return 0;
1866}
1867
1868static struct platform_driver omap24xxcam_driver = {
1869 .probe = omap24xxcam_probe,
1870 .remove = omap24xxcam_remove,
1871#ifdef CONFIG_PM
1872 .suspend = omap24xxcam_suspend,
1873 .resume = omap24xxcam_resume,
1874#endif
1875 .driver = {
1876 .name = CAM_NAME,
1877 .owner = THIS_MODULE,
1878 },
1879};
1880
1881/*
1882 *
1883 * Module initialisation and deinitialisation
1884 *
1885 */
1886
1887static int __init omap24xxcam_init(void)
1888{
1889 return platform_driver_register(&omap24xxcam_driver);
1890}
1891
1892static void __exit omap24xxcam_cleanup(void)
1893{
1894 platform_driver_unregister(&omap24xxcam_driver);
1895}
1896
1897MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
1898MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
1899MODULE_LICENSE("GPL");
1900module_param(video_nr, int, 0);
1901MODULE_PARM_DESC(video_nr,
1902 "Minor number for video device (-1 ==> auto assign)");
1903module_param(capture_mem, int, 0);
1904MODULE_PARM_DESC(capture_mem, "Maximum amount of memory for capture "
1905 "buffers (default 4800kiB)");
1906
1907module_init(omap24xxcam_init);
1908module_exit(omap24xxcam_cleanup);
diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
new file mode 100644
index 000000000000..2ce67f5a48d5
--- /dev/null
+++ b/drivers/media/video/omap24xxcam.h
@@ -0,0 +1,593 @@
1/*
2 * drivers/media/video/omap24xxcam.h
3 *
4 * Copyright (C) 2004 MontaVista Software, Inc.
5 * Copyright (C) 2004 Texas Instruments.
6 * Copyright (C) 2007 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
9 *
10 * Based on code from Andy Lowe <source@mvista.com>.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 * 02110-1301 USA
25 */
26
27#ifndef OMAP24XXCAM_H
28#define OMAP24XXCAM_H
29
30#include <media/videobuf-dma-sg.h>
31#include <media/v4l2-int-device.h>
32
33/*
34 *
35 * General driver related definitions.
36 *
37 */
38
39#define CAM_NAME "omap24xxcam"
40
41#define CAM_MCLK 96000000
42
43/* number of bytes transferred per DMA request */
44#define DMA_THRESHOLD 32
45
46/*
47 * NUM_CAMDMA_CHANNELS is the number of logical channels provided by
48 * the camera DMA controller.
49 */
50#define NUM_CAMDMA_CHANNELS 4
51
52/*
53 * NUM_SG_DMA is the number of scatter-gather DMA transfers that can
54 * be queued. (We don't have any overlay sglists now.)
55 */
56#define NUM_SG_DMA (VIDEO_MAX_FRAME)
57
58/*
59 *
60 * Register definitions.
61 *
62 */
63
64/* subsystem register block offsets */
65#define CC_REG_OFFSET 0x00000400
66#define CAMDMA_REG_OFFSET 0x00000800
67#define CAMMMU_REG_OFFSET 0x00000C00
68
69/* define camera subsystem register offsets */
70#define CAM_REVISION 0x000
71#define CAM_SYSCONFIG 0x010
72#define CAM_SYSSTATUS 0x014
73#define CAM_IRQSTATUS 0x018
74#define CAM_GPO 0x040
75#define CAM_GPI 0x050
76
77/* define camera core register offsets */
78#define CC_REVISION 0x000
79#define CC_SYSCONFIG 0x010
80#define CC_SYSSTATUS 0x014
81#define CC_IRQSTATUS 0x018
82#define CC_IRQENABLE 0x01C
83#define CC_CTRL 0x040
84#define CC_CTRL_DMA 0x044
85#define CC_CTRL_XCLK 0x048
86#define CC_FIFODATA 0x04C
87#define CC_TEST 0x050
88#define CC_GENPAR 0x054
89#define CC_CCPFSCR 0x058
90#define CC_CCPFECR 0x05C
91#define CC_CCPLSCR 0x060
92#define CC_CCPLECR 0x064
93#define CC_CCPDFR 0x068
94
95/* define camera dma register offsets */
96#define CAMDMA_REVISION 0x000
97#define CAMDMA_IRQSTATUS_L0 0x008
98#define CAMDMA_IRQSTATUS_L1 0x00C
99#define CAMDMA_IRQSTATUS_L2 0x010
100#define CAMDMA_IRQSTATUS_L3 0x014
101#define CAMDMA_IRQENABLE_L0 0x018
102#define CAMDMA_IRQENABLE_L1 0x01C
103#define CAMDMA_IRQENABLE_L2 0x020
104#define CAMDMA_IRQENABLE_L3 0x024
105#define CAMDMA_SYSSTATUS 0x028
106#define CAMDMA_OCP_SYSCONFIG 0x02C
107#define CAMDMA_CAPS_0 0x064
108#define CAMDMA_CAPS_2 0x06C
109#define CAMDMA_CAPS_3 0x070
110#define CAMDMA_CAPS_4 0x074
111#define CAMDMA_GCR 0x078
112#define CAMDMA_CCR(n) (0x080 + (n)*0x60)
113#define CAMDMA_CLNK_CTRL(n) (0x084 + (n)*0x60)
114#define CAMDMA_CICR(n) (0x088 + (n)*0x60)
115#define CAMDMA_CSR(n) (0x08C + (n)*0x60)
116#define CAMDMA_CSDP(n) (0x090 + (n)*0x60)
117#define CAMDMA_CEN(n) (0x094 + (n)*0x60)
118#define CAMDMA_CFN(n) (0x098 + (n)*0x60)
119#define CAMDMA_CSSA(n) (0x09C + (n)*0x60)
120#define CAMDMA_CDSA(n) (0x0A0 + (n)*0x60)
121#define CAMDMA_CSEI(n) (0x0A4 + (n)*0x60)
122#define CAMDMA_CSFI(n) (0x0A8 + (n)*0x60)
123#define CAMDMA_CDEI(n) (0x0AC + (n)*0x60)
124#define CAMDMA_CDFI(n) (0x0B0 + (n)*0x60)
125#define CAMDMA_CSAC(n) (0x0B4 + (n)*0x60)
126#define CAMDMA_CDAC(n) (0x0B8 + (n)*0x60)
127#define CAMDMA_CCEN(n) (0x0BC + (n)*0x60)
128#define CAMDMA_CCFN(n) (0x0C0 + (n)*0x60)
129#define CAMDMA_COLOR(n) (0x0C4 + (n)*0x60)
130
131/* define camera mmu register offsets */
132#define CAMMMU_REVISION 0x000
133#define CAMMMU_SYSCONFIG 0x010
134#define CAMMMU_SYSSTATUS 0x014
135#define CAMMMU_IRQSTATUS 0x018
136#define CAMMMU_IRQENABLE 0x01C
137#define CAMMMU_WALKING_ST 0x040
138#define CAMMMU_CNTL 0x044
139#define CAMMMU_FAULT_AD 0x048
140#define CAMMMU_TTB 0x04C
141#define CAMMMU_LOCK 0x050
142#define CAMMMU_LD_TLB 0x054
143#define CAMMMU_CAM 0x058
144#define CAMMMU_RAM 0x05C
145#define CAMMMU_GFLUSH 0x060
146#define CAMMMU_FLUSH_ENTRY 0x064
147#define CAMMMU_READ_CAM 0x068
148#define CAMMMU_READ_RAM 0x06C
149#define CAMMMU_EMU_FAULT_AD 0x070
150
151/* Define bit fields within selected registers */
152#define CAM_REVISION_MAJOR (15 << 4)
153#define CAM_REVISION_MAJOR_SHIFT 4
154#define CAM_REVISION_MINOR (15 << 0)
155#define CAM_REVISION_MINOR_SHIFT 0
156
157#define CAM_SYSCONFIG_SOFTRESET (1 << 1)
158#define CAM_SYSCONFIG_AUTOIDLE (1 << 0)
159
160#define CAM_SYSSTATUS_RESETDONE (1 << 0)
161
162#define CAM_IRQSTATUS_CC_IRQ (1 << 4)
163#define CAM_IRQSTATUS_MMU_IRQ (1 << 3)
164#define CAM_IRQSTATUS_DMA_IRQ2 (1 << 2)
165#define CAM_IRQSTATUS_DMA_IRQ1 (1 << 1)
166#define CAM_IRQSTATUS_DMA_IRQ0 (1 << 0)
167
168#define CAM_GPO_CAM_S_P_EN (1 << 1)
169#define CAM_GPO_CAM_CCP_MODE (1 << 0)
170
171#define CAM_GPI_CC_DMA_REQ1 (1 << 24)
172#define CAP_GPI_CC_DMA_REQ0 (1 << 23)
173#define CAP_GPI_CAM_MSTANDBY (1 << 21)
174#define CAP_GPI_CAM_WAIT (1 << 20)
175#define CAP_GPI_CAM_S_DATA (1 << 17)
176#define CAP_GPI_CAM_S_CLK (1 << 16)
177#define CAP_GPI_CAM_P_DATA (0xFFF << 3)
178#define CAP_GPI_CAM_P_DATA_SHIFT 3
179#define CAP_GPI_CAM_P_VS (1 << 2)
180#define CAP_GPI_CAM_P_HS (1 << 1)
181#define CAP_GPI_CAM_P_CLK (1 << 0)
182
183#define CC_REVISION_MAJOR (15 << 4)
184#define CC_REVISION_MAJOR_SHIFT 4
185#define CC_REVISION_MINOR (15 << 0)
186#define CC_REVISION_MINOR_SHIFT 0
187
188#define CC_SYSCONFIG_SIDLEMODE (3 << 3)
189#define CC_SYSCONFIG_SIDLEMODE_FIDLE (0 << 3)
190#define CC_SYSCONFIG_SIDLEMODE_NIDLE (1 << 3)
191#define CC_SYSCONFIG_SOFTRESET (1 << 1)
192#define CC_SYSCONFIG_AUTOIDLE (1 << 0)
193
194#define CC_SYSSTATUS_RESETDONE (1 << 0)
195
196#define CC_IRQSTATUS_FS_IRQ (1 << 19)
197#define CC_IRQSTATUS_LE_IRQ (1 << 18)
198#define CC_IRQSTATUS_LS_IRQ (1 << 17)
199#define CC_IRQSTATUS_FE_IRQ (1 << 16)
200#define CC_IRQSTATUS_FW_ERR_IRQ (1 << 10)
201#define CC_IRQSTATUS_FSC_ERR_IRQ (1 << 9)
202#define CC_IRQSTATUS_SSC_ERR_IRQ (1 << 8)
203#define CC_IRQSTATUS_FIFO_NOEMPTY_IRQ (1 << 4)
204#define CC_IRQSTATUS_FIFO_FULL_IRQ (1 << 3)
205#define CC_IRQSTATUS_FIFO_THR_IRQ (1 << 2)
206#define CC_IRQSTATUS_FIFO_OF_IRQ (1 << 1)
207#define CC_IRQSTATUS_FIFO_UF_IRQ (1 << 0)
208
209#define CC_IRQENABLE_FS_IRQ (1 << 19)
210#define CC_IRQENABLE_LE_IRQ (1 << 18)
211#define CC_IRQENABLE_LS_IRQ (1 << 17)
212#define CC_IRQENABLE_FE_IRQ (1 << 16)
213#define CC_IRQENABLE_FW_ERR_IRQ (1 << 10)
214#define CC_IRQENABLE_FSC_ERR_IRQ (1 << 9)
215#define CC_IRQENABLE_SSC_ERR_IRQ (1 << 8)
216#define CC_IRQENABLE_FIFO_NOEMPTY_IRQ (1 << 4)
217#define CC_IRQENABLE_FIFO_FULL_IRQ (1 << 3)
218#define CC_IRQENABLE_FIFO_THR_IRQ (1 << 2)
219#define CC_IRQENABLE_FIFO_OF_IRQ (1 << 1)
220#define CC_IRQENABLE_FIFO_UF_IRQ (1 << 0)
221
222#define CC_CTRL_CC_ONE_SHOT (1 << 20)
223#define CC_CTRL_CC_IF_SYNCHRO (1 << 19)
224#define CC_CTRL_CC_RST (1 << 18)
225#define CC_CTRL_CC_FRAME_TRIG (1 << 17)
226#define CC_CTRL_CC_EN (1 << 16)
227#define CC_CTRL_NOBT_SYNCHRO (1 << 13)
228#define CC_CTRL_BT_CORRECT (1 << 12)
229#define CC_CTRL_PAR_ORDERCAM (1 << 11)
230#define CC_CTRL_PAR_CLK_POL (1 << 10)
231#define CC_CTRL_NOBT_HS_POL (1 << 9)
232#define CC_CTRL_NOBT_VS_POL (1 << 8)
233#define CC_CTRL_PAR_MODE (7 << 1)
234#define CC_CTRL_PAR_MODE_SHIFT 1
235#define CC_CTRL_PAR_MODE_NOBT8 (0 << 1)
236#define CC_CTRL_PAR_MODE_NOBT10 (1 << 1)
237#define CC_CTRL_PAR_MODE_NOBT12 (2 << 1)
238#define CC_CTRL_PAR_MODE_BT8 (4 << 1)
239#define CC_CTRL_PAR_MODE_BT10 (5 << 1)
240#define CC_CTRL_PAR_MODE_FIFOTEST (7 << 1)
241#define CC_CTRL_CCP_MODE (1 << 0)
242
243#define CC_CTRL_DMA_EN (1 << 8)
244#define CC_CTRL_DMA_FIFO_THRESHOLD (0x7F << 0)
245#define CC_CTRL_DMA_FIFO_THRESHOLD_SHIFT 0
246
247#define CC_CTRL_XCLK_DIV (0x1F << 0)
248#define CC_CTRL_XCLK_DIV_SHIFT 0
249#define CC_CTRL_XCLK_DIV_STABLE_LOW (0 << 0)
250#define CC_CTRL_XCLK_DIV_STABLE_HIGH (1 << 0)
251#define CC_CTRL_XCLK_DIV_BYPASS (31 << 0)
252
253#define CC_TEST_FIFO_RD_POINTER (0xFF << 24)
254#define CC_TEST_FIFO_RD_POINTER_SHIFT 24
255#define CC_TEST_FIFO_WR_POINTER (0xFF << 16)
256#define CC_TEST_FIFO_WR_POINTER_SHIFT 16
257#define CC_TEST_FIFO_LEVEL (0xFF << 8)
258#define CC_TEST_FIFO_LEVEL_SHIFT 8
259#define CC_TEST_FIFO_LEVEL_PEAK (0xFF << 0)
260#define CC_TEST_FIFO_LEVEL_PEAK_SHIFT 0
261
262#define CC_GENPAR_FIFO_DEPTH (7 << 0)
263#define CC_GENPAR_FIFO_DEPTH_SHIFT 0
264
265#define CC_CCPDFR_ALPHA (0xFF << 8)
266#define CC_CCPDFR_ALPHA_SHIFT 8
267#define CC_CCPDFR_DATAFORMAT (15 << 0)
268#define CC_CCPDFR_DATAFORMAT_SHIFT 0
269#define CC_CCPDFR_DATAFORMAT_YUV422BE (0 << 0)
270#define CC_CCPDFR_DATAFORMAT_YUV422 (1 << 0)
271#define CC_CCPDFR_DATAFORMAT_YUV420 (2 << 0)
272#define CC_CCPDFR_DATAFORMAT_RGB444 (4 << 0)
273#define CC_CCPDFR_DATAFORMAT_RGB565 (5 << 0)
274#define CC_CCPDFR_DATAFORMAT_RGB888NDE (6 << 0)
275#define CC_CCPDFR_DATAFORMAT_RGB888 (7 << 0)
276#define CC_CCPDFR_DATAFORMAT_RAW8NDE (8 << 0)
277#define CC_CCPDFR_DATAFORMAT_RAW8 (9 << 0)
278#define CC_CCPDFR_DATAFORMAT_RAW10NDE (10 << 0)
279#define CC_CCPDFR_DATAFORMAT_RAW10 (11 << 0)
280#define CC_CCPDFR_DATAFORMAT_RAW12NDE (12 << 0)
281#define CC_CCPDFR_DATAFORMAT_RAW12 (13 << 0)
282#define CC_CCPDFR_DATAFORMAT_JPEG8 (15 << 0)
283
284#define CAMDMA_REVISION_MAJOR (15 << 4)
285#define CAMDMA_REVISION_MAJOR_SHIFT 4
286#define CAMDMA_REVISION_MINOR (15 << 0)
287#define CAMDMA_REVISION_MINOR_SHIFT 0
288
289#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE (3 << 12)
290#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY (0 << 12)
291#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_NSTANDBY (1 << 12)
292#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_SSTANDBY (2 << 12)
293#define CAMDMA_OCP_SYSCONFIG_FUNC_CLOCK (1 << 9)
294#define CAMDMA_OCP_SYSCONFIG_OCP_CLOCK (1 << 8)
295#define CAMDMA_OCP_SYSCONFIG_EMUFREE (1 << 5)
296#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE (3 << 3)
297#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE (0 << 3)
298#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_NIDLE (1 << 3)
299#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_SIDLE (2 << 3)
300#define CAMDMA_OCP_SYSCONFIG_SOFTRESET (1 << 1)
301#define CAMDMA_OCP_SYSCONFIG_AUTOIDLE (1 << 0)
302
303#define CAMDMA_SYSSTATUS_RESETDONE (1 << 0)
304
305#define CAMDMA_GCR_ARBITRATION_RATE (0xFF << 16)
306#define CAMDMA_GCR_ARBITRATION_RATE_SHIFT 16
307#define CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH (0xFF << 0)
308#define CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH_SHIFT 0
309
310#define CAMDMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
311#define CAMDMA_CCR_PREFETCH (1 << 23)
312#define CAMDMA_CCR_SUPERVISOR (1 << 22)
313#define CAMDMA_CCR_SECURE (1 << 21)
314#define CAMDMA_CCR_BS (1 << 18)
315#define CAMDMA_CCR_TRANSPARENT_COPY_ENABLE (1 << 17)
316#define CAMDMA_CCR_CONSTANT_FILL_ENABLE (1 << 16)
317#define CAMDMA_CCR_DST_AMODE (3 << 14)
318#define CAMDMA_CCR_DST_AMODE_CONST_ADDR (0 << 14)
319#define CAMDMA_CCR_DST_AMODE_POST_INC (1 << 14)
320#define CAMDMA_CCR_DST_AMODE_SGL_IDX (2 << 14)
321#define CAMDMA_CCR_DST_AMODE_DBL_IDX (3 << 14)
322#define CAMDMA_CCR_SRC_AMODE (3 << 12)
323#define CAMDMA_CCR_SRC_AMODE_CONST_ADDR (0 << 12)
324#define CAMDMA_CCR_SRC_AMODE_POST_INC (1 << 12)
325#define CAMDMA_CCR_SRC_AMODE_SGL_IDX (2 << 12)
326#define CAMDMA_CCR_SRC_AMODE_DBL_IDX (3 << 12)
327#define CAMDMA_CCR_WR_ACTIVE (1 << 10)
328#define CAMDMA_CCR_RD_ACTIVE (1 << 9)
329#define CAMDMA_CCR_SUSPEND_SENSITIVE (1 << 8)
330#define CAMDMA_CCR_ENABLE (1 << 7)
331#define CAMDMA_CCR_PRIO (1 << 6)
332#define CAMDMA_CCR_FS (1 << 5)
333#define CAMDMA_CCR_SYNCHRO ((3 << 19) | (31 << 0))
334#define CAMDMA_CCR_SYNCHRO_CAMERA 0x01
335
336#define CAMDMA_CLNK_CTRL_ENABLE_LNK (1 << 15)
337#define CAMDMA_CLNK_CTRL_NEXTLCH_ID (0x1F << 0)
338#define CAMDMA_CLNK_CTRL_NEXTLCH_ID_SHIFT 0
339
340#define CAMDMA_CICR_MISALIGNED_ERR_IE (1 << 11)
341#define CAMDMA_CICR_SUPERVISOR_ERR_IE (1 << 10)
342#define CAMDMA_CICR_SECURE_ERR_IE (1 << 9)
343#define CAMDMA_CICR_TRANS_ERR_IE (1 << 8)
344#define CAMDMA_CICR_PACKET_IE (1 << 7)
345#define CAMDMA_CICR_BLOCK_IE (1 << 5)
346#define CAMDMA_CICR_LAST_IE (1 << 4)
347#define CAMDMA_CICR_FRAME_IE (1 << 3)
348#define CAMDMA_CICR_HALF_IE (1 << 2)
349#define CAMDMA_CICR_DROP_IE (1 << 1)
350
351#define CAMDMA_CSR_MISALIGNED_ERR (1 << 11)
352#define CAMDMA_CSR_SUPERVISOR_ERR (1 << 10)
353#define CAMDMA_CSR_SECURE_ERR (1 << 9)
354#define CAMDMA_CSR_TRANS_ERR (1 << 8)
355#define CAMDMA_CSR_PACKET (1 << 7)
356#define CAMDMA_CSR_SYNC (1 << 6)
357#define CAMDMA_CSR_BLOCK (1 << 5)
358#define CAMDMA_CSR_LAST (1 << 4)
359#define CAMDMA_CSR_FRAME (1 << 3)
360#define CAMDMA_CSR_HALF (1 << 2)
361#define CAMDMA_CSR_DROP (1 << 1)
362
363#define CAMDMA_CSDP_SRC_ENDIANNESS (1 << 21)
364#define CAMDMA_CSDP_SRC_ENDIANNESS_LOCK (1 << 20)
365#define CAMDMA_CSDP_DST_ENDIANNESS (1 << 19)
366#define CAMDMA_CSDP_DST_ENDIANNESS_LOCK (1 << 18)
367#define CAMDMA_CSDP_WRITE_MODE (3 << 16)
368#define CAMDMA_CSDP_WRITE_MODE_WRNP (0 << 16)
369#define CAMDMA_CSDP_WRITE_MODE_POSTED (1 << 16)
370#define CAMDMA_CSDP_WRITE_MODE_POSTED_LAST_WRNP (2 << 16)
371#define CAMDMA_CSDP_DST_BURST_EN (3 << 14)
372#define CAMDMA_CSDP_DST_BURST_EN_1 (0 << 14)
373#define CAMDMA_CSDP_DST_BURST_EN_16 (1 << 14)
374#define CAMDMA_CSDP_DST_BURST_EN_32 (2 << 14)
375#define CAMDMA_CSDP_DST_BURST_EN_64 (3 << 14)
376#define CAMDMA_CSDP_DST_PACKED (1 << 13)
377#define CAMDMA_CSDP_WR_ADD_TRSLT (15 << 9)
378#define CAMDMA_CSDP_WR_ADD_TRSLT_ENABLE_MREQADD (3 << 9)
379#define CAMDMA_CSDP_SRC_BURST_EN (3 << 7)
380#define CAMDMA_CSDP_SRC_BURST_EN_1 (0 << 7)
381#define CAMDMA_CSDP_SRC_BURST_EN_16 (1 << 7)
382#define CAMDMA_CSDP_SRC_BURST_EN_32 (2 << 7)
383#define CAMDMA_CSDP_SRC_BURST_EN_64 (3 << 7)
384#define CAMDMA_CSDP_SRC_PACKED (1 << 6)
385#define CAMDMA_CSDP_RD_ADD_TRSLT (15 << 2)
386#define CAMDMA_CSDP_RD_ADD_TRSLT_ENABLE_MREQADD (3 << 2)
387#define CAMDMA_CSDP_DATA_TYPE (3 << 0)
388#define CAMDMA_CSDP_DATA_TYPE_8BITS (0 << 0)
389#define CAMDMA_CSDP_DATA_TYPE_16BITS (1 << 0)
390#define CAMDMA_CSDP_DATA_TYPE_32BITS (2 << 0)
391
392#define CAMMMU_SYSCONFIG_AUTOIDLE (1 << 0)
393
394/*
395 *
396 * Declarations.
397 *
398 */
399
400/* forward declarations */
401struct omap24xxcam_sgdma;
402struct omap24xxcam_dma;
403
404typedef void (*sgdma_callback_t)(struct omap24xxcam_sgdma *cam,
405 u32 status, void *arg);
406typedef void (*dma_callback_t)(struct omap24xxcam_dma *cam,
407 u32 status, void *arg);
408
409struct channel_state {
410 dma_callback_t callback;
411 void *arg;
412};
413
414/* sgdma state for each of the possible videobuf_buffers + 2 overlays */
415struct sgdma_state {
416 const struct scatterlist *sglist;
417 int sglen; /* number of sglist entries */
418 int next_sglist; /* index of next sglist entry to process */
419 unsigned int bytes_read; /* number of bytes read */
420 unsigned int len; /* total length of sglist (excluding
421 * bytes due to page alignment) */
422 int queued_sglist; /* number of sglist entries queued for DMA */
423 u32 csr; /* DMA return code */
424 sgdma_callback_t callback;
425 void *arg;
426};
427
428/* physical DMA channel management */
429struct omap24xxcam_dma {
430 spinlock_t lock; /* Lock for the whole structure. */
431
432 unsigned long base; /* base address for dma controller */
433
434 /* While dma_stop!=0, an attempt to start a new DMA transfer will
435 * fail.
436 */
437 atomic_t dma_stop;
438 int free_dmach; /* number of dma channels free */
439 int next_dmach; /* index of next dma channel to use */
440 struct channel_state ch_state[NUM_CAMDMA_CHANNELS];
441};
442
443/* scatter-gather DMA (scatterlist stuff) management */
444struct omap24xxcam_sgdma {
445 struct omap24xxcam_dma dma;
446
447 spinlock_t lock; /* Lock for the fields below. */
448 int free_sgdma; /* number of free sg dma slots */
449 int next_sgdma; /* index of next sg dma slot to use */
450 struct sgdma_state sg_state[NUM_SG_DMA];
451
452 /* Reset timer data */
453 struct timer_list reset_timer;
454};
455
456/* per-device data structure */
457struct omap24xxcam_device {
458 /*** mutex ***/
459 /*
460 * mutex serialises access to this structure. Camera
461 * opening and releasing are also synchronised by it.
462 */
463 struct mutex mutex;
464
465 /*** general driver state information ***/
466 atomic_t users;
467 /*
468 * Lock to serialise core enabling and disabling and access to
469 * sgdma_in_queue.
470 */
471 spinlock_t core_enable_disable_lock;
472 /*
473 * Number of sgdma requests in scatter-gather queue, protected
474 * by the lock above.
475 */
476 int sgdma_in_queue;
477 /*
478 * Sensor interface parameters: interface type, CC_CTRL
479 * register value and interface specific data.
480 */
481 int if_type;
482 union {
483 struct parallel {
484 u32 xclk;
485 } bt656;
486 } if_u;
487 u32 cc_ctrl;
488
489 /*** subsystem structures ***/
490 struct omap24xxcam_sgdma sgdma;
491
492 /*** hardware resources ***/
493 unsigned int irq;
494 unsigned long mmio_base;
495 unsigned long mmio_base_phys;
496 unsigned long mmio_size;
497
498 /*** interfaces and device ***/
499 struct v4l2_int_device *sdev;
500 struct device *dev;
501 struct video_device *vfd;
502
503 /*** camera and sensor reset related stuff ***/
504 struct work_struct sensor_reset_work;
505 /*
506 * We're in the middle of a reset. Don't enable the core if this
507 * is non-zero! This helps decide what to do in the case where
508 * videobuf_qbuf is called while we are in the middle of
509 * a reset.
510 */
511 atomic_t in_reset;
512 /*
513 * Non-zero if we don't want any resets for now. Used to
514 * prevent the reset work from running when we're about to
515 * stop streaming.
516 */
517 atomic_t reset_disable;
518
519 /*** video device parameters ***/
520 int capture_mem;
521
522 /*** camera module clocks ***/
523 struct clk *fck;
524 struct clk *ick;
525
526 /*** capture data ***/
527 /* file handle, if streaming is on */
528 struct file *streaming;
529};
530
531/* Per-file handle data. */
532struct omap24xxcam_fh {
533 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
534 struct videobuf_queue vbq;
535 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
536 atomic_t field_count; /* field counter for videobuf_buffer */
537 /* accessing cam here doesn't need serialisation: it's constant */
538 struct omap24xxcam_device *cam;
539};
540
541/*
542 *
543 * Register I/O functions.
544 *
545 */
546
547static inline u32 omap24xxcam_reg_in(unsigned long base, u32 offset)
548{
549 return readl(base + offset);
550}
551
552static inline u32 omap24xxcam_reg_out(unsigned long base, u32 offset,
553 u32 val)
554{
555 writel(val, base + offset);
556 return val;
557}
558
559static inline u32 omap24xxcam_reg_merge(unsigned long base, u32 offset,
560 u32 val, u32 mask)
561{
562 u32 addr = base + offset;
563 u32 new_val = (readl(addr) & ~mask) | (val & mask);
564
565 writel(new_val, addr);
566 return new_val;
567}
568
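/*
 * Editor's illustration, not part of the patch: a minimal sketch of how
 * the three accessors above combine for a soft reset of the camera core.
 * It assumes the CAM_SYSCONFIG and CAM_SYSSTATUS register offsets defined
 * earlier in this header and a mapped register base in cam->mmio_base.
 */
static void omap24xxcam_core_softreset_sketch(struct omap24xxcam_device *cam)
{
	/* request a soft reset of the camera core */
	omap24xxcam_reg_out(cam->mmio_base, CAM_SYSCONFIG,
			    CAM_SYSCONFIG_SOFTRESET);

	/* busy-wait until the core reports that the reset has completed */
	while (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
		 & CAM_SYSSTATUS_RESETDONE))
		cpu_relax();

	/* re-enable autoidle without disturbing the other bits */
	omap24xxcam_reg_merge(cam->mmio_base, CAM_SYSCONFIG,
			      CAM_SYSCONFIG_AUTOIDLE, CAM_SYSCONFIG_AUTOIDLE);
}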
569/*
570 *
571 * Function prototypes.
572 *
573 */
574
575/* dma prototypes */
576
577void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma);
578void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma);
579
580/* sgdma prototypes */
581
582void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma);
583int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
584 const struct scatterlist *sglist, int sglen,
585 int len, sgdma_callback_t callback, void *arg);
586void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma);
587void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
588 unsigned long base,
589 void (*reset_callback)(unsigned long data),
590 unsigned long reset_callback_data);
591void omap24xxcam_sgdma_exit(struct omap24xxcam_sgdma *sgdma);
592
593#endif
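The prototypes above form the header's scatter-gather DMA API: a caller hands
omap24xxcam_sgdma_queue() an sglist plus a completion callback of type
sgdma_callback_t, and the status delivered to that callback is the
CAMDMA_CSR_* word kept in struct sgdma_state. A hedged sketch of a caller
follows; cam, sglist, sglen, frame_len, done and frame_done are hypothetical
names, and error handling is reduced to a log message:

/* completion callback: csr carries the CAMDMA_CSR_* bits for the transfer */
static void frame_done(struct omap24xxcam_sgdma *sgdma, u32 csr, void *arg)
{
	struct completion *done = arg;

	if (csr & (CAMDMA_CSR_MISALIGNED_ERR | CAMDMA_CSR_SUPERVISOR_ERR |
		   CAMDMA_CSR_SECURE_ERR | CAMDMA_CSR_TRANS_ERR))
		pr_err("camera sgdma transfer failed, csr 0x%08x\n", csr);
	complete(done);
}

	/* queue one frame described by sglist/sglen; a negative return
	 * means no free sgdma slot was available */
	ret = omap24xxcam_sgdma_queue(&cam->sgdma, sglist, sglen, frame_len,
				      frame_done, &done);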
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 210f1240b331..6ee9b69cc4a9 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -4011,8 +4011,7 @@ ov51x_v4l1_close(struct inode *inode, struct file *file)
4011 4011
4012/* Do not call this function directly! */ 4012/* Do not call this function directly! */
4013static int 4013static int
4014ov51x_v4l1_ioctl_internal(struct inode *inode, struct file *file, 4014ov51x_v4l1_ioctl_internal(struct file *file, unsigned int cmd, void *arg)
4015 unsigned int cmd, void *arg)
4016{ 4015{
4017 struct video_device *vdev = file->private_data; 4016 struct video_device *vdev = file->private_data;
4018 struct usb_ov511 *ov = video_get_drvdata(vdev); 4017 struct usb_ov511 *ov = video_get_drvdata(vdev);
@@ -4461,7 +4460,7 @@ ov51x_v4l1_ioctl(struct inode *inode, struct file *file,
4461 if (mutex_lock_interruptible(&ov->lock)) 4460 if (mutex_lock_interruptible(&ov->lock))
4462 return -EINTR; 4461 return -EINTR;
4463 4462
4464 rc = video_usercopy(inode, file, cmd, arg, ov51x_v4l1_ioctl_internal); 4463 rc = video_usercopy(file, cmd, arg, ov51x_v4l1_ioctl_internal);
4465 4464
4466 mutex_unlock(&ov->lock); 4465 mutex_unlock(&ov->lock);
4467 return rc; 4466 return rc;
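The two hunks above track a V4L2 core change: video_usercopy() no longer takes
an inode and therefore no longer forwards one to the per-driver helper, so
ov51x_v4l1_ioctl_internal() drops its inode parameter. A hedged sketch of the
resulting handler shape, using hypothetical mydrv_* names (the pms.c hunk
further below follows the same pattern):

static int mydrv_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
	/* a real driver fetches its per-device state via video_devdata(file),
	 * then decodes cmd and acts on the kernel-space arg buffer */
	return 0;
}

static int mydrv_ioctl(struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
	/* video_usercopy() copies arg in/out and calls the helper */
	return video_usercopy(file, cmd, arg, mydrv_do_ioctl);
}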
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
new file mode 100644
index 000000000000..54b736fcc07a
--- /dev/null
+++ b/drivers/media/video/ov772x.c
@@ -0,0 +1,1012 @@
1/*
2 * ov772x Camera Driver
3 *
4 * Copyright (C) 2008 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on ov7670 and soc_camera_platform driver,
8 *
9 * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
10 * Copyright (C) 2008 Magnus Damm
11 * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/i2c.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/videodev2.h>
24#include <media/v4l2-chip-ident.h>
25#include <media/v4l2-common.h>
26#include <media/soc_camera.h>
27#include <media/ov772x.h>
28
29/*
30 * register offset
31 */
32#define GAIN 0x00 /* AGC - Gain control gain setting */
33#define BLUE 0x01 /* AWB - Blue channel gain setting */
34#define RED 0x02 /* AWB - Red channel gain setting */
35#define GREEN 0x03 /* AWB - Green channel gain setting */
36#define COM1 0x04 /* Common control 1 */
37#define BAVG 0x05 /* U/B Average Level */
38#define GAVG 0x06 /* Y/Gb Average Level */
39#define RAVG 0x07 /* V/R Average Level */
40#define AECH 0x08 /* Exposure Value - AEC MSBs */
41#define COM2 0x09 /* Common control 2 */
42#define PID 0x0A /* Product ID Number MSB */
43#define VER 0x0B /* Product ID Number LSB */
44#define COM3 0x0C /* Common control 3 */
45#define COM4 0x0D /* Common control 4 */
46#define COM5 0x0E /* Common control 5 */
47#define COM6 0x0F /* Common control 6 */
48#define AEC 0x10 /* Exposure Value */
49#define CLKRC 0x11 /* Internal clock */
50#define COM7 0x12 /* Common control 7 */
51#define COM8 0x13 /* Common control 8 */
52#define COM9 0x14 /* Common control 9 */
53#define COM10 0x15 /* Common control 10 */
54#define REG16 0x16 /* Register 16 */
55#define HSTART 0x17 /* Horizontal frame (HREF column) start high 8-bit */
56#define HSIZE 0x18 /* Horizontal sensor size */
57#define VSTART 0x19 /* Vertical frame (row) start high 8-bit */
58#define VSIZE 0x1A /* Vertical sensor size */
59#define PSHFT 0x1B /* Data format - pixel delay select */
60#define MIDH 0x1C /* Manufacturer ID byte - high */
61#define MIDL 0x1D /* Manufacturer ID byte - low */
62#define LAEC 0x1F /* Fine AEC value */
63#define COM11 0x20 /* Common control 11 */
64#define BDBASE 0x22 /* Banding filter Minimum AEC value */
65#define DBSTEP 0x23 /* Banding filter Maximum Step */
66#define AEW 0x24 /* AGC/AEC - Stable operating region (upper limit) */
67#define AEB 0x25 /* AGC/AEC - Stable operating region (lower limit) */
68#define VPT 0x26 /* AGC/AEC Fast mode operating region */
69#define REG28 0x28 /* Register 28 */
70#define HOUTSIZE 0x29 /* Horizontal data output size MSBs */
71#define EXHCH 0x2A /* Dummy pixel insert MSB */
72#define EXHCL 0x2B /* Dummy pixel insert LSB */
73#define VOUTSIZE 0x2C /* Vertical data output size MSBs */
74#define ADVFL 0x2D /* LSB of insert dummy lines in Vertical direction */
75#define ADVFH 0x2E /* MSB of insert dummy lines in Vertical direction */
76#define YAVE 0x2F /* Y/G Channel Average value */
77#define LUMHTH 0x30 /* Histogram AEC/AGC Luminance high level threshold */
78#define LUMLTH 0x31 /* Histogram AEC/AGC Luminance low level threshold */
79#define HREF 0x32 /* Image start and size control */
80#define DM_LNL 0x33 /* Dummy line low 8 bits */
81#define DM_LNH 0x34 /* Dummy line high 8 bits */
82#define ADOFF_B 0x35 /* AD offset compensation value for B channel */
83#define ADOFF_R 0x36 /* AD offset compensation value for R channel */
84#define ADOFF_GB 0x37 /* AD offset compensation value for Gb channel */
85#define ADOFF_GR 0x38 /* AD offset compensation value for Gr channel */
86#define OFF_B 0x39 /* Analog process B channel offset value */
87#define OFF_R 0x3A /* Analog process R channel offset value */
88#define OFF_GB 0x3B /* Analog process Gb channel offset value */
89#define OFF_GR 0x3C /* Analog process Gr channel offset value */
90#define COM12 0x3D /* Common control 12 */
91#define COM13 0x3E /* Common control 13 */
92#define COM14 0x3F /* Common control 14 */
93#define COM15 0x40 /* Common control 15*/
94#define COM16 0x41 /* Common control 16 */
95#define TGT_B 0x42 /* BLC blue channel target value */
96#define TGT_R 0x43 /* BLC red channel target value */
97#define TGT_GB 0x44 /* BLC Gb channel target value */
98#define TGT_GR 0x45 /* BLC Gr channel target value */
99/* for ov7720 */
100#define LCC0 0x46 /* Lens correction control 0 */
101#define LCC1 0x47 /* Lens correction option 1 - X coordinate */
102#define LCC2 0x48 /* Lens correction option 2 - Y coordinate */
103#define LCC3 0x49 /* Lens correction option 3 */
104#define LCC4 0x4A /* Lens correction option 4 - radius of the circular */
105#define LCC5 0x4B /* Lens correction option 5 */
106#define LCC6 0x4C /* Lens correction option 6 */
107/* for ov7725 */
108#define LC_CTR 0x46 /* Lens correction control */
109#define LC_XC 0x47 /* X coordinate of lens correction center relative */
110#define LC_YC 0x48 /* Y coordinate of lens correction center relative */
111#define LC_COEF 0x49 /* Lens correction coefficient */
112#define LC_RADI 0x4A /* Lens correction radius */
113#define LC_COEFB 0x4B /* Lens B channel compensation coefficient */
114#define LC_COEFR 0x4C /* Lens R channel compensation coefficient */
115
116#define FIXGAIN 0x4D /* Analog fix gain amplifier */
117#define AREF0 0x4E /* Sensor reference control */
118#define AREF1 0x4F /* Sensor reference current control */
119#define AREF2 0x50 /* Analog reference control */
120#define AREF3 0x51 /* ADC reference control */
121#define AREF4 0x52 /* ADC reference control */
122#define AREF5 0x53 /* ADC reference control */
123#define AREF6 0x54 /* Analog reference control */
124#define AREF7 0x55 /* Analog reference control */
125#define UFIX 0x60 /* U channel fixed value output */
126#define VFIX 0x61 /* V channel fixed value output */
127#define AWBB_BLK 0x62 /* AWB option for advanced AWB */
128#define AWB_CTRL0 0x63 /* AWB control byte 0 */
129#define DSP_CTRL1 0x64 /* DSP control byte 1 */
130#define DSP_CTRL2 0x65 /* DSP control byte 2 */
131#define DSP_CTRL3 0x66 /* DSP control byte 3 */
132#define DSP_CTRL4 0x67 /* DSP control byte 4 */
133#define AWB_BIAS 0x68 /* AWB BLC level clip */
134#define AWB_CTRL1 0x69 /* AWB control 1 */
135#define AWB_CTRL2 0x6A /* AWB control 2 */
136#define AWB_CTRL3 0x6B /* AWB control 3 */
137#define AWB_CTRL4 0x6C /* AWB control 4 */
138#define AWB_CTRL5 0x6D /* AWB control 5 */
139#define AWB_CTRL6 0x6E /* AWB control 6 */
140#define AWB_CTRL7 0x6F /* AWB control 7 */
141#define AWB_CTRL8 0x70 /* AWB control 8 */
142#define AWB_CTRL9 0x71 /* AWB control 9 */
143#define AWB_CTRL10 0x72 /* AWB control 10 */
144#define AWB_CTRL11 0x73 /* AWB control 11 */
145#define AWB_CTRL12 0x74 /* AWB control 12 */
146#define AWB_CTRL13 0x75 /* AWB control 13 */
147#define AWB_CTRL14 0x76 /* AWB control 14 */
148#define AWB_CTRL15 0x77 /* AWB control 15 */
149#define AWB_CTRL16 0x78 /* AWB control 16 */
150#define AWB_CTRL17 0x79 /* AWB control 17 */
151#define AWB_CTRL18 0x7A /* AWB control 18 */
152#define AWB_CTRL19 0x7B /* AWB control 19 */
153#define AWB_CTRL20 0x7C /* AWB control 20 */
154#define AWB_CTRL21 0x7D /* AWB control 21 */
155#define GAM1 0x7E /* Gamma Curve 1st segment input end point */
156#define GAM2 0x7F /* Gamma Curve 2nd segment input end point */
157#define GAM3 0x80 /* Gamma Curve 3rd segment input end point */
158#define GAM4 0x81 /* Gamma Curve 4th segment input end point */
159#define GAM5 0x82 /* Gamma Curve 5th segment input end point */
160#define GAM6 0x83 /* Gamma Curve 6th segment input end point */
161#define GAM7 0x84 /* Gamma Curve 7th segment input end point */
162#define GAM8 0x85 /* Gamma Curve 8th segment input end point */
163#define GAM9 0x86 /* Gamma Curve 9th segment input end point */
164#define GAM10 0x87 /* Gamma Curve 10th segment input end point */
165#define GAM11 0x88 /* Gamma Curve 11th segment input end point */
166#define GAM12 0x89 /* Gamma Curve 12th segment input end point */
167#define GAM13 0x8A /* Gamma Curve 13th segment input end point */
168#define GAM14 0x8B /* Gamma Curve 14th segment input end point */
169#define GAM15 0x8C /* Gamma Curve 15th segment input end point */
170#define SLOP 0x8D /* Gamma curve highest segment slope */
171#define DNSTH 0x8E /* De-noise threshold */
172#define EDGE0 0x8F /* Edge enhancement control 0 */
173#define EDGE1 0x90 /* Edge enhancement control 1 */
174#define DNSOFF 0x91 /* Auto De-noise threshold control */
175#define EDGE2 0x92 /* Edge enhancement strength low point control */
176#define EDGE3 0x93 /* Edge enhancement strength high point control */
177#define MTX1 0x94 /* Matrix coefficient 1 */
178#define MTX2 0x95 /* Matrix coefficient 2 */
179#define MTX3 0x96 /* Matrix coefficient 3 */
180#define MTX4 0x97 /* Matrix coefficient 4 */
181#define MTX5 0x98 /* Matrix coefficient 5 */
182#define MTX6 0x99 /* Matrix coefficient 6 */
183#define MTX_CTRL 0x9A /* Matrix control */
184#define BRIGHT 0x9B /* Brightness control */
185#define CNTRST 0x9C /* Contrast control */
186#define CNTRST_CTRL 0x9D /* Contrast control center */
187#define UVAD_J0 0x9E /* Auto UV adjust contrast 0 */
188#define UVAD_J1 0x9F /* Auto UV adjust contrast 1 */
189#define SCAL0 0xA0 /* Scaling control 0 */
190#define SCAL1 0xA1 /* Scaling control 1 */
191#define SCAL2 0xA2 /* Scaling control 2 */
192#define FIFODLYM 0xA3 /* FIFO manual mode delay control */
193#define FIFODLYA 0xA4 /* FIFO auto mode delay control */
194#define SDE 0xA6 /* Special digital effect control */
195#define USAT 0xA7 /* U component saturation control */
196#define VSAT 0xA8 /* V component saturation control */
197/* for ov7720 */
198#define HUE0 0xA9 /* Hue control 0 */
199#define HUE1 0xAA /* Hue control 1 */
200/* for ov7725 */
201#define HUECOS 0xA9 /* Cosine value */
202#define HUESIN 0xAA /* Sine value */
203
204#define SIGN 0xAB /* Sign bit for Hue and contrast */
205#define DSPAUTO 0xAC /* DSP auto function ON/OFF control */
206
207/*
208 * register detail
209 */
210
211/* COM2 */
212#define SOFT_SLEEP_MODE 0x10 /* Soft sleep mode */
213 /* Output drive capability */
214#define OCAP_1x 0x00 /* 1x */
215#define OCAP_2x 0x01 /* 2x */
216#define OCAP_3x 0x02 /* 3x */
217#define OCAP_4x 0x03 /* 4x */
218
219/* COM3 */
220#define SWAP_MASK 0x38
221
222#define VFIMG_ON_OFF 0x80 /* Vertical flip image ON/OFF selection */
223#define HMIMG_ON_OFF 0x40 /* Horizontal mirror image ON/OFF selection */
224#define SWAP_RGB 0x20 /* Swap B/R output sequence in RGB mode */
225#define SWAP_YUV 0x10 /* Swap Y/UV output sequence in YUV mode */
226#define SWAP_ML 0x08 /* Swap output MSB/LSB */
227 /* Tri-state option for output clock */
228#define NOTRI_CLOCK 0x04 /* 0: Tri-state at this period */
229 /* 1: No tri-state at this period */
230 /* Tri-state option for output data */
231#define NOTRI_DATA 0x02 /* 0: Tri-state at this period */
232 /* 1: No tri-state at this period */
233#define SCOLOR_TEST 0x01 /* Sensor color bar test pattern */
234
235/* COM4 */
236 /* PLL frequency control */
237#define PLL_BYPASS 0x00 /* 00: Bypass PLL */
238#define PLL_4x 0x40 /* 01: PLL 4x */
239#define PLL_6x 0x80 /* 10: PLL 6x */
240#define PLL_8x 0xc0 /* 11: PLL 8x */
241 /* AEC evaluate window */
242#define AEC_FULL 0x00 /* 00: Full window */
243#define AEC_1p2 0x10 /* 01: 1/2 window */
244#define AEC_1p4 0x20 /* 10: 1/4 window */
245#define AEC_2p3 0x30 /* 11: Low 2/3 window */
246
247/* COM5 */
248#define AFR_ON_OFF 0x80 /* Auto frame rate control ON/OFF selection */
249#define AFR_SPPED 0x40 /* Auto frame rate control speed selection */
250 /* Auto frame rate max rate control */
251#define AFR_NO_RATE 0x00 /* No reduction of frame rate */
252#define AFR_1p2 0x10 /* Max reduction to 1/2 frame rate */
253#define AFR_1p4 0x20 /* Max reduction to 1/4 frame rate */
254#define AFR_1p8 0x30 /* Max reduction to 1/8 frame rate */
255 /* Auto frame rate active point control */
256#define AF_2x 0x00 /* Add frame when AGC reaches 2x gain */
257#define AF_4x 0x04 /* Add frame when AGC reaches 4x gain */
258#define AF_8x 0x08 /* Add frame when AGC reaches 8x gain */
259#define AF_16x 0x0c /* Add frame when AGC reaches 16x gain */
260 /* AEC max step control */
261#define AEC_NO_LIMIT 0x01 /* 0 : AEC increase step has limit */
262 /* 1 : No limit to AEC increase step */
263
264/* COM7 */
265 /* SCCB Register Reset */
266#define SCCB_RESET 0x80 /* 0 : No change */
267 /* 1 : Resets all registers to default */
268 /* Resolution selection */
269#define SLCT_MASK 0x40 /* Mask of VGA or QVGA */
270#define SLCT_VGA 0x00 /* 0 : VGA */
271#define SLCT_QVGA 0x40 /* 1 : QVGA */
272#define ITU656_ON_OFF 0x20 /* ITU656 protocol ON/OFF selection */
273 /* RGB output format control */
274#define FMT_GBR422 0x00 /* 00 : GBR 4:2:2 */
275#define FMT_RGB565 0x04 /* 01 : RGB 565 */
276#define FMT_RGB555 0x08 /* 10 : RGB 555 */
277#define FMT_RGB444 0x0c /* 11 : RGB 444 */
278 /* Output format control */
279#define OFMT_YUV 0x00 /* 00 : YUV */
280#define OFMT_P_BRAW 0x01 /* 01 : Processed Bayer RAW */
281#define OFMT_RGB 0x02 /* 10 : RGB */
282#define OFMT_BRAW 0x03 /* 11 : Bayer RAW */
283
284/* COM8 */
285#define FAST_ALGO 0x80 /* Enable fast AGC/AEC algorithm */
286 /* AEC Step size limit */
287#define UNLMT_STEP 0x40 /* 0 : Step size is limited */
288 /* 1 : Unlimited step size */
289#define BNDF_ON_OFF 0x20 /* Banding filter ON/OFF */
290#define AEC_BND 0x10 /* Enable AEC below banding value */
291#define AEC_ON_OFF 0x08 /* Fine AEC ON/OFF control */
292#define AGC_ON 0x04 /* AGC Enable */
293#define AWB_ON 0x02 /* AWB Enable */
294#define AEC_ON 0x01 /* AEC Enable */
295
296/* COM9 */
297#define BASE_AECAGC 0x80 /* Histogram or average based AEC/AGC */
298 /* Automatic gain ceiling - maximum AGC value */
299#define GAIN_2x 0x00 /* 000 : 2x */
300#define GAIN_4x 0x10 /* 001 : 4x */
301#define GAIN_8x 0x20 /* 010 : 8x */
302#define GAIN_16x 0x30 /* 011 : 16x */
303#define GAIN_32x 0x40 /* 100 : 32x */
304#define GAIN_64x 0x50 /* 101 : 64x */
305#define GAIN_128x 0x60 /* 110 : 128x */
306#define DROP_VSYNC 0x04 /* Drop VSYNC output of corrupt frame */
307#define DROP_HREF 0x02 /* Drop HREF output of corrupt frame */
308
309/* COM11 */
310#define SGLF_ON_OFF 0x02 /* Single frame ON/OFF selection */
311#define SGLF_TRIG 0x01 /* Single frame transfer trigger */
312
313/* EXHCH */
314#define VSIZE_LSB 0x04 /* Vertical data output size LSB */
315
316/* DSP_CTRL1 */
317#define FIFO_ON 0x80 /* FIFO enable/disable selection */
318#define UV_ON_OFF 0x40 /* UV adjust function ON/OFF selection */
319#define YUV444_2_422 0x20 /* YUV444 to 422 UV channel option selection */
320#define CLR_MTRX_ON_OFF 0x10 /* Color matrix ON/OFF selection */
321#define INTPLT_ON_OFF 0x08 /* Interpolation ON/OFF selection */
322#define GMM_ON_OFF 0x04 /* Gamma function ON/OFF selection */
323#define AUTO_BLK_ON_OFF 0x02 /* Black defect auto correction ON/OFF */
324#define AUTO_WHT_ON_OFF 0x01 /* White defect auto correction ON/OFF */
325
326/* DSP_CTRL3 */
327#define UV_MASK 0x80 /* UV output sequence option */
328#define UV_ON 0x80 /* ON */
329#define UV_OFF 0x00 /* OFF */
330#define CBAR_MASK 0x20 /* DSP Color bar mask */
331#define CBAR_ON 0x20 /* ON */
332#define CBAR_OFF 0x00 /* OFF */
333
334/* HSTART */
335#define HST_VGA 0x23
336#define HST_QVGA 0x3F
337
338/* HSIZE */
339#define HSZ_VGA 0xA0
340#define HSZ_QVGA 0x50
341
342/* VSTART */
343#define VST_VGA 0x07
344#define VST_QVGA 0x03
345
346/* VSIZE */
347#define VSZ_VGA 0xF0
348#define VSZ_QVGA 0x78
349
350/* HOUTSIZE */
351#define HOSZ_VGA 0xA0
352#define HOSZ_QVGA 0x50
353
354/* VOUTSIZE */
355#define VOSZ_VGA 0xF0
356#define VOSZ_QVGA 0x78
357
358/*
359 * bit configure (32 bit)
360 * this is used in struct ov772x_color_format :: option
361 */
362#define OP_UV 0x00000001
363#define OP_SWAP_RGB 0x00000002
364
365/*
366 * ID
367 */
368#define OV7720 0x7720
369#define OV7725 0x7721
370#define VERSION(pid, ver) ((pid<<8)|(ver&0xFF))
371
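/*
 * Editor's note: the probe routine below reads the PID and VER registers and
 * compares VERSION(pid, ver) against the two constants above.  The OV7725
 * case therefore only matches when PID reads back 0x77 and VER reads 0x21,
 * since VERSION(0x77, 0x21) == (0x77 << 8) | 0x21 == 0x7721.
 */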
372/*
373 * struct
374 */
375struct regval_list {
376 unsigned char reg_num;
377 unsigned char value;
378};
379
380struct ov772x_color_format {
381 char *name;
382 __u32 fourcc;
383 const struct regval_list *regs;
384 unsigned int option;
385};
386
387struct ov772x_win_size {
388 char *name;
389 __u32 width;
390 __u32 height;
391 unsigned char com7_bit;
392 const struct regval_list *regs;
393};
394
395struct ov772x_priv {
396 struct ov772x_camera_info *info;
397 struct i2c_client *client;
398 struct soc_camera_device icd;
399 const struct ov772x_color_format *fmt;
400 const struct ov772x_win_size *win;
401 int model;
402};
403
404#define ENDMARKER { 0xff, 0xff }
405
406/*
407 * register setting for color format
408 */
409static const struct regval_list ov772x_RGB555_regs[] = {
410 { COM3, 0x00 },
411 { COM7, FMT_RGB555 | OFMT_RGB },
412 ENDMARKER,
413};
414
415static const struct regval_list ov772x_RGB565_regs[] = {
416 { COM3, 0x00 },
417 { COM7, FMT_RGB565 | OFMT_RGB },
418 ENDMARKER,
419};
420
421static const struct regval_list ov772x_YYUV_regs[] = {
422 { COM3, SWAP_YUV },
423 { COM7, OFMT_YUV },
424 ENDMARKER,
425};
426
427static const struct regval_list ov772x_UVYY_regs[] = {
428 { COM3, 0x00 },
429 { COM7, OFMT_YUV },
430 ENDMARKER,
431};
432
433
434/*
435 * register setting for window size
436 */
437static const struct regval_list ov772x_qvga_regs[] = {
438 { HSTART, HST_QVGA },
439 { HSIZE, HSZ_QVGA },
440 { VSTART, VST_QVGA },
441 { VSIZE, VSZ_QVGA },
442 { HOUTSIZE, HOSZ_QVGA },
443 { VOUTSIZE, VOSZ_QVGA },
444 ENDMARKER,
445};
446
447static const struct regval_list ov772x_vga_regs[] = {
448 { HSTART, HST_VGA },
449 { HSIZE, HSZ_VGA },
450 { VSTART, VST_VGA },
451 { VSIZE, VSZ_VGA },
452 { HOUTSIZE, HOSZ_VGA },
453 { VOUTSIZE, VOSZ_VGA },
454 ENDMARKER,
455};
456
457/*
458 * supported format list
459 */
460
461#define SETFOURCC(type) .name = (#type), .fourcc = (V4L2_PIX_FMT_ ## type)
462static const struct soc_camera_data_format ov772x_fmt_lists[] = {
463 {
464 SETFOURCC(YUYV),
465 .depth = 16,
466 .colorspace = V4L2_COLORSPACE_JPEG,
467 },
468 {
469 SETFOURCC(YVYU),
470 .depth = 16,
471 .colorspace = V4L2_COLORSPACE_JPEG,
472 },
473 {
474 SETFOURCC(UYVY),
475 .depth = 16,
476 .colorspace = V4L2_COLORSPACE_JPEG,
477 },
478 {
479 SETFOURCC(RGB555),
480 .depth = 16,
481 .colorspace = V4L2_COLORSPACE_SRGB,
482 },
483 {
484 SETFOURCC(RGB555X),
485 .depth = 16,
486 .colorspace = V4L2_COLORSPACE_SRGB,
487 },
488 {
489 SETFOURCC(RGB565),
490 .depth = 16,
491 .colorspace = V4L2_COLORSPACE_SRGB,
492 },
493 {
494 SETFOURCC(RGB565X),
495 .depth = 16,
496 .colorspace = V4L2_COLORSPACE_SRGB,
497 },
498};
499
500/*
501 * color format list
502 */
503#define T_YUYV 0
504static const struct ov772x_color_format ov772x_cfmts[] = {
505 [T_YUYV] = {
506 SETFOURCC(YUYV),
507 .regs = ov772x_YYUV_regs,
508 },
509 {
510 SETFOURCC(YVYU),
511 .regs = ov772x_YYUV_regs,
512 .option = OP_UV,
513 },
514 {
515 SETFOURCC(UYVY),
516 .regs = ov772x_UVYY_regs,
517 },
518 {
519 SETFOURCC(RGB555),
520 .regs = ov772x_RGB555_regs,
521 .option = OP_SWAP_RGB,
522 },
523 {
524 SETFOURCC(RGB555X),
525 .regs = ov772x_RGB555_regs,
526 },
527 {
528 SETFOURCC(RGB565),
529 .regs = ov772x_RGB565_regs,
530 .option = OP_SWAP_RGB,
531 },
532 {
533 SETFOURCC(RGB565X),
534 .regs = ov772x_RGB565_regs,
535 },
536};
537
538
539/*
540 * window size list
541 */
542#define VGA_WIDTH 640
543#define VGA_HEIGHT 480
544#define QVGA_WIDTH 320
545#define QVGA_HEIGHT 240
546#define MAX_WIDTH VGA_WIDTH
547#define MAX_HEIGHT VGA_HEIGHT
548
549static const struct ov772x_win_size ov772x_win_vga = {
550 .name = "VGA",
551 .width = VGA_WIDTH,
552 .height = VGA_HEIGHT,
553 .com7_bit = SLCT_VGA,
554 .regs = ov772x_vga_regs,
555};
556
557static const struct ov772x_win_size ov772x_win_qvga = {
558 .name = "QVGA",
559 .width = QVGA_WIDTH,
560 .height = QVGA_HEIGHT,
561 .com7_bit = SLCT_QVGA,
562 .regs = ov772x_qvga_regs,
563};
564
565
566/*
567 * general function
568 */
569
570static int ov772x_write_array(struct i2c_client *client,
571 const struct regval_list *vals)
572{
573 while (vals->reg_num != 0xff) {
574 int ret = i2c_smbus_write_byte_data(client,
575 vals->reg_num,
576 vals->value);
577 if (ret < 0)
578 return ret;
579 vals++;
580 }
581 return 0;
582}
583
584static int ov772x_mask_set(struct i2c_client *client,
585 u8 command,
586 u8 mask,
587 u8 set)
588{
589 s32 val = i2c_smbus_read_byte_data(client, command);
590 val &= ~mask;
591 val |= set;
592
593 return i2c_smbus_write_byte_data(client, command, val);
594}
595
596static int ov772x_reset(struct i2c_client *client)
597{
598 int ret = i2c_smbus_write_byte_data(client, COM7, SCCB_RESET);
599 msleep(1);
600 return ret;
601}
602
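/*
 * Editor's illustration, not part of the driver: ov772x_mask_set() above is
 * a read-modify-write helper, so only the masked bits change.  Enabling the
 * DSP colour-bar test pattern, for instance, would touch nothing but the
 * CBAR bit of DSP_CTRL3:
 *
 *	ret = ov772x_mask_set(priv->client, DSP_CTRL3, CBAR_MASK, CBAR_ON);
 *
 * Larger register sequences go through ov772x_write_array(), which walks a
 * regval_list table until it reaches the ENDMARKER sentinel.
 */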
603/*
604 * soc_camera_ops function
605 */
606
607static int ov772x_init(struct soc_camera_device *icd)
608{
609 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
610 int ret = 0;
611
612 if (priv->info->link.power) {
613 ret = priv->info->link.power(&priv->client->dev, 1);
614 if (ret < 0)
615 return ret;
616 }
617
618 if (priv->info->link.reset)
619 ret = priv->info->link.reset(&priv->client->dev);
620
621 return ret;
622}
623
624static int ov772x_release(struct soc_camera_device *icd)
625{
626 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
627 int ret = 0;
628
629 if (priv->info->link.power)
630 ret = priv->info->link.power(&priv->client->dev, 0);
631
632 return ret;
633}
634
635static int ov772x_start_capture(struct soc_camera_device *icd)
636{
637 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
638 int ret;
639
640 if (!priv->win)
641 priv->win = &ov772x_win_vga;
642 if (!priv->fmt)
643 priv->fmt = &ov772x_cfmts[T_YUYV];
644
645 /*
646 * reset hardware
647 */
648 ov772x_reset(priv->client);
649
650 /*
651 * set color format
652 */
653 ret = ov772x_write_array(priv->client, priv->fmt->regs);
654 if (ret < 0)
655 goto start_end;
656
657 /*
658 * set size format
659 */
660 ret = ov772x_write_array(priv->client, priv->win->regs);
661 if (ret < 0)
662 goto start_end;
663
664 /*
665 * set COM7 bit ( QVGA or VGA )
666 */
667 ret = ov772x_mask_set(priv->client,
668 COM7, SLCT_MASK, priv->win->com7_bit);
669 if (ret < 0)
670 goto start_end;
671
672 /*
673 * set UV setting
674 */
675 if (priv->fmt->option & OP_UV) {
676 ret = ov772x_mask_set(priv->client,
677 DSP_CTRL3, UV_MASK, UV_ON);
678 if (ret < 0)
679 goto start_end;
680 }
681
682 /*
683 * set SWAP setting
684 */
685 if (priv->fmt->option & OP_SWAP_RGB) {
686 ret = ov772x_mask_set(priv->client,
687 COM3, SWAP_MASK, SWAP_RGB);
688 if (ret < 0)
689 goto start_end;
690 }
691
692 dev_dbg(&icd->dev,
693 "format %s, win %s\n", priv->fmt->name, priv->win->name);
694
695start_end:
696 priv->fmt = NULL;
697 priv->win = NULL;
698
699 return ret;
700}
701
702static int ov772x_stop_capture(struct soc_camera_device *icd)
703{
704 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
705 ov772x_reset(priv->client);
706 return 0;
707}
708
709static int ov772x_set_bus_param(struct soc_camera_device *icd,
710 unsigned long flags)
711{
712 return 0;
713}
714
715static unsigned long ov772x_query_bus_param(struct soc_camera_device *icd)
716{
717 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
718 struct soc_camera_link *icl = priv->client->dev.platform_data;
719 unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
720 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
721 priv->info->buswidth;
722
723 return soc_camera_apply_sensor_flags(icl, flags);
724}
725
726static int ov772x_get_chip_id(struct soc_camera_device *icd,
727 struct v4l2_chip_ident *id)
728{
729 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
730
731 id->ident = priv->model;
732 id->revision = 0;
733
734 return 0;
735}
736
737#ifdef CONFIG_VIDEO_ADV_DEBUG
738static int ov772x_get_register(struct soc_camera_device *icd,
739 struct v4l2_register *reg)
740{
741 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
742 int ret;
743
744 if (reg->reg > 0xff)
745 return -EINVAL;
746
747 ret = i2c_smbus_read_byte_data(priv->client, reg->reg);
748 if (ret < 0)
749 return ret;
750
751 reg->val = (__u64)ret;
752
753 return 0;
754}
755
756static int ov772x_set_register(struct soc_camera_device *icd,
757 struct v4l2_register *reg)
758{
759 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
760
761 if (reg->reg > 0xff ||
762 reg->val > 0xff)
763 return -EINVAL;
764
765 return i2c_smbus_write_byte_data(priv->client, reg->reg, reg->val);
766}
767#endif
768
769static const struct ov772x_win_size*
770ov772x_select_win(u32 width, u32 height)
771{
772 __u32 diff;
773 const struct ov772x_win_size *win;
774
775 /* default is QVGA */
776 diff = abs(width - ov772x_win_qvga.width) +
777 abs(height - ov772x_win_qvga.height);
778 win = &ov772x_win_qvga;
779
780 /* VGA */
781 if (diff >
782 abs(width - ov772x_win_vga.width) +
783 abs(height - ov772x_win_vga.height))
784 win = &ov772x_win_vga;
785
786 return win;
787}
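/*
 * Editor's worked example: for a 640x480 request the QVGA distance is
 * |640 - 320| + |480 - 240| = 560 while the VGA distance is 0, so the VGA
 * window wins; for 160x120 the distances are 280 (QVGA) versus 840 (VGA),
 * so the default QVGA window is kept.
 */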
788
789
790static int ov772x_set_fmt(struct soc_camera_device *icd,
791 __u32 pixfmt,
792 struct v4l2_rect *rect)
793{
794 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
795 int ret = -EINVAL;
796 int i;
797
798 /*
799 * select format
800 */
801 priv->fmt = NULL;
802 for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) {
803 if (pixfmt == ov772x_cfmts[i].fourcc) {
804 priv->fmt = ov772x_cfmts + i;
805 ret = 0;
806 break;
807 }
808 }
809
810 /*
811 * select win
812 */
813 priv->win = ov772x_select_win(rect->width, rect->height);
814
815 return ret;
816}
817
818static int ov772x_try_fmt(struct soc_camera_device *icd,
819 struct v4l2_format *f)
820{
821 struct v4l2_pix_format *pix = &f->fmt.pix;
822 const struct ov772x_win_size *win;
823
824 /*
825 * select suitable win
826 */
827 win = ov772x_select_win(pix->width, pix->height);
828
829 pix->width = win->width;
830 pix->height = win->height;
831 pix->field = V4L2_FIELD_NONE;
832
833 return 0;
834}
835
836static int ov772x_video_probe(struct soc_camera_device *icd)
837{
838 struct ov772x_priv *priv = container_of(icd, struct ov772x_priv, icd);
839 u8 pid, ver;
840 const char *devname;
841
842 /*
843 * We must have a parent by now. And it cannot be a wrong one.
844 * So this entire test is completely redundant.
845 */
846 if (!icd->dev.parent ||
847 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
848 return -ENODEV;
849
850 /*
851 * ov772x only uses an 8- or 10-bit bus width
852 */
853 if (SOCAM_DATAWIDTH_10 != priv->info->buswidth &&
854 SOCAM_DATAWIDTH_8 != priv->info->buswidth) {
855 dev_err(&icd->dev, "bus width error\n");
856 return -ENODEV;
857 }
858
859 icd->formats = ov772x_fmt_lists;
860 icd->num_formats = ARRAY_SIZE(ov772x_fmt_lists);
861
862 /*
863 * check and show product ID and manufacturer ID
864 */
865 pid = i2c_smbus_read_byte_data(priv->client, PID);
866 ver = i2c_smbus_read_byte_data(priv->client, VER);
867
868 switch (VERSION(pid, ver)) {
869 case OV7720:
870 devname = "ov7720";
871 priv->model = V4L2_IDENT_OV7720;
872 break;
873 case OV7725:
874 devname = "ov7725";
875 priv->model = V4L2_IDENT_OV7725;
876 break;
877 default:
878 dev_err(&icd->dev,
879 "Product ID error %x:%x\n", pid, ver);
880 return -ENODEV;
881 }
882
883 dev_info(&icd->dev,
884 "%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
885 devname,
886 pid,
887 ver,
888 i2c_smbus_read_byte_data(priv->client, MIDH),
889 i2c_smbus_read_byte_data(priv->client, MIDL));
890
891
892 return soc_camera_video_start(icd);
893}
894
895static void ov772x_video_remove(struct soc_camera_device *icd)
896{
897 soc_camera_video_stop(icd);
898}
899
900static struct soc_camera_ops ov772x_ops = {
901 .owner = THIS_MODULE,
902 .probe = ov772x_video_probe,
903 .remove = ov772x_video_remove,
904 .init = ov772x_init,
905 .release = ov772x_release,
906 .start_capture = ov772x_start_capture,
907 .stop_capture = ov772x_stop_capture,
908 .set_fmt = ov772x_set_fmt,
909 .try_fmt = ov772x_try_fmt,
910 .set_bus_param = ov772x_set_bus_param,
911 .query_bus_param = ov772x_query_bus_param,
912 .get_chip_id = ov772x_get_chip_id,
913#ifdef CONFIG_VIDEO_ADV_DEBUG
914 .get_register = ov772x_get_register,
915 .set_register = ov772x_set_register,
916#endif
917};
918
919/*
920 * i2c_driver function
921 */
922
923static int ov772x_probe(struct i2c_client *client,
924 const struct i2c_device_id *did)
925{
926 struct ov772x_priv *priv;
927 struct ov772x_camera_info *info;
928 struct soc_camera_device *icd;
929 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
930 int ret;
931
932 info = client->dev.platform_data;
933 if (!info)
934 return -EINVAL;
935
936 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
937 dev_err(&adapter->dev,
938 "I2C-Adapter doesn't support "
939 "I2C_FUNC_SMBUS_BYTE_DATA\n");
940 return -EIO;
941 }
942
943 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
944 if (!priv)
945 return -ENOMEM;
946
947 priv->info = info;
948 priv->client = client;
949 i2c_set_clientdata(client, priv);
950
951 icd = &priv->icd;
952 icd->ops = &ov772x_ops;
953 icd->control = &client->dev;
954 icd->width_max = MAX_WIDTH;
955 icd->height_max = MAX_HEIGHT;
956 icd->iface = priv->info->link.bus_id;
957
958 ret = soc_camera_device_register(icd);
959
960 if (ret) {
961 i2c_set_clientdata(client, NULL);
962 kfree(priv);
963 }
964
965 return ret;
966}
967
968static int ov772x_remove(struct i2c_client *client)
969{
970 struct ov772x_priv *priv = i2c_get_clientdata(client);
971
972 soc_camera_device_unregister(&priv->icd);
973 i2c_set_clientdata(client, NULL);
974 kfree(priv);
975 return 0;
976}
977
978static const struct i2c_device_id ov772x_id[] = {
979 { "ov772x", 0 },
980 { }
981};
982MODULE_DEVICE_TABLE(i2c, ov772x_id);
983
984static struct i2c_driver ov772x_i2c_driver = {
985 .driver = {
986 .name = "ov772x",
987 },
988 .probe = ov772x_probe,
989 .remove = ov772x_remove,
990 .id_table = ov772x_id,
991};
992
993/*
994 * module function
995 */
996
997static int __init ov772x_module_init(void)
998{
999 return i2c_add_driver(&ov772x_i2c_driver);
1000}
1001
1002static void __exit ov772x_module_exit(void)
1003{
1004 i2c_del_driver(&ov772x_i2c_driver);
1005}
1006
1007module_init(ov772x_module_init);
1008module_exit(ov772x_module_exit);
1009
1010MODULE_DESCRIPTION("SoC Camera driver for ov772x");
1011MODULE_AUTHOR("Kuninori Morimoto");
1012MODULE_LICENSE("GPL v2");
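For context, the platform glue is supplied through struct ov772x_camera_info
(declared in media/ov772x.h, included above). From the dereferences in this
file it carries at least a buswidth field and an embedded struct
soc_camera_link named link, whose bus_id must match the soc-camera host and
whose optional power/reset hooks are called from ov772x_init() and
ov772x_release(). A hedged board-code sketch; the 0x21 I2C address and the
my_board_camera_power() hook are placeholders, not taken from this patch:

static struct ov772x_camera_info my_board_ov772x_info = {
	.buswidth = SOCAM_DATAWIDTH_8,
	.link = {
		.bus_id	= 0,			/* soc-camera host number */
		.power	= my_board_camera_power,	/* optional hook */
	},
};

static struct i2c_board_info my_board_camera_i2c = {
	I2C_BOARD_INFO("ov772x", 0x21),		/* placeholder address */
	.platform_data = &my_board_ov772x_info,
};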
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 994807818aa2..45730fac1570 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -10,8 +10,8 @@
10 * 14478 Potsdam, Germany 10 * 14478 Potsdam, Germany
11 * 11 *
12 * Most of this code is directly derived from his userspace driver. 12 * Most of this code is directly derived from his userspace driver.
13 * His driver works so send any reports to alan@redhat.com unless the 13 * His driver works so send any reports to alan@lxorguk.ukuu.org.uk
14 * userspace driver also doesn't work for you... 14 * unless the userspace driver also doesn't work for you...
15 * 15 *
16 * Changes: 16 * Changes:
17 * 08/07/2003 Daniele Bellucci <bellucda@tiscali.it> 17 * 08/07/2003 Daniele Bellucci <bellucda@tiscali.it>
@@ -680,8 +680,7 @@ static int pms_capture(struct pms_device *dev, char __user *buf, int rgb555, int
680 * Video4linux interfacing 680 * Video4linux interfacing
681 */ 681 */
682 682
683static int pms_do_ioctl(struct inode *inode, struct file *file, 683static int pms_do_ioctl(struct file *file, unsigned int cmd, void *arg)
684 unsigned int cmd, void *arg)
685{ 684{
686 struct video_device *dev = video_devdata(file); 685 struct video_device *dev = video_devdata(file);
687 struct pms_device *pd=(struct pms_device *)dev; 686 struct pms_device *pd=(struct pms_device *)dev;
@@ -866,7 +865,7 @@ static int pms_do_ioctl(struct inode *inode, struct file *file,
866static int pms_ioctl(struct inode *inode, struct file *file, 865static int pms_ioctl(struct inode *inode, struct file *file,
867 unsigned int cmd, unsigned long arg) 866 unsigned int cmd, unsigned long arg)
868{ 867{
869 return video_usercopy(inode, file, cmd, arg, pms_do_ioctl); 868 return video_usercopy(file, cmd, arg, pms_do_ioctl);
870} 869}
871 870
872static ssize_t pms_read(struct file *file, char __user *buf, 871static ssize_t pms_read(struct file *file, char __user *buf,
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 5b81ba469641..4358079f1966 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -2395,7 +2395,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
2395 2395
2396 scnprintf(hdw->bus_info,sizeof(hdw->bus_info), 2396 scnprintf(hdw->bus_info,sizeof(hdw->bus_info),
2397 "usb %s address %d", 2397 "usb %s address %d",
2398 hdw->usb_dev->dev.bus_id, 2398 dev_name(&hdw->usb_dev->dev),
2399 hdw->usb_dev->devnum); 2399 hdw->usb_dev->devnum);
2400 2400
2401 ifnum = hdw->usb_intf->cur_altsetting->desc.bInterfaceNumber; 2401 ifnum = hdw->usb_intf->cur_altsetting->desc.bInterfaceNumber;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
index 733680f21317..e641cd971453 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -628,10 +628,10 @@ static void class_dev_create(struct pvr2_sysfs *sfp,
628 628
629 class_dev->class = &class_ptr->class; 629 class_dev->class = &class_ptr->class;
630 if (pvr2_hdw_get_sn(sfp->channel.hdw)) { 630 if (pvr2_hdw_get_sn(sfp->channel.hdw)) {
631 snprintf(class_dev->bus_id, BUS_ID_SIZE, "sn-%lu", 631 dev_set_name(class_dev, "sn-%lu",
632 pvr2_hdw_get_sn(sfp->channel.hdw)); 632 pvr2_hdw_get_sn(sfp->channel.hdw));
633 } else if (pvr2_hdw_get_unit_number(sfp->channel.hdw) >= 0) { 633 } else if (pvr2_hdw_get_unit_number(sfp->channel.hdw) >= 0) {
634 snprintf(class_dev->bus_id, BUS_ID_SIZE, "unit-%c", 634 dev_set_name(class_dev, "unit-%c",
635 pvr2_hdw_get_unit_number(sfp->channel.hdw) + 'a'); 635 pvr2_hdw_get_unit_number(sfp->channel.hdw) + 'a');
636 } else { 636 } else {
637 kfree(class_dev); 637 kfree(class_dev);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 97ed95957992..52af1c435965 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -168,8 +168,7 @@ static const char *get_v4l_name(int v4l_type)
168 * This is part of Video 4 Linux API. The procedure handles ioctl() calls. 168 * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
169 * 169 *
170 */ 170 */
171static int __pvr2_v4l2_do_ioctl(struct file *file, 171static int pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
172 unsigned int cmd, void *arg)
173{ 172{
174 struct pvr2_v4l2_fh *fh = file->private_data; 173 struct pvr2_v4l2_fh *fh = file->private_data;
175 struct pvr2_v4l2 *vp = fh->vhead; 174 struct pvr2_v4l2 *vp = fh->vhead;
@@ -864,7 +863,7 @@ static int __pvr2_v4l2_do_ioctl(struct file *file,
864 863
865 default : 864 default :
866 ret = v4l_compat_translate_ioctl(file, cmd, 865 ret = v4l_compat_translate_ioctl(file, cmd,
867 arg, __pvr2_v4l2_do_ioctl); 866 arg, pvr2_v4l2_do_ioctl);
868 } 867 }
869 868
870 pvr2_hdw_commit_ctl(hdw); 869 pvr2_hdw_commit_ctl(hdw);
@@ -890,12 +889,6 @@ static int __pvr2_v4l2_do_ioctl(struct file *file,
890 return ret; 889 return ret;
891} 890}
892 891
893static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
894 unsigned int cmd, void *arg)
895{
896 return __pvr2_v4l2_do_ioctl(file, cmd, arg);
897}
898
899static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip) 892static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
900{ 893{
901 int num = dip->devbase.num; 894 int num = dip->devbase.num;
@@ -963,7 +956,7 @@ static int pvr2_v4l2_ioctl(struct inode *inode, struct file *file,
963#define IVTV_IOC_G_CODEC 0xFFEE7703 956#define IVTV_IOC_G_CODEC 0xFFEE7703
964#define IVTV_IOC_S_CODEC 0xFFEE7704 957#define IVTV_IOC_S_CODEC 0xFFEE7704
965 if (cmd == IVTV_IOC_G_CODEC || cmd == IVTV_IOC_S_CODEC) return 0; 958 if (cmd == IVTV_IOC_G_CODEC || cmd == IVTV_IOC_S_CODEC) return 0;
966 return video_usercopy(inode, file, cmd, arg, pvr2_v4l2_do_ioctl); 959 return video_usercopy(file, cmd, arg, pvr2_v4l2_do_ioctl);
967} 960}
968 961
969 962
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index f3897a3fdb75..1ce9da167b7e 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -1412,7 +1412,7 @@ static int pwc_video_ioctl(struct inode *inode, struct file *file,
1412 1412
1413 mutex_lock(&pdev->modlock); 1413 mutex_lock(&pdev->modlock);
1414 if (!pdev->unplugged) 1414 if (!pdev->unplugged)
1415 r = video_usercopy(inode, file, cmd, arg, pwc_video_do_ioctl); 1415 r = video_usercopy(file, cmd, arg, pwc_video_do_ioctl);
1416 mutex_unlock(&pdev->modlock); 1416 mutex_unlock(&pdev->modlock);
1417out: 1417out:
1418 return r; 1418 return r;
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 76a1376c9751..d7c147328e35 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -337,8 +337,7 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
337 337
338} 338}
339 339
340int pwc_video_do_ioctl(struct inode *inode, struct file *file, 340int pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
341 unsigned int cmd, void *arg)
342{ 341{
343 struct video_device *vdev = video_devdata(file); 342 struct video_device *vdev = video_devdata(file);
344 struct pwc_device *pdev; 343 struct pwc_device *pdev;
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 74178754b39b..c046a2535668 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -340,8 +340,7 @@ extern int pwc_camera_power(struct pwc_device *pdev, int power);
340extern int pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg); 340extern int pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg);
341 341
342/** Functions in pwc-v4l.c */ 342/** Functions in pwc-v4l.c */
343extern int pwc_video_do_ioctl(struct inode *inode, struct file *file, 343extern int pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg);
344 unsigned int cmd, void *arg);
345 344
346/** pwc-uncompress.c */ 345/** pwc-uncompress.c */
347/* Expand frame to image, possibly including decompression. Uses read_frame and fill_image */ 346/* Expand frame to image, possibly including decompression. Uses read_frame and fill_image */
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index eb6be5802928..9d33de22cc48 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -25,7 +25,6 @@
25#include <linux/version.h> 25#include <linux/version.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/mutex.h>
29#include <linux/clk.h> 28#include <linux/clk.h>
30 29
31#include <media/v4l2-common.h> 30#include <media/v4l2-common.h>
@@ -39,9 +38,106 @@
39#include <mach/pxa-regs.h> 38#include <mach/pxa-regs.h>
40#include <mach/camera.h> 39#include <mach/camera.h>
41 40
41#include "pxa_camera.h"
42
42#define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5) 43#define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5)
43#define PXA_CAM_DRV_NAME "pxa27x-camera" 44#define PXA_CAM_DRV_NAME "pxa27x-camera"
44 45
46/* Camera Interface */
47#define CICR0 0x0000
48#define CICR1 0x0004
49#define CICR2 0x0008
50#define CICR3 0x000C
51#define CICR4 0x0010
52#define CISR 0x0014
53#define CIFR 0x0018
54#define CITOR 0x001C
55#define CIBR0 0x0028
56#define CIBR1 0x0030
57#define CIBR2 0x0038
58
59#define CICR0_DMAEN (1 << 31) /* DMA request enable */
60#define CICR0_PAR_EN (1 << 30) /* Parity enable */
61#define CICR0_SL_CAP_EN (1 << 29) /* Capture enable for slave mode */
62#define CICR0_ENB (1 << 28) /* Camera interface enable */
63#define CICR0_DIS (1 << 27) /* Camera interface disable */
64#define CICR0_SIM (0x7 << 24) /* Sensor interface mode mask */
65#define CICR0_TOM (1 << 9) /* Time-out mask */
66#define CICR0_RDAVM (1 << 8) /* Receive-data-available mask */
67#define CICR0_FEM (1 << 7) /* FIFO-empty mask */
68#define CICR0_EOLM (1 << 6) /* End-of-line mask */
69#define CICR0_PERRM (1 << 5) /* Parity-error mask */
70#define CICR0_QDM (1 << 4) /* Quick-disable mask */
71#define CICR0_CDM (1 << 3) /* Disable-done mask */
72#define CICR0_SOFM (1 << 2) /* Start-of-frame mask */
73#define CICR0_EOFM (1 << 1) /* End-of-frame mask */
74#define CICR0_FOM (1 << 0) /* FIFO-overrun mask */
75
76#define CICR1_TBIT (1 << 31) /* Transparency bit */
77#define CICR1_RGBT_CONV (0x3 << 29) /* RGBT conversion mask */
78#define CICR1_PPL (0x7ff << 15) /* Pixels per line mask */
79#define CICR1_RGB_CONV (0x7 << 12) /* RGB conversion mask */
80#define CICR1_RGB_F (1 << 11) /* RGB format */
81#define CICR1_YCBCR_F (1 << 10) /* YCbCr format */
82#define CICR1_RGB_BPP (0x7 << 7) /* RGB bits per pixel mask */
83#define CICR1_RAW_BPP (0x3 << 5) /* Raw bits per pixel mask */
84#define CICR1_COLOR_SP (0x3 << 3) /* Color space mask */
85#define CICR1_DW (0x7 << 0) /* Data width mask */
86
87#define CICR2_BLW (0xff << 24) /* Beginning-of-line pixel clock
88 wait count mask */
89#define CICR2_ELW (0xff << 16) /* End-of-line pixel clock
90 wait count mask */
91#define CICR2_HSW (0x3f << 10) /* Horizontal sync pulse width mask */
92#define CICR2_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock
93 wait count mask */
94#define CICR2_FSW (0x7 << 0) /* Frame stabilization
95 wait count mask */
96
97#define CICR3_BFW (0xff << 24) /* Beginning-of-frame line clock
98 wait count mask */
99#define CICR3_EFW (0xff << 16) /* End-of-frame line clock
100 wait count mask */
101#define CICR3_VSW (0x3f << 10) /* Vertical sync pulse width mask */
102#define CICR3_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock
103 wait count mask */
104#define CICR3_LPF (0x7ff << 0) /* Lines per frame mask */
105
106#define CICR4_MCLK_DLY (0x3 << 24) /* MCLK Data Capture Delay mask */
107#define CICR4_PCLK_EN (1 << 23) /* Pixel clock enable */
108#define CICR4_PCP (1 << 22) /* Pixel clock polarity */
109#define CICR4_HSP (1 << 21) /* Horizontal sync polarity */
110#define CICR4_VSP (1 << 20) /* Vertical sync polarity */
111#define CICR4_MCLK_EN (1 << 19) /* MCLK enable */
112#define CICR4_FR_RATE (0x7 << 8) /* Frame rate mask */
113#define CICR4_DIV (0xff << 0) /* Clock divisor mask */
114
115#define CISR_FTO (1 << 15) /* FIFO time-out */
116#define CISR_RDAV_2 (1 << 14) /* Channel 2 receive data available */
117#define CISR_RDAV_1 (1 << 13) /* Channel 1 receive data available */
118#define CISR_RDAV_0 (1 << 12) /* Channel 0 receive data available */
119#define CISR_FEMPTY_2 (1 << 11) /* Channel 2 FIFO empty */
120#define CISR_FEMPTY_1 (1 << 10) /* Channel 1 FIFO empty */
121#define CISR_FEMPTY_0 (1 << 9) /* Channel 0 FIFO empty */
122#define CISR_EOL (1 << 8) /* End of line */
123#define CISR_PAR_ERR (1 << 7) /* Parity error */
124#define CISR_CQD (1 << 6) /* Camera interface quick disable */
125#define CISR_CDD (1 << 5) /* Camera interface disable done */
126#define CISR_SOF (1 << 4) /* Start of frame */
127#define CISR_EOF (1 << 3) /* End of frame */
128#define CISR_IFO_2 (1 << 2) /* FIFO overrun for Channel 2 */
129#define CISR_IFO_1 (1 << 1) /* FIFO overrun for Channel 1 */
130#define CISR_IFO_0 (1 << 0) /* FIFO overrun for Channel 0 */
131
132#define CIFR_FLVL2 (0x7f << 23) /* FIFO 2 level mask */
133#define CIFR_FLVL1 (0x7f << 16) /* FIFO 1 level mask */
134#define CIFR_FLVL0 (0xff << 8) /* FIFO 0 level mask */
135#define CIFR_THL_0 (0x3 << 4) /* Threshold Level for Channel 0 FIFO */
136#define CIFR_RESET_F (1 << 3) /* Reset input FIFOs */
137#define CIFR_FEN2 (1 << 2) /* FIFO enable for channel 2 */
138#define CIFR_FEN1 (1 << 1) /* FIFO enable for channel 1 */
139#define CIFR_FEN0 (1 << 0) /* FIFO enable for channel 0 */
140
45#define CICR0_SIM_MP (0 << 24) 141#define CICR0_SIM_MP (0 << 24)
46#define CICR0_SIM_SP (1 << 24) 142#define CICR0_SIM_SP (1 << 24)
47#define CICR0_SIM_MS (2 << 24) 143#define CICR0_SIM_MS (2 << 24)
@@ -69,8 +165,6 @@
69 CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \ 165 CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \
70 CICR0_EOFM | CICR0_FOM) 166 CICR0_EOFM | CICR0_FOM)
71 167
72static DEFINE_MUTEX(camera_lock);
73
74/* 168/*
75 * Structures 169 * Structures
76 */ 170 */
@@ -120,7 +214,9 @@ struct pxa_camera_dev {
120 struct pxacamera_platform_data *pdata; 214 struct pxacamera_platform_data *pdata;
121 struct resource *res; 215 struct resource *res;
122 unsigned long platform_flags; 216 unsigned long platform_flags;
123 unsigned long platform_mclk_10khz; 217 unsigned long ciclk;
218 unsigned long mclk;
219 u32 mclk_divisor;
124 220
125 struct list_head capture; 221 struct list_head capture;
126 222
@@ -143,8 +239,7 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
143 unsigned int *size) 239 unsigned int *size)
144{ 240{
145 struct soc_camera_device *icd = vq->priv_data; 241 struct soc_camera_device *icd = vq->priv_data;
146 struct soc_camera_host *ici = 242 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
147 to_soc_camera_host(icd->dev.parent);
148 struct pxa_camera_dev *pcdev = ici->priv; 243 struct pxa_camera_dev *pcdev = ici->priv;
149 244
150 dev_dbg(&icd->dev, "count=%d, size=%d\n", *count, *size); 245 dev_dbg(&icd->dev, "count=%d, size=%d\n", *count, *size);
@@ -170,8 +265,7 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
170static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) 265static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
171{ 266{
172 struct soc_camera_device *icd = vq->priv_data; 267 struct soc_camera_device *icd = vq->priv_data;
173 struct soc_camera_host *ici = 268 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
174 to_soc_camera_host(icd->dev.parent);
175 struct pxa_camera_dev *pcdev = ici->priv; 269 struct pxa_camera_dev *pcdev = ici->priv;
176 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb); 270 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
177 int i; 271 int i;
@@ -247,8 +341,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
247 struct videobuf_buffer *vb, enum v4l2_field field) 341 struct videobuf_buffer *vb, enum v4l2_field field)
248{ 342{
249 struct soc_camera_device *icd = vq->priv_data; 343 struct soc_camera_device *icd = vq->priv_data;
250 struct soc_camera_host *ici = 344 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
251 to_soc_camera_host(icd->dev.parent);
252 struct pxa_camera_dev *pcdev = ici->priv; 345 struct pxa_camera_dev *pcdev = ici->priv;
253 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); 346 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
254 int ret; 347 int ret;
@@ -367,8 +460,7 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq,
367 struct videobuf_buffer *vb) 460 struct videobuf_buffer *vb)
368{ 461{
369 struct soc_camera_device *icd = vq->priv_data; 462 struct soc_camera_device *icd = vq->priv_data;
370 struct soc_camera_host *ici = 463 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
371 to_soc_camera_host(icd->dev.parent);
372 struct pxa_camera_dev *pcdev = ici->priv; 464 struct pxa_camera_dev *pcdev = ici->priv;
373 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); 465 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
374 struct pxa_buffer *active; 466 struct pxa_buffer *active;
@@ -385,7 +477,10 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq,
385 active = pcdev->active; 477 active = pcdev->active;
386 478
387 if (!active) { 479 if (!active) {
388 CIFR |= CIFR_RESET_F; 480 unsigned long cifr, cicr0;
481
482 cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
483 __raw_writel(cifr, pcdev->base + CIFR);
389 484
390 for (i = 0; i < pcdev->channels; i++) { 485 for (i = 0; i < pcdev->channels; i++) {
391 DDADR(pcdev->dma_chans[i]) = buf->dmas[i].sg_dma; 486 DDADR(pcdev->dma_chans[i]) = buf->dmas[i].sg_dma;
@@ -394,7 +489,9 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq,
394 } 489 }
395 490
396 pcdev->active = buf; 491 pcdev->active = buf;
397 CICR0 |= CICR0_ENB; 492
493 cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
494 __raw_writel(cicr0, pcdev->base + CICR0);
398 } else { 495 } else {
399 struct pxa_cam_dma *buf_dma; 496 struct pxa_cam_dma *buf_dma;
400 struct pxa_cam_dma *act_dma; 497 struct pxa_cam_dma *act_dma;
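The two hunks above replace direct CIFR/CICR0 register writes with read-modify-write cycles on the ioremapped pcdev->base. A minimal stand-alone sketch of that pattern is below; the uint32_t array standing in for the mapped register window and the rd/wr/ci_set/ci_clear helper names are illustrative assumptions, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define CICR0        0x0000            /* register offsets, as in the patch */
#define CIFR         0x0018
#define CICR0_ENB    (1u << 28)
#define CIFR_RESET_F (1u << 3)

static uint32_t regs[0x40 / 4];        /* stand-in for the ioremapped window */

static uint32_t rd(unsigned off)             { return regs[off / 4]; }
static void     wr(uint32_t v, unsigned off) { regs[off / 4] = v; }

/* read-modify-write helpers mirroring the __raw_readl/__raw_writel sequences */
static void ci_set(unsigned off, uint32_t bits)   { wr(rd(off) | bits, off); }
static void ci_clear(unsigned off, uint32_t bits) { wr(rd(off) & ~bits, off); }

int main(void)
{
	ci_set(CIFR, CIFR_RESET_F);    /* was: CIFR |= CIFR_RESET_F */
	ci_set(CICR0, CICR0_ENB);      /* was: CICR0 |= CICR0_ENB   */
	ci_clear(CICR0, CICR0_ENB);    /* was: CICR0 &= ~CICR0_ENB  */
	printf("CICR0=%#x CIFR=%#x\n", (unsigned)rd(CICR0), (unsigned)rd(CIFR));
	return 0;
}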
@@ -478,6 +575,8 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
478 struct videobuf_buffer *vb, 575 struct videobuf_buffer *vb,
479 struct pxa_buffer *buf) 576 struct pxa_buffer *buf)
480{ 577{
578 unsigned long cicr0;
579
481 /* _init is used to debug races, see comment in pxa_camera_reqbufs() */ 580 /* _init is used to debug races, see comment in pxa_camera_reqbufs() */
482 list_del_init(&vb->queue); 581 list_del_init(&vb->queue);
483 vb->state = VIDEOBUF_DONE; 582 vb->state = VIDEOBUF_DONE;
@@ -490,7 +589,9 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
490 DCSR(pcdev->dma_chans[0]) = 0; 589 DCSR(pcdev->dma_chans[0]) = 0;
491 DCSR(pcdev->dma_chans[1]) = 0; 590 DCSR(pcdev->dma_chans[1]) = 0;
492 DCSR(pcdev->dma_chans[2]) = 0; 591 DCSR(pcdev->dma_chans[2]) = 0;
493 CICR0 &= ~CICR0_ENB; 592
593 cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB;
594 __raw_writel(cicr0, pcdev->base + CICR0);
494 return; 595 return;
495 } 596 }
496 597
@@ -505,6 +606,7 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
505 unsigned long flags; 606 unsigned long flags;
506 u32 status, camera_status, overrun; 607 u32 status, camera_status, overrun;
507 struct videobuf_buffer *vb; 608 struct videobuf_buffer *vb;
609 unsigned long cifr, cicr0;
508 610
509 spin_lock_irqsave(&pcdev->lock, flags); 611 spin_lock_irqsave(&pcdev->lock, flags);
510 612
@@ -527,22 +629,26 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
527 goto out; 629 goto out;
528 } 630 }
529 631
530 camera_status = CISR; 632 camera_status = __raw_readl(pcdev->base + CISR);
531 overrun = CISR_IFO_0; 633 overrun = CISR_IFO_0;
532 if (pcdev->channels == 3) 634 if (pcdev->channels == 3)
533 overrun |= CISR_IFO_1 | CISR_IFO_2; 635 overrun |= CISR_IFO_1 | CISR_IFO_2;
534 if (camera_status & overrun) { 636 if (camera_status & overrun) {
535 dev_dbg(pcdev->dev, "FIFO overrun! CISR: %x\n", camera_status); 637 dev_dbg(pcdev->dev, "FIFO overrun! CISR: %x\n", camera_status);
536 /* Stop the Capture Interface */ 638 /* Stop the Capture Interface */
537 CICR0 &= ~CICR0_ENB; 639 cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB;
640 __raw_writel(cicr0, pcdev->base + CICR0);
641
538 /* Stop DMA */ 642 /* Stop DMA */
539 DCSR(channel) = 0; 643 DCSR(channel) = 0;
540 /* Reset the FIFOs */ 644 /* Reset the FIFOs */
541 CIFR |= CIFR_RESET_F; 645 cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
646 __raw_writel(cifr, pcdev->base + CIFR);
542 /* Enable End-Of-Frame Interrupt */ 647 /* Enable End-Of-Frame Interrupt */
543 CICR0 &= ~CICR0_EOFM; 648 cicr0 &= ~CICR0_EOFM;
649 __raw_writel(cicr0, pcdev->base + CICR0);
544 /* Restart the Capture Interface */ 650 /* Restart the Capture Interface */
545 CICR0 |= CICR0_ENB; 651 __raw_writel(cicr0 | CICR0_ENB, pcdev->base + CICR0);
546 goto out; 652 goto out;
547 } 653 }
548 654
@@ -598,24 +704,43 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q,
598 sizeof(struct pxa_buffer), icd); 704 sizeof(struct pxa_buffer), icd);
599} 705}
600 706
601static int mclk_get_divisor(struct pxa_camera_dev *pcdev) 707static u32 mclk_get_divisor(struct pxa_camera_dev *pcdev)
602{ 708{
603 unsigned int mclk_10khz = pcdev->platform_mclk_10khz; 709 unsigned long mclk = pcdev->mclk;
604 unsigned long div; 710 u32 div;
605 unsigned long lcdclk; 711 unsigned long lcdclk;
606 712
607 lcdclk = clk_get_rate(pcdev->clk) / 10000; 713 lcdclk = clk_get_rate(pcdev->clk);
714 pcdev->ciclk = lcdclk;
608 715
609 /* We verify platform_mclk_10khz != 0, so if anyone breaks it, here 716 /* mclk <= ciclk / 4 (27.4.2) */
610 * they get a nice Oops */ 717 if (mclk > lcdclk / 4) {
611 div = (lcdclk + 2 * mclk_10khz - 1) / (2 * mclk_10khz) - 1; 718 mclk = lcdclk / 4;
719 dev_warn(pcdev->dev, "Limiting master clock to %lu\n", mclk);
720 }
721
722 /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
723 div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
612 724
613 dev_dbg(pcdev->dev, "LCD clock %lukHz, target freq %dkHz, " 725 /* If we're not supplying MCLK, leave it at 0 */
614 "divisor %lu\n", lcdclk * 10, mclk_10khz * 10, div); 726 if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
727 pcdev->mclk = lcdclk / (2 * (div + 1));
728
729 dev_dbg(pcdev->dev, "LCD clock %luHz, target freq %luHz, "
730 "divisor %u\n", lcdclk, mclk, div);
615 731
616 return div; 732 return div;
617} 733}
618 734
735static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev,
736 unsigned long pclk)
737{
738 /* We want a timeout > 1 pixel time, not ">=" */
739 u32 ciclk_per_pixel = pcdev->ciclk / pclk + 1;
740
741 __raw_writel(ciclk_per_pixel, pcdev->base + CITOR);
742}
743
619static void pxa_camera_activate(struct pxa_camera_dev *pcdev) 744static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
620{ 745{
621 struct pxacamera_platform_data *pdata = pcdev->pdata; 746 struct pxacamera_platform_data *pdata = pcdev->pdata;
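As a quick check of the divisor and FIFO-timeout arithmetic introduced above, a stand-alone calculation is shown below; the 52 MHz CI clock and 26 MHz requested master clock are example values only, and pclk == mclk is assumed, as pxa_camera_activate() does.

#include <stdio.h>

int main(void)
{
	unsigned long lcdclk = 52000000;      /* example CI clock rate (assumption) */
	unsigned long mclk   = 26000000;      /* requested master clock (assumption) */

	/* mclk is capped at ciclk / 4, as in mclk_get_divisor() */
	if (mclk > lcdclk / 4)
		mclk = lcdclk / 4;                                      /* -> 13 MHz */

	/* div = ceil(lcdclk / (2 * mclk)) - 1 */
	unsigned long div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;   /* -> 1 */
	unsigned long actual_mclk = lcdclk / (2 * (div + 1));           /* -> 13 MHz */

	/* FIFO timeout: a bit more than one pixel time, in CI clock cycles */
	unsigned long pclk = actual_mclk;                  /* assume pclk == mclk */
	unsigned long ciclk_per_pixel = lcdclk / pclk + 1; /* -> 5, written to CITOR */

	printf("div=%lu mclk=%lu CITOR=%lu\n", div, actual_mclk, ciclk_per_pixel);
	return 0;
}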
@@ -629,7 +754,8 @@ static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
629 pdata->init(pcdev->dev); 754 pdata->init(pcdev->dev);
630 } 755 }
631 756
632 CICR0 = 0x3FF; /* disable all interrupts */ 757 /* disable all interrupts */
758 __raw_writel(0x3ff, pcdev->base + CICR0);
633 759
634 if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) 760 if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
635 cicr4 |= CICR4_PCLK_EN; 761 cicr4 |= CICR4_PCLK_EN;
@@ -642,7 +768,14 @@ static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
642 if (pcdev->platform_flags & PXA_CAMERA_VSP) 768 if (pcdev->platform_flags & PXA_CAMERA_VSP)
643 cicr4 |= CICR4_VSP; 769 cicr4 |= CICR4_VSP;
644 770
645 CICR4 = mclk_get_divisor(pcdev) | cicr4; 771 __raw_writel(pcdev->mclk_divisor | cicr4, pcdev->base + CICR4);
772
773 if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
774 /* Initialise the timeout under the assumption pclk = mclk */
775 recalculate_fifo_timeout(pcdev, pcdev->mclk);
776 else
777 /* "Safe default" - 13MHz */
778 recalculate_fifo_timeout(pcdev, 13000000);
646 779
647 clk_enable(pcdev->clk); 780 clk_enable(pcdev->clk);
648} 781}
@@ -655,14 +788,15 @@ static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
655static irqreturn_t pxa_camera_irq(int irq, void *data) 788static irqreturn_t pxa_camera_irq(int irq, void *data)
656{ 789{
657 struct pxa_camera_dev *pcdev = data; 790 struct pxa_camera_dev *pcdev = data;
658 unsigned int status = CISR; 791 unsigned long status, cicr0;
659 792
660 dev_dbg(pcdev->dev, "Camera interrupt status 0x%x\n", status); 793 status = __raw_readl(pcdev->base + CISR);
794 dev_dbg(pcdev->dev, "Camera interrupt status 0x%lx\n", status);
661 795
662 if (!status) 796 if (!status)
663 return IRQ_NONE; 797 return IRQ_NONE;
664 798
665 CISR = status; 799 __raw_writel(status, pcdev->base + CISR);
666 800
667 if (status & CISR_EOF) { 801 if (status & CISR_EOF) {
668 int i; 802 int i;
@@ -671,22 +805,24 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
671 pcdev->active->dmas[i].sg_dma; 805 pcdev->active->dmas[i].sg_dma;
672 DCSR(pcdev->dma_chans[i]) = DCSR_RUN; 806 DCSR(pcdev->dma_chans[i]) = DCSR_RUN;
673 } 807 }
674 CICR0 |= CICR0_EOFM; 808 cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM;
809 __raw_writel(cicr0, pcdev->base + CICR0);
675 } 810 }
676 811
677 return IRQ_HANDLED; 812 return IRQ_HANDLED;
678} 813}
679 814
680/* The following two functions absolutely depend on the fact, that 815/*
681 * there can be only one camera on PXA quick capture interface */ 816 * The following two functions absolutely depend on the fact, that
817 * there can be only one camera on PXA quick capture interface
818 * Called with .video_lock held
819 */
682static int pxa_camera_add_device(struct soc_camera_device *icd) 820static int pxa_camera_add_device(struct soc_camera_device *icd)
683{ 821{
684 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 822 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
685 struct pxa_camera_dev *pcdev = ici->priv; 823 struct pxa_camera_dev *pcdev = ici->priv;
686 int ret; 824 int ret;
687 825
688 mutex_lock(&camera_lock);
689
690 if (pcdev->icd) { 826 if (pcdev->icd) {
691 ret = -EBUSY; 827 ret = -EBUSY;
692 goto ebusy; 828 goto ebusy;
@@ -702,11 +838,10 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
702 pcdev->icd = icd; 838 pcdev->icd = icd;
703 839
704ebusy: 840ebusy:
705 mutex_unlock(&camera_lock);
706
707 return ret; 841 return ret;
708} 842}
709 843
844/* Called with .video_lock held */
710static void pxa_camera_remove_device(struct soc_camera_device *icd) 845static void pxa_camera_remove_device(struct soc_camera_device *icd)
711{ 846{
712 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 847 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
@@ -718,7 +853,7 @@ static void pxa_camera_remove_device(struct soc_camera_device *icd)
718 icd->devnum); 853 icd->devnum);
719 854
720 /* disable capture, disable interrupts */ 855 /* disable capture, disable interrupts */
721 CICR0 = 0x3ff; 856 __raw_writel(0x3ff, pcdev->base + CICR0);
722 857
723 /* Stop DMA engine */ 858 /* Stop DMA engine */
724 DCSR(pcdev->dma_chans[0]) = 0; 859 DCSR(pcdev->dma_chans[0]) = 0;
@@ -765,6 +900,9 @@ static int test_platform_param(struct pxa_camera_dev *pcdev,
765 if (!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)) 900 if (!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8))
766 return -EINVAL; 901 return -EINVAL;
767 *flags |= SOCAM_DATAWIDTH_8; 902 *flags |= SOCAM_DATAWIDTH_8;
903 break;
904 default:
905 return -EINVAL;
768 } 906 }
769 907
770 return 0; 908 return 0;
@@ -772,11 +910,10 @@ static int test_platform_param(struct pxa_camera_dev *pcdev,
772 910
773static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) 911static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
774{ 912{
775 struct soc_camera_host *ici = 913 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
776 to_soc_camera_host(icd->dev.parent);
777 struct pxa_camera_dev *pcdev = ici->priv; 914 struct pxa_camera_dev *pcdev = ici->priv;
778 unsigned long dw, bpp, bus_flags, camera_flags, common_flags; 915 unsigned long dw, bpp, bus_flags, camera_flags, common_flags;
779 u32 cicr0, cicr1, cicr4 = 0; 916 u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0;
780 int ret = test_platform_param(pcdev, icd->buswidth, &bus_flags); 917 int ret = test_platform_param(pcdev, icd->buswidth, &bus_flags);
781 918
782 if (ret < 0) 919 if (ret < 0)
@@ -823,12 +960,10 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
823 * We fix bit-per-pixel equal to data-width... */ 960 * We fix bit-per-pixel equal to data-width... */
824 switch (common_flags & SOCAM_DATAWIDTH_MASK) { 961 switch (common_flags & SOCAM_DATAWIDTH_MASK) {
825 case SOCAM_DATAWIDTH_10: 962 case SOCAM_DATAWIDTH_10:
826 icd->buswidth = 10;
827 dw = 4; 963 dw = 4;
828 bpp = 0x40; 964 bpp = 0x40;
829 break; 965 break;
830 case SOCAM_DATAWIDTH_9: 966 case SOCAM_DATAWIDTH_9:
831 icd->buswidth = 9;
832 dw = 3; 967 dw = 3;
833 bpp = 0x20; 968 bpp = 0x20;
834 break; 969 break;
@@ -836,7 +971,6 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
836 /* Actually it can only be 8 now, 971 /* Actually it can only be 8 now,
837 * default is just to silence compiler warnings */ 972 * default is just to silence compiler warnings */
838 case SOCAM_DATAWIDTH_8: 973 case SOCAM_DATAWIDTH_8:
839 icd->buswidth = 8;
840 dw = 2; 974 dw = 2;
841 bpp = 0; 975 bpp = 0;
842 } 976 }
@@ -852,9 +986,9 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
852 if (common_flags & SOCAM_VSYNC_ACTIVE_LOW) 986 if (common_flags & SOCAM_VSYNC_ACTIVE_LOW)
853 cicr4 |= CICR4_VSP; 987 cicr4 |= CICR4_VSP;
854 988
855 cicr0 = CICR0; 989 cicr0 = __raw_readl(pcdev->base + CICR0);
856 if (cicr0 & CICR0_ENB) 990 if (cicr0 & CICR0_ENB)
857 CICR0 = cicr0 & ~CICR0_ENB; 991 __raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0);
858 992
859 cicr1 = CICR1_PPL_VAL(icd->width - 1) | bpp | dw; 993 cicr1 = CICR1_PPL_VAL(icd->width - 1) | bpp | dw;
860 994
@@ -862,7 +996,17 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
862 case V4L2_PIX_FMT_YUV422P: 996 case V4L2_PIX_FMT_YUV422P:
863 pcdev->channels = 3; 997 pcdev->channels = 3;
864 cicr1 |= CICR1_YCBCR_F; 998 cicr1 |= CICR1_YCBCR_F;
999 /*
1000 * Normally, the pxa bus wants UYVY format as input. We allow all
1001 * reorderings of the YUV422 format, as no processing is done,
1002 * and the YUV stream is just passed through without any
1003 * transformation. Note that UYVY is the only format that
1004 * should be used if pxa framebuffer Overlay2 is used.
1005 */
1006 case V4L2_PIX_FMT_UYVY:
1007 case V4L2_PIX_FMT_VYUY:
865 case V4L2_PIX_FMT_YUYV: 1008 case V4L2_PIX_FMT_YUYV:
1009 case V4L2_PIX_FMT_YVYU:
866 cicr1 |= CICR1_COLOR_SP_VAL(2); 1010 cicr1 |= CICR1_COLOR_SP_VAL(2);
867 break; 1011 break;
868 case V4L2_PIX_FMT_RGB555: 1012 case V4L2_PIX_FMT_RGB555:
@@ -874,27 +1018,32 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
874 break; 1018 break;
875 } 1019 }
876 1020
877 CICR1 = cicr1; 1021 cicr2 = 0;
878 CICR2 = 0; 1022 cicr3 = CICR3_LPF_VAL(icd->height - 1) |
879 CICR3 = CICR3_LPF_VAL(icd->height - 1) |
880 CICR3_BFW_VAL(min((unsigned short)255, icd->y_skip_top)); 1023 CICR3_BFW_VAL(min((unsigned short)255, icd->y_skip_top));
881 CICR4 = mclk_get_divisor(pcdev) | cicr4; 1024 cicr4 |= pcdev->mclk_divisor;
1025
1026 __raw_writel(cicr1, pcdev->base + CICR1);
1027 __raw_writel(cicr2, pcdev->base + CICR2);
1028 __raw_writel(cicr3, pcdev->base + CICR3);
1029 __raw_writel(cicr4, pcdev->base + CICR4);
882 1030
883 /* CIF interrupts are not used, only DMA */ 1031 /* CIF interrupts are not used, only DMA */
884 CICR0 = (pcdev->platform_flags & PXA_CAMERA_MASTER ? 1032 cicr0 = (cicr0 & CICR0_ENB) | (pcdev->platform_flags & PXA_CAMERA_MASTER ?
885 CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP)) | 1033 CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP));
886 CICR0_DMAEN | CICR0_IRQ_MASK | (cicr0 & CICR0_ENB); 1034 cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK;
1035 __raw_writel(cicr0, pcdev->base + CICR0);
887 1036
888 return 0; 1037 return 0;
889} 1038}
890 1039
891static int pxa_camera_try_bus_param(struct soc_camera_device *icd, __u32 pixfmt) 1040static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
1041 unsigned char buswidth)
892{ 1042{
893 struct soc_camera_host *ici = 1043 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
894 to_soc_camera_host(icd->dev.parent);
895 struct pxa_camera_dev *pcdev = ici->priv; 1044 struct pxa_camera_dev *pcdev = ici->priv;
896 unsigned long bus_flags, camera_flags; 1045 unsigned long bus_flags, camera_flags;
897 int ret = test_platform_param(pcdev, icd->buswidth, &bus_flags); 1046 int ret = test_platform_param(pcdev, buswidth, &bus_flags);
898 1047
899 if (ret < 0) 1048 if (ret < 0)
900 return ret; 1049 return ret;
@@ -904,28 +1053,210 @@ static int pxa_camera_try_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
904 return soc_camera_bus_param_compatible(camera_flags, bus_flags) ? 0 : -EINVAL; 1053 return soc_camera_bus_param_compatible(camera_flags, bus_flags) ? 0 : -EINVAL;
905} 1054}
906 1055
907static int pxa_camera_set_fmt_cap(struct soc_camera_device *icd, 1056static const struct soc_camera_data_format pxa_camera_formats[] = {
908 __u32 pixfmt, struct v4l2_rect *rect) 1057 {
1058 .name = "Planar YUV422 16 bit",
1059 .depth = 16,
1060 .fourcc = V4L2_PIX_FMT_YUV422P,
1061 .colorspace = V4L2_COLORSPACE_JPEG,
1062 },
1063};
1064
1065static bool buswidth_supported(struct soc_camera_device *icd, int depth)
1066{
1067 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1068 struct pxa_camera_dev *pcdev = ici->priv;
1069
1070 switch (depth) {
1071 case 8:
1072 return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8);
1073 case 9:
1074 return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9);
1075 case 10:
1076 return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10);
1077 }
1078 return false;
1079}
1080
1081static int required_buswidth(const struct soc_camera_data_format *fmt)
909{ 1082{
910 return icd->ops->set_fmt_cap(icd, pixfmt, rect); 1083 switch (fmt->fourcc) {
1084 case V4L2_PIX_FMT_UYVY:
1085 case V4L2_PIX_FMT_VYUY:
1086 case V4L2_PIX_FMT_YUYV:
1087 case V4L2_PIX_FMT_YVYU:
1088 case V4L2_PIX_FMT_RGB565:
1089 case V4L2_PIX_FMT_RGB555:
1090 return 8;
1091 default:
1092 return fmt->depth;
1093 }
911} 1094}
912 1095
913static int pxa_camera_try_fmt_cap(struct soc_camera_device *icd, 1096static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
914 struct v4l2_format *f) 1097 struct soc_camera_format_xlate *xlate)
915{ 1098{
916 /* limit to pxa hardware capabilities */ 1099 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
917 if (f->fmt.pix.height < 32) 1100 int formats = 0, buswidth, ret;
918 f->fmt.pix.height = 32; 1101
919 if (f->fmt.pix.height > 2048) 1102 buswidth = required_buswidth(icd->formats + idx);
920 f->fmt.pix.height = 2048;
921 if (f->fmt.pix.width < 48)
922 f->fmt.pix.width = 48;
923 if (f->fmt.pix.width > 2048)
924 f->fmt.pix.width = 2048;
925 f->fmt.pix.width &= ~0x01;
926 1103
1104 if (!buswidth_supported(icd, buswidth))
1105 return 0;
1106
1107 ret = pxa_camera_try_bus_param(icd, buswidth);
1108 if (ret < 0)
1109 return 0;
1110
1111 switch (icd->formats[idx].fourcc) {
1112 case V4L2_PIX_FMT_UYVY:
1113 formats++;
1114 if (xlate) {
1115 xlate->host_fmt = &pxa_camera_formats[0];
1116 xlate->cam_fmt = icd->formats + idx;
1117 xlate->buswidth = buswidth;
1118 xlate++;
1119 dev_dbg(&ici->dev, "Providing format %s using %s\n",
1120 pxa_camera_formats[0].name,
1121 icd->formats[idx].name);
1122 }
1123 case V4L2_PIX_FMT_VYUY:
1124 case V4L2_PIX_FMT_YUYV:
1125 case V4L2_PIX_FMT_YVYU:
1126 case V4L2_PIX_FMT_RGB565:
1127 case V4L2_PIX_FMT_RGB555:
1128 formats++;
1129 if (xlate) {
1130 xlate->host_fmt = icd->formats + idx;
1131 xlate->cam_fmt = icd->formats + idx;
1132 xlate->buswidth = buswidth;
1133 xlate++;
1134 dev_dbg(&ici->dev, "Providing format %s packed\n",
1135 icd->formats[idx].name);
1136 }
1137 break;
1138 default:
1139 /* Generic pass-through */
1140 formats++;
1141 if (xlate) {
1142 xlate->host_fmt = icd->formats + idx;
1143 xlate->cam_fmt = icd->formats + idx;
1144 xlate->buswidth = icd->formats[idx].depth;
1145 xlate++;
1146 dev_dbg(&ici->dev,
1147 "Providing format %s in pass-through mode\n",
1148 icd->formats[idx].name);
1149 }
1150 }
1151
1152 return formats;
1153}
1154
1155static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1156 __u32 pixfmt, struct v4l2_rect *rect)
1157{
1158 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1159 struct pxa_camera_dev *pcdev = ici->priv;
1160 const struct soc_camera_data_format *host_fmt, *cam_fmt = NULL;
1161 const struct soc_camera_format_xlate *xlate;
1162 struct soc_camera_sense sense = {
1163 .master_clock = pcdev->mclk,
1164 .pixel_clock_max = pcdev->ciclk / 4,
1165 };
1166 int ret, buswidth;
1167
1168 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
1169 if (!xlate) {
1170 dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
1171 return -EINVAL;
1172 }
1173
1174 buswidth = xlate->buswidth;
1175 host_fmt = xlate->host_fmt;
1176 cam_fmt = xlate->cam_fmt;
1177
1178 /* If PCLK is used to latch data from the sensor, check sense */
1179 if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
1180 icd->sense = &sense;
1181
1182 switch (pixfmt) {
1183 case 0: /* Only geometry change */
1184 ret = icd->ops->set_fmt(icd, pixfmt, rect);
1185 break;
1186 default:
1187 ret = icd->ops->set_fmt(icd, cam_fmt->fourcc, rect);
1188 }
1189
1190 icd->sense = NULL;
1191
1192 if (ret < 0) {
1193 dev_warn(&ici->dev, "Failed to configure for format %x\n",
1194 pixfmt);
1195 } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
1196 if (sense.pixel_clock > sense.pixel_clock_max) {
1197 dev_err(&ici->dev,
1198 "pixel clock %lu set by the camera too high!",
1199 sense.pixel_clock);
1200 return -EIO;
1201 }
1202 recalculate_fifo_timeout(pcdev, sense.pixel_clock);
1203 }
1204
1205 if (pixfmt && !ret) {
1206 icd->buswidth = buswidth;
1207 icd->current_fmt = host_fmt;
1208 }
1209
1210 return ret;
1211}
1212
1213static int pxa_camera_try_fmt(struct soc_camera_device *icd,
1214 struct v4l2_format *f)
1215{
1216 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1217 const struct soc_camera_format_xlate *xlate;
1218 struct v4l2_pix_format *pix = &f->fmt.pix;
1219 __u32 pixfmt = pix->pixelformat;
1220 enum v4l2_field field;
1221 int ret;
1222
1223 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
1224 if (!xlate) {
1225 dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
1226 return -EINVAL;
1227 }
1228
1229 /* limit to pxa hardware capabilities */
1230 if (pix->height < 32)
1231 pix->height = 32;
1232 if (pix->height > 2048)
1233 pix->height = 2048;
1234 if (pix->width < 48)
1235 pix->width = 48;
1236 if (pix->width > 2048)
1237 pix->width = 2048;
1238 pix->width &= ~0x01;
1239
1240 pix->bytesperline = pix->width *
1241 DIV_ROUND_UP(xlate->host_fmt->depth, 8);
1242 pix->sizeimage = pix->height * pix->bytesperline;
1243
1244 /* camera has to see its format, but the user sees the original one */
1245 pix->pixelformat = xlate->cam_fmt->fourcc;
927 /* limit to sensor capabilities */ 1246 /* limit to sensor capabilities */
928 return icd->ops->try_fmt_cap(icd, f); 1247 ret = icd->ops->try_fmt(icd, f);
1248 pix->pixelformat = xlate->host_fmt->fourcc;
1249
1250 field = pix->field;
1251
1252 if (field == V4L2_FIELD_ANY) {
1253 pix->field = V4L2_FIELD_NONE;
1254 } else if (field != V4L2_FIELD_NONE) {
1255 dev_err(&icd->dev, "Field type %d unsupported.\n", field);
1256 return -EINVAL;
1257 }
1258
1259 return ret;
929} 1260}
930 1261
931static int pxa_camera_reqbufs(struct soc_camera_file *icf, 1262static int pxa_camera_reqbufs(struct soc_camera_file *icf,
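The clamping and the bytesperline/sizeimage computation in pxa_camera_try_fmt() above can be reproduced with a short stand-alone calculation; the 647x481 request and the 16-bit planar YUV422 host format are example values, not anything required by the hardware.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned width = 647, height = 481;   /* example user request (assumption) */
	unsigned depth = 16;                  /* e.g. V4L2_PIX_FMT_YUV422P host format */

	/* limits applied by pxa_camera_try_fmt() */
	if (height < 32)   height = 32;
	if (height > 2048) height = 2048;
	if (width < 48)    width = 48;
	if (width > 2048)  width = 2048;
	width &= ~0x01u;                      /* width forced even: 647 -> 646 */

	unsigned bytesperline = width * DIV_ROUND_UP(depth, 8);  /* 646 * 2 = 1292 */
	unsigned sizeimage    = height * bytesperline;           /* 481 * 1292 = 621452 */

	printf("%ux%u bytesperline=%u sizeimage=%u\n",
	       width, height, bytesperline, sizeimage);
	return 0;
}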
@@ -977,16 +1308,15 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
977 1308
978static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state) 1309static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state)
979{ 1310{
980 struct soc_camera_host *ici = 1311 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
981 to_soc_camera_host(icd->dev.parent);
982 struct pxa_camera_dev *pcdev = ici->priv; 1312 struct pxa_camera_dev *pcdev = ici->priv;
983 int i = 0, ret = 0; 1313 int i = 0, ret = 0;
984 1314
985 pcdev->save_cicr[i++] = CICR0; 1315 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0);
986 pcdev->save_cicr[i++] = CICR1; 1316 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR1);
987 pcdev->save_cicr[i++] = CICR2; 1317 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR2);
988 pcdev->save_cicr[i++] = CICR3; 1318 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
989 pcdev->save_cicr[i++] = CICR4; 1319 pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
990 1320
991 if ((pcdev->icd) && (pcdev->icd->ops->suspend)) 1321 if ((pcdev->icd) && (pcdev->icd->ops->suspend))
992 ret = pcdev->icd->ops->suspend(pcdev->icd, state); 1322 ret = pcdev->icd->ops->suspend(pcdev->icd, state);
@@ -996,8 +1326,7 @@ static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state)
996 1326
997static int pxa_camera_resume(struct soc_camera_device *icd) 1327static int pxa_camera_resume(struct soc_camera_device *icd)
998{ 1328{
999 struct soc_camera_host *ici = 1329 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1000 to_soc_camera_host(icd->dev.parent);
1001 struct pxa_camera_dev *pcdev = ici->priv; 1330 struct pxa_camera_dev *pcdev = ici->priv;
1002 int i = 0, ret = 0; 1331 int i = 0, ret = 0;
1003 1332
@@ -1005,23 +1334,27 @@ static int pxa_camera_resume(struct soc_camera_device *icd)
1005 DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD; 1334 DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
1006 DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD; 1335 DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
1007 1336
1008 CICR0 = pcdev->save_cicr[i++] & ~CICR0_ENB; 1337 __raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
1009 CICR1 = pcdev->save_cicr[i++]; 1338 __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1);
1010 CICR2 = pcdev->save_cicr[i++]; 1339 __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2);
1011 CICR3 = pcdev->save_cicr[i++]; 1340 __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
1012 CICR4 = pcdev->save_cicr[i++]; 1341 __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
1013 1342
1014 if ((pcdev->icd) && (pcdev->icd->ops->resume)) 1343 if ((pcdev->icd) && (pcdev->icd->ops->resume))
1015 ret = pcdev->icd->ops->resume(pcdev->icd); 1344 ret = pcdev->icd->ops->resume(pcdev->icd);
1016 1345
1017 /* Restart frame capture if active buffer exists */ 1346 /* Restart frame capture if active buffer exists */
1018 if (!ret && pcdev->active) { 1347 if (!ret && pcdev->active) {
1348 unsigned long cifr, cicr0;
1349
1019 /* Reset the FIFOs */ 1350 /* Reset the FIFOs */
1020 CIFR |= CIFR_RESET_F; 1351 cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
1021 /* Enable End-Of-Frame Interrupt */ 1352 __raw_writel(cifr, pcdev->base + CIFR);
1022 CICR0 &= ~CICR0_EOFM; 1353
1023 /* Restart the Capture Interface */ 1354 cicr0 = __raw_readl(pcdev->base + CICR0);
1024 CICR0 |= CICR0_ENB; 1355 cicr0 &= ~CICR0_EOFM; /* Enable End-Of-Frame Interrupt */
1356 cicr0 |= CICR0_ENB; /* Restart the Capture Interface */
1357 __raw_writel(cicr0, pcdev->base + CICR0);
1025 } 1358 }
1026 1359
1027 return ret; 1360 return ret;
@@ -1033,13 +1366,13 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
1033 .remove = pxa_camera_remove_device, 1366 .remove = pxa_camera_remove_device,
1034 .suspend = pxa_camera_suspend, 1367 .suspend = pxa_camera_suspend,
1035 .resume = pxa_camera_resume, 1368 .resume = pxa_camera_resume,
1036 .set_fmt_cap = pxa_camera_set_fmt_cap, 1369 .get_formats = pxa_camera_get_formats,
1037 .try_fmt_cap = pxa_camera_try_fmt_cap, 1370 .set_fmt = pxa_camera_set_fmt,
1371 .try_fmt = pxa_camera_try_fmt,
1038 .init_videobuf = pxa_camera_init_videobuf, 1372 .init_videobuf = pxa_camera_init_videobuf,
1039 .reqbufs = pxa_camera_reqbufs, 1373 .reqbufs = pxa_camera_reqbufs,
1040 .poll = pxa_camera_poll, 1374 .poll = pxa_camera_poll,
1041 .querycap = pxa_camera_querycap, 1375 .querycap = pxa_camera_querycap,
1042 .try_bus_param = pxa_camera_try_bus_param,
1043 .set_bus_param = pxa_camera_set_bus_param, 1376 .set_bus_param = pxa_camera_set_bus_param,
1044}; 1377};
1045 1378
@@ -1071,7 +1404,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
1071 goto exit; 1404 goto exit;
1072 } 1405 }
1073 1406
1074 pcdev->clk = clk_get(&pdev->dev, "CAMCLK"); 1407 pcdev->clk = clk_get(&pdev->dev, NULL);
1075 if (IS_ERR(pcdev->clk)) { 1408 if (IS_ERR(pcdev->clk)) {
1076 err = PTR_ERR(pcdev->clk); 1409 err = PTR_ERR(pcdev->clk);
1077 goto exit_kfree; 1410 goto exit_kfree;
@@ -1090,14 +1423,17 @@ static int pxa_camera_probe(struct platform_device *pdev)
1090 "data widths, using default 10 bit\n"); 1423 "data widths, using default 10 bit\n");
1091 pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10; 1424 pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
1092 } 1425 }
1093 pcdev->platform_mclk_10khz = pcdev->pdata->mclk_10khz; 1426 pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
1094 if (!pcdev->platform_mclk_10khz) { 1427 if (!pcdev->mclk) {
1095 dev_warn(&pdev->dev, 1428 dev_warn(&pdev->dev,
1096 "mclk_10khz == 0! Please, fix your platform data. " 1429 "mclk == 0! Please, fix your platform data. "
1097 "Using default 20MHz\n"); 1430 "Using default 20MHz\n");
1098 pcdev->platform_mclk_10khz = 2000; 1431 pcdev->mclk = 20000000;
1099 } 1432 }
1100 1433
1434 pcdev->dev = &pdev->dev;
1435 pcdev->mclk_divisor = mclk_get_divisor(pcdev);
1436
1101 INIT_LIST_HEAD(&pcdev->capture); 1437 INIT_LIST_HEAD(&pcdev->capture);
1102 spin_lock_init(&pcdev->lock); 1438 spin_lock_init(&pcdev->lock);
1103 1439
@@ -1117,7 +1453,6 @@ static int pxa_camera_probe(struct platform_device *pdev)
1117 } 1453 }
1118 pcdev->irq = irq; 1454 pcdev->irq = irq;
1119 pcdev->base = base; 1455 pcdev->base = base;
1120 pcdev->dev = &pdev->dev;
1121 1456
1122 /* request dma */ 1457 /* request dma */
1123 err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH, 1458 err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH,
diff --git a/drivers/media/video/pxa_camera.h b/drivers/media/video/pxa_camera.h
new file mode 100644
index 000000000000..89cbfc9a35c5
--- /dev/null
+++ b/drivers/media/video/pxa_camera.h
@@ -0,0 +1,95 @@
1/* Camera Interface */
2#define CICR0 __REG(0x50000000)
3#define CICR1 __REG(0x50000004)
4#define CICR2 __REG(0x50000008)
5#define CICR3 __REG(0x5000000C)
6#define CICR4 __REG(0x50000010)
7#define CISR __REG(0x50000014)
8#define CIFR __REG(0x50000018)
9#define CITOR __REG(0x5000001C)
10#define CIBR0 __REG(0x50000028)
11#define CIBR1 __REG(0x50000030)
12#define CIBR2 __REG(0x50000038)
13
14#define CICR0_DMAEN (1 << 31) /* DMA request enable */
15#define CICR0_PAR_EN (1 << 30) /* Parity enable */
16#define CICR0_SL_CAP_EN (1 << 29) /* Capture enable for slave mode */
17#define CICR0_ENB (1 << 28) /* Camera interface enable */
18#define CICR0_DIS (1 << 27) /* Camera interface disable */
19#define CICR0_SIM (0x7 << 24) /* Sensor interface mode mask */
20#define CICR0_TOM (1 << 9) /* Time-out mask */
21#define CICR0_RDAVM (1 << 8) /* Receive-data-available mask */
22#define CICR0_FEM (1 << 7) /* FIFO-empty mask */
23#define CICR0_EOLM (1 << 6) /* End-of-line mask */
24#define CICR0_PERRM (1 << 5) /* Parity-error mask */
25#define CICR0_QDM (1 << 4) /* Quick-disable mask */
26#define CICR0_CDM (1 << 3) /* Disable-done mask */
27#define CICR0_SOFM (1 << 2) /* Start-of-frame mask */
28#define CICR0_EOFM (1 << 1) /* End-of-frame mask */
29#define CICR0_FOM (1 << 0) /* FIFO-overrun mask */
30
31#define CICR1_TBIT (1 << 31) /* Transparency bit */
32#define CICR1_RGBT_CONV (0x3 << 29) /* RGBT conversion mask */
33#define CICR1_PPL (0x7ff << 15) /* Pixels per line mask */
34#define CICR1_RGB_CONV (0x7 << 12) /* RGB conversion mask */
35#define CICR1_RGB_F (1 << 11) /* RGB format */
36#define CICR1_YCBCR_F (1 << 10) /* YCbCr format */
37#define CICR1_RGB_BPP (0x7 << 7) /* RGB bits per pixel mask */
38#define CICR1_RAW_BPP (0x3 << 5) /* Raw bits per pixel mask */
39#define CICR1_COLOR_SP (0x3 << 3) /* Color space mask */
40#define CICR1_DW (0x7 << 0) /* Data width mask */
41
42#define CICR2_BLW (0xff << 24) /* Beginning-of-line pixel clock
43 wait count mask */
44#define CICR2_ELW (0xff << 16) /* End-of-line pixel clock
45 wait count mask */
46#define CICR2_HSW (0x3f << 10) /* Horizontal sync pulse width mask */
47#define CICR2_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock
48 wait count mask */
49#define CICR2_FSW (0x7 << 0) /* Frame stabilization
50 wait count mask */
51
52#define CICR3_BFW (0xff << 24) /* Beginning-of-frame line clock
53 wait count mask */
54#define CICR3_EFW (0xff << 16) /* End-of-frame line clock
55 wait count mask */
56#define CICR3_VSW (0x3f << 10) /* Vertical sync pulse width mask */
57#define CICR3_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock
58 wait count mask */
59#define CICR3_LPF (0x7ff << 0) /* Lines per frame mask */
60
61#define CICR4_MCLK_DLY (0x3 << 24) /* MCLK Data Capture Delay mask */
62#define CICR4_PCLK_EN (1 << 23) /* Pixel clock enable */
63#define CICR4_PCP (1 << 22) /* Pixel clock polarity */
64#define CICR4_HSP (1 << 21) /* Horizontal sync polarity */
65#define CICR4_VSP (1 << 20) /* Vertical sync polarity */
66#define CICR4_MCLK_EN (1 << 19) /* MCLK enable */
67#define CICR4_FR_RATE (0x7 << 8) /* Frame rate mask */
68#define CICR4_DIV (0xff << 0) /* Clock divisor mask */
69
70#define CISR_FTO (1 << 15) /* FIFO time-out */
71#define CISR_RDAV_2 (1 << 14) /* Channel 2 receive data available */
72#define CISR_RDAV_1 (1 << 13) /* Channel 1 receive data available */
73#define CISR_RDAV_0 (1 << 12) /* Channel 0 receive data available */
74#define CISR_FEMPTY_2 (1 << 11) /* Channel 2 FIFO empty */
75#define CISR_FEMPTY_1 (1 << 10) /* Channel 1 FIFO empty */
76#define CISR_FEMPTY_0 (1 << 9) /* Channel 0 FIFO empty */
77#define CISR_EOL (1 << 8) /* End of line */
78#define CISR_PAR_ERR (1 << 7) /* Parity error */
79#define CISR_CQD (1 << 6) /* Camera interface quick disable */
80#define CISR_CDD (1 << 5) /* Camera interface disable done */
81#define CISR_SOF (1 << 4) /* Start of frame */
82#define CISR_EOF (1 << 3) /* End of frame */
83#define CISR_IFO_2 (1 << 2) /* FIFO overrun for Channel 2 */
84#define CISR_IFO_1 (1 << 1) /* FIFO overrun for Channel 1 */
85#define CISR_IFO_0 (1 << 0) /* FIFO overrun for Channel 0 */
86
87#define CIFR_FLVL2 (0x7f << 23) /* FIFO 2 level mask */
88#define CIFR_FLVL1 (0x7f << 16) /* FIFO 1 level mask */
89#define CIFR_FLVL0 (0xff << 8) /* FIFO 0 level mask */
90#define CIFR_THL_0 (0x3 << 4) /* Threshold Level for Channel 0 FIFO */
91#define CIFR_RESET_F (1 << 3) /* Reset input FIFOs */
92#define CIFR_FEN2 (1 << 2) /* FIFO enable for channel 2 */
93#define CIFR_FEN1 (1 << 1) /* FIFO enable for channel 1 */
94#define CIFR_FEN0 (1 << 0) /* FIFO enable for channel 0 */
95
diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c
index 4a21b8a6a709..f159441e9375 100644
--- a/drivers/media/video/saa5246a.c
+++ b/drivers/media/video/saa5246a.c
@@ -15,7 +15,7 @@
15 * <richard.guenther@student.uni-tuebingen.de> 15 * <richard.guenther@student.uni-tuebingen.de>
16 * 16 *
17 * with changes by 17 * with changes by
18 * Alan Cox <Alan.Cox@linux.org> 18 * Alan Cox <alan@lxorguk.ukuu.org.uk>
19 * 19 *
20 * and 20 * and
21 * 21 *
@@ -804,8 +804,7 @@ static inline int saa5246a_stop_dau(struct saa5246a_device *t,
804 * 804 *
805 * Returns 0 if successful 805 * Returns 0 if successful
806 */ 806 */
807static int do_saa5246a_ioctl(struct inode *inode, struct file *file, 807static int do_saa5246a_ioctl(struct file *file, unsigned int cmd, void *arg)
808 unsigned int cmd, void *arg)
809{ 808{
810 struct saa5246a_device *t = video_drvdata(file); 809 struct saa5246a_device *t = video_drvdata(file);
811 810
@@ -953,7 +952,7 @@ static int saa5246a_ioctl(struct inode *inode, struct file *file,
953 952
954 cmd = vtx_fix_command(cmd); 953 cmd = vtx_fix_command(cmd);
955 mutex_lock(&t->lock); 954 mutex_lock(&t->lock);
956 err = video_usercopy(inode, file, cmd, arg, do_saa5246a_ioctl); 955 err = video_usercopy(file, cmd, arg, do_saa5246a_ioctl);
957 mutex_unlock(&t->lock); 956 mutex_unlock(&t->lock);
958 return err; 957 return err;
959} 958}
diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c
index 3bb959c25d9d..6ef3affb97f1 100644
--- a/drivers/media/video/saa5249.c
+++ b/drivers/media/video/saa5249.c
@@ -8,7 +8,7 @@
8 * you can add arbitrary multiple teletext devices to Linux video4linux 8 * you can add arbitrary multiple teletext devices to Linux video4linux
9 * now (well 32 anyway). 9 * now (well 32 anyway).
10 * 10 *
11 * Alan Cox <Alan.Cox@linux.org> 11 * Alan Cox <alan@lxorguk.ukuu.org.uk>
12 * 12 *
13 * The original driver was heavily modified to match the i2c interface 13 * The original driver was heavily modified to match the i2c interface
14 * It was truncated to use the WinTV boards, too. 14 * It was truncated to use the WinTV boards, too.
@@ -190,8 +190,7 @@ static int i2c_getdata(struct saa5249_device *t, int count, u8 *buf)
190 * Standard character-device-driver functions 190 * Standard character-device-driver functions
191 */ 191 */
192 192
193static int do_saa5249_ioctl(struct inode *inode, struct file *file, 193static int do_saa5249_ioctl(struct file *file, unsigned int cmd, void *arg)
194 unsigned int cmd, void *arg)
195{ 194{
196 static int virtual_mode = false; 195 static int virtual_mode = false;
197 struct saa5249_device *t = video_drvdata(file); 196 struct saa5249_device *t = video_drvdata(file);
@@ -488,7 +487,7 @@ static int saa5249_ioctl(struct inode *inode, struct file *file,
488 487
489 cmd = vtx_fix_command(cmd); 488 cmd = vtx_fix_command(cmd);
490 mutex_lock(&t->lock); 489 mutex_lock(&t->lock);
491 err = video_usercopy(inode,file,cmd,arg,do_saa5249_ioctl); 490 err = video_usercopy(file, cmd, arg, do_saa5249_ioctl);
492 mutex_unlock(&t->lock); 491 mutex_unlock(&t->lock);
493 return err; 492 return err;
494} 493}
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index c8e9cb3db30a..22708ecdf1bb 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -44,7 +44,7 @@
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/i2c.h> 45#include <linux/i2c.h>
46#include <linux/videodev2.h> 46#include <linux/videodev2.h>
47#include <media/v4l2-common.h> 47#include <media/v4l2-device.h>
48#include <media/v4l2-chip-ident.h> 48#include <media/v4l2-chip-ident.h>
49#include <media/v4l2-i2c-drv-legacy.h> 49#include <media/v4l2-i2c-drv-legacy.h>
50#include <media/saa7115.h> 50#include <media/saa7115.h>
@@ -70,6 +70,7 @@ static unsigned short normal_i2c[] = {
70I2C_CLIENT_INSMOD; 70I2C_CLIENT_INSMOD;
71 71
72struct saa711x_state { 72struct saa711x_state {
73 struct v4l2_subdev sd;
73 v4l2_std_id std; 74 v4l2_std_id std;
74 int input; 75 int input;
75 int output; 76 int output;
@@ -89,10 +90,17 @@ struct saa711x_state {
89 u8 apll; 90 u8 apll;
90}; 91};
91 92
93static inline struct saa711x_state *to_state(struct v4l2_subdev *sd)
94{
95 return container_of(sd, struct saa711x_state, sd);
96}
97
92/* ----------------------------------------------------------------------- */ 98/* ----------------------------------------------------------------------- */
93 99
94static inline int saa711x_write(struct i2c_client *client, u8 reg, u8 value) 100static inline int saa711x_write(struct v4l2_subdev *sd, u8 reg, u8 value)
95{ 101{
102 struct i2c_client *client = v4l2_get_subdevdata(sd);
103
96 return i2c_smbus_write_byte_data(client, reg, value); 104 return i2c_smbus_write_byte_data(client, reg, value);
97} 105}
98 106
@@ -128,9 +136,9 @@ static int saa711x_has_reg(const int id, const u8 reg)
128 return 1; 136 return 1;
129} 137}
130 138
131static int saa711x_writeregs(struct i2c_client *client, const unsigned char *regs) 139static int saa711x_writeregs(struct v4l2_subdev *sd, const unsigned char *regs)
132{ 140{
133 struct saa711x_state *state = i2c_get_clientdata(client); 141 struct saa711x_state *state = to_state(sd);
134 unsigned char reg, data; 142 unsigned char reg, data;
135 143
136 while (*regs != 0x00) { 144 while (*regs != 0x00) {
@@ -139,18 +147,20 @@ static int saa711x_writeregs(struct i2c_client *client, const unsigned char *reg
139 147
140 /* According to the datasheets, reserved regs should be 148 /* According to the datasheets, reserved regs should be
141 filled with 0 - seems better not to touch them 149 filled with 0 - seems better not to touch them
142 if (saa711x_has_reg(state->ident,reg)) { 150 if (saa711x_has_reg(state->ident, reg)) {
143 if (saa711x_write(client, reg, data) < 0) 151 if (saa711x_write(sd, reg, data) < 0)
144 return -1; 152 return -1;
145 } else { 153 } else {
146 v4l_dbg(1, debug, client, "tried to access reserved reg 0x%02x\n", reg); 154 v4l2_dbg(1, debug, sd, "tried to access reserved reg 0x%02x\n", reg);
147 } 155 }
148 } 156 }
149 return 0; 157 return 0;
150} 158}
151 159
152static inline int saa711x_read(struct i2c_client *client, u8 reg) 160static inline int saa711x_read(struct v4l2_subdev *sd, u8 reg)
153{ 161{
162 struct i2c_client *client = v4l2_get_subdevdata(sd);
163
154 return i2c_smbus_read_byte_data(client, reg); 164 return i2c_smbus_read_byte_data(client, reg);
155} 165}
156 166
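The saa7115 conversion embeds a struct v4l2_subdev inside the driver state and recovers the state with container_of(), as to_state() above does. A stand-alone illustration of that pattern follows; the struct names and the simplified container_of macro are made up for the example and are not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int id; };             /* stands in for struct v4l2_subdev */

struct state {                         /* stands in for struct saa711x_state */
	struct subdev sd;              /* embedded object, not a pointer */
	int bright;
};

static struct state *to_state(struct subdev *sd)
{
	return container_of(sd, struct state, sd);
}

int main(void)
{
	struct state st = { .sd = { .id = 7 }, .bright = 128 };
	struct subdev *sd = &st.sd;    /* callers typically only hold this pointer */

	printf("bright=%d\n", to_state(sd)->bright);   /* prints 128 */
	return 0;
}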
@@ -601,7 +611,7 @@ static int saa711x_odd_parity(u8 c)
601 return c & 1; 611 return c & 1;
602} 612}
603 613
604static int saa711x_decode_vps(u8 * dst, u8 * p) 614static int saa711x_decode_vps(u8 *dst, u8 *p)
605{ 615{
606 static const u8 biphase_tbl[] = { 616 static const u8 biphase_tbl[] = {
607 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4, 617 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
@@ -648,7 +658,7 @@ static int saa711x_decode_vps(u8 * dst, u8 * p)
648 return err & 0xf0; 658 return err & 0xf0;
649} 659}
650 660
651static int saa711x_decode_wss(u8 * p) 661static int saa711x_decode_wss(u8 *p)
652{ 662{
653 static const int wss_bits[8] = { 663 static const int wss_bits[8] = {
654 0, 0, 0, 1, 0, 1, 1, 1 664 0, 0, 0, 1, 0, 1, 1, 1
@@ -675,9 +685,9 @@ static int saa711x_decode_wss(u8 * p)
675 return wss; 685 return wss;
676} 686}
677 687
678static int saa711x_set_audio_clock_freq(struct i2c_client *client, u32 freq) 688static int saa711x_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
679{ 689{
680 struct saa711x_state *state = i2c_get_clientdata(client); 690 struct saa711x_state *state = to_state(sd);
681 u32 acpf; 691 u32 acpf;
682 u32 acni; 692 u32 acni;
683 u32 hz; 693 u32 hz;
@@ -685,10 +695,10 @@ static int saa711x_set_audio_clock_freq(struct i2c_client *client, u32 freq)
685 u8 acc = 0; /* reg 0x3a, audio clock control */ 695 u8 acc = 0; /* reg 0x3a, audio clock control */
686 696
687 /* Checks for chips that don't have audio clock (saa7111, saa7113) */ 697 /* Checks for chips that don't have audio clock (saa7111, saa7113) */
688 if (!saa711x_has_reg(state->ident,R_30_AUD_MAST_CLK_CYCLES_PER_FIELD)) 698 if (!saa711x_has_reg(state->ident, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD))
689 return 0; 699 return 0;
690 700
691 v4l_dbg(1, debug, client, "set audio clock freq: %d\n", freq); 701 v4l2_dbg(1, debug, sd, "set audio clock freq: %d\n", freq);
692 702
693 /* sanity check */ 703 /* sanity check */
694 if (freq < 32000 || freq > 48000) 704 if (freq < 32000 || freq > 48000)
@@ -715,66 +725,66 @@ static int saa711x_set_audio_clock_freq(struct i2c_client *client, u32 freq)
715 if (state->apll) 725 if (state->apll)
716 acc |= 0x08; 726 acc |= 0x08;
717 727
718 saa711x_write(client, R_38_CLK_RATIO_AMXCLK_TO_ASCLK, 0x03); 728 saa711x_write(sd, R_38_CLK_RATIO_AMXCLK_TO_ASCLK, 0x03);
719 saa711x_write(client, R_39_CLK_RATIO_ASCLK_TO_ALRCLK, 0x10); 729 saa711x_write(sd, R_39_CLK_RATIO_ASCLK_TO_ALRCLK, 0x10);
720 saa711x_write(client, R_3A_AUD_CLK_GEN_BASIC_SETUP, acc); 730 saa711x_write(sd, R_3A_AUD_CLK_GEN_BASIC_SETUP, acc);
721 731
722 saa711x_write(client, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD, acpf & 0xff); 732 saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD, acpf & 0xff);
723 saa711x_write(client, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+1, 733 saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+1,
724 (acpf >> 8) & 0xff); 734 (acpf >> 8) & 0xff);
725 saa711x_write(client, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+2, 735 saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+2,
726 (acpf >> 16) & 0x03); 736 (acpf >> 16) & 0x03);
727 737
728 saa711x_write(client, R_34_AUD_MAST_CLK_NOMINAL_INC, acni & 0xff); 738 saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC, acni & 0xff);
729 saa711x_write(client, R_34_AUD_MAST_CLK_NOMINAL_INC+1, (acni >> 8) & 0xff); 739 saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC+1, (acni >> 8) & 0xff);
730 saa711x_write(client, R_34_AUD_MAST_CLK_NOMINAL_INC+2, (acni >> 16) & 0x3f); 740 saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC+2, (acni >> 16) & 0x3f);
731 state->audclk_freq = freq; 741 state->audclk_freq = freq;
732 return 0; 742 return 0;
733} 743}
734 744
735static int saa711x_set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl) 745static int saa711x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
736{ 746{
737 struct saa711x_state *state = i2c_get_clientdata(client); 747 struct saa711x_state *state = to_state(sd);
738 748
739 switch (ctrl->id) { 749 switch (ctrl->id) {
740 case V4L2_CID_BRIGHTNESS: 750 case V4L2_CID_BRIGHTNESS:
741 if (ctrl->value < 0 || ctrl->value > 255) { 751 if (ctrl->value < 0 || ctrl->value > 255) {
742 v4l_err(client, "invalid brightness setting %d\n", ctrl->value); 752 v4l2_err(sd, "invalid brightness setting %d\n", ctrl->value);
743 return -ERANGE; 753 return -ERANGE;
744 } 754 }
745 755
746 state->bright = ctrl->value; 756 state->bright = ctrl->value;
747 saa711x_write(client, R_0A_LUMA_BRIGHT_CNTL, state->bright); 757 saa711x_write(sd, R_0A_LUMA_BRIGHT_CNTL, state->bright);
748 break; 758 break;
749 759
750 case V4L2_CID_CONTRAST: 760 case V4L2_CID_CONTRAST:
751 if (ctrl->value < 0 || ctrl->value > 127) { 761 if (ctrl->value < 0 || ctrl->value > 127) {
752 v4l_err(client, "invalid contrast setting %d\n", ctrl->value); 762 v4l2_err(sd, "invalid contrast setting %d\n", ctrl->value);
753 return -ERANGE; 763 return -ERANGE;
754 } 764 }
755 765
756 state->contrast = ctrl->value; 766 state->contrast = ctrl->value;
757 saa711x_write(client, R_0B_LUMA_CONTRAST_CNTL, state->contrast); 767 saa711x_write(sd, R_0B_LUMA_CONTRAST_CNTL, state->contrast);
758 break; 768 break;
759 769
760 case V4L2_CID_SATURATION: 770 case V4L2_CID_SATURATION:
761 if (ctrl->value < 0 || ctrl->value > 127) { 771 if (ctrl->value < 0 || ctrl->value > 127) {
762 v4l_err(client, "invalid saturation setting %d\n", ctrl->value); 772 v4l2_err(sd, "invalid saturation setting %d\n", ctrl->value);
763 return -ERANGE; 773 return -ERANGE;
764 } 774 }
765 775
766 state->sat = ctrl->value; 776 state->sat = ctrl->value;
767 saa711x_write(client, R_0C_CHROMA_SAT_CNTL, state->sat); 777 saa711x_write(sd, R_0C_CHROMA_SAT_CNTL, state->sat);
768 break; 778 break;
769 779
770 case V4L2_CID_HUE: 780 case V4L2_CID_HUE:
771 if (ctrl->value < -127 || ctrl->value > 127) { 781 if (ctrl->value < -127 || ctrl->value > 127) {
772 v4l_err(client, "invalid hue setting %d\n", ctrl->value); 782 v4l2_err(sd, "invalid hue setting %d\n", ctrl->value);
773 return -ERANGE; 783 return -ERANGE;
774 } 784 }
775 785
776 state->hue = ctrl->value; 786 state->hue = ctrl->value;
777 saa711x_write(client, R_0D_CHROMA_HUE_CNTL, state->hue); 787 saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, state->hue);
778 break; 788 break;
779 789
780 default: 790 default:
@@ -784,9 +794,9 @@ static int saa711x_set_v4lctrl(struct i2c_client *client, struct v4l2_control *c
784 return 0; 794 return 0;
785} 795}
786 796
787static int saa711x_get_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl) 797static int saa711x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
788{ 798{
789 struct saa711x_state *state = i2c_get_clientdata(client); 799 struct saa711x_state *state = to_state(sd);
790 800
791 switch (ctrl->id) { 801 switch (ctrl->id) {
792 case V4L2_CID_BRIGHTNESS: 802 case V4L2_CID_BRIGHTNESS:
@@ -808,16 +818,16 @@ static int saa711x_get_v4lctrl(struct i2c_client *client, struct v4l2_control *c
808 return 0; 818 return 0;
809} 819}
810 820
811static int saa711x_set_size(struct i2c_client *client, int width, int height) 821static int saa711x_set_size(struct v4l2_subdev *sd, int width, int height)
812{ 822{
813 struct saa711x_state *state = i2c_get_clientdata(client); 823 struct saa711x_state *state = to_state(sd);
814 int HPSC, HFSC; 824 int HPSC, HFSC;
815 int VSCY; 825 int VSCY;
816 int res; 826 int res;
817 int is_50hz = state->std & V4L2_STD_625_50; 827 int is_50hz = state->std & V4L2_STD_625_50;
818 int Vsrc = is_50hz ? 576 : 480; 828 int Vsrc = is_50hz ? 576 : 480;
819 829
820 v4l_dbg(1, debug, client, "decoder set size to %ix%i\n",width,height); 830 v4l2_dbg(1, debug, sd, "decoder set size to %ix%i\n", width, height);
821 831
822 /* FIXME need better bounds checking here */ 832 /* FIXME need better bounds checking here */
823 if ((width < 1) || (width > 1440)) 833 if ((width < 1) || (width > 1440))
@@ -825,7 +835,7 @@ static int saa711x_set_size(struct i2c_client *client, int width, int height)
825 if ((height < 1) || (height > Vsrc)) 835 if ((height < 1) || (height > Vsrc))
826 return -EINVAL; 836 return -EINVAL;
827 837
828 if (!saa711x_has_reg(state->ident,R_D0_B_HORIZ_PRESCALING)) { 838 if (!saa711x_has_reg(state->ident, R_D0_B_HORIZ_PRESCALING)) {
829 /* Decoder only supports 720 columns and 480 or 576 lines */ 839 /* Decoder only supports 720 columns and 480 or 576 lines */
830 if (width != 720) 840 if (width != 720)
831 return -EINVAL; 841 return -EINVAL;
@@ -843,22 +853,22 @@ static int saa711x_set_size(struct i2c_client *client, int width, int height)
843 /* Set output width/height */ 853 /* Set output width/height */
844 /* width */ 854 /* width */
845 855
846 saa711x_write(client, R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, 856 saa711x_write(sd, R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH,
847 (u8) (width & 0xff)); 857 (u8) (width & 0xff));
848 saa711x_write(client, R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 858 saa711x_write(sd, R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB,
849 (u8) ((width >> 8) & 0xff)); 859 (u8) ((width >> 8) & 0xff));
850 860
851 /* Vertical Scaling uses height/2 */ 861 /* Vertical Scaling uses height/2 */
852 res=height/2; 862 res = height / 2;
853 863
854 /* On 60Hz, it is using a higher Vertical Output Size */ 864 /* On 60Hz, it is using a higher Vertical Output Size */
855 if (!is_50hz) 865 if (!is_50hz)
856 res += (VRES_60HZ - 480) >> 1; 866 res += (VRES_60HZ - 480) >> 1;
857 867
858 /* height */ 868 /* height */
859 saa711x_write(client, R_CE_B_VERT_OUTPUT_WINDOW_LENGTH, 869 saa711x_write(sd, R_CE_B_VERT_OUTPUT_WINDOW_LENGTH,
860 (u8) (res & 0xff)); 870 (u8) (res & 0xff));
861 saa711x_write(client, R_CF_B_VERT_OUTPUT_WINDOW_LENGTH_MSB, 871 saa711x_write(sd, R_CF_B_VERT_OUTPUT_WINDOW_LENGTH_MSB,
862 (u8) ((res >> 8) & 0xff)); 872 (u8) ((res >> 8) & 0xff));
863 873
864 /* Scaling settings */ 874 /* Scaling settings */
@@ -869,54 +879,54 @@ static int saa711x_set_size(struct i2c_client *client, int width, int height)
869 HFSC = (int)((1024 * 720) / (HPSC * width)); 879 HFSC = (int)((1024 * 720) / (HPSC * width));
870 /* FIXME hardcodes to "Task B" 880 /* FIXME hardcodes to "Task B"
871 * write H prescaler integer */ 881 * write H prescaler integer */
872 saa711x_write(client, R_D0_B_HORIZ_PRESCALING, 882 saa711x_write(sd, R_D0_B_HORIZ_PRESCALING,
873 (u8) (HPSC & 0x3f)); 883 (u8) (HPSC & 0x3f));
874 884
875 v4l_dbg(1, debug, client, "Hpsc: 0x%05x, Hfsc: 0x%05x\n", HPSC, HFSC); 885 v4l2_dbg(1, debug, sd, "Hpsc: 0x%05x, Hfsc: 0x%05x\n", HPSC, HFSC);
876 /* write H fine-scaling (luminance) */ 886 /* write H fine-scaling (luminance) */
877 saa711x_write(client, R_D8_B_HORIZ_LUMA_SCALING_INC, 887 saa711x_write(sd, R_D8_B_HORIZ_LUMA_SCALING_INC,
878 (u8) (HFSC & 0xff)); 888 (u8) (HFSC & 0xff));
879 saa711x_write(client, R_D9_B_HORIZ_LUMA_SCALING_INC_MSB, 889 saa711x_write(sd, R_D9_B_HORIZ_LUMA_SCALING_INC_MSB,
880 (u8) ((HFSC >> 8) & 0xff)); 890 (u8) ((HFSC >> 8) & 0xff));
881 /* write H fine-scaling (chrominance) 891 /* write H fine-scaling (chrominance)
882 * must be lum/2, so i'll just bitshift :) */ 892 * must be lum/2, so i'll just bitshift :) */
883 saa711x_write(client, R_DC_B_HORIZ_CHROMA_SCALING, 893 saa711x_write(sd, R_DC_B_HORIZ_CHROMA_SCALING,
884 (u8) ((HFSC >> 1) & 0xff)); 894 (u8) ((HFSC >> 1) & 0xff));
885 saa711x_write(client, R_DD_B_HORIZ_CHROMA_SCALING_MSB, 895 saa711x_write(sd, R_DD_B_HORIZ_CHROMA_SCALING_MSB,
886 (u8) ((HFSC >> 9) & 0xff)); 896 (u8) ((HFSC >> 9) & 0xff));
887 897
888 VSCY = (int)((1024 * Vsrc) / height); 898 VSCY = (int)((1024 * Vsrc) / height);
889 v4l_dbg(1, debug, client, "Vsrc: %d, Vscy: 0x%05x\n", Vsrc, VSCY); 899 v4l2_dbg(1, debug, sd, "Vsrc: %d, Vscy: 0x%05x\n", Vsrc, VSCY);
890 900
891 /* Correct Contrast and Luminance */ 901 /* Correct Contrast and Luminance */
892 saa711x_write(client, R_D5_B_LUMA_CONTRAST_CNTL, 902 saa711x_write(sd, R_D5_B_LUMA_CONTRAST_CNTL,
893 (u8) (64 * 1024 / VSCY)); 903 (u8) (64 * 1024 / VSCY));
894 saa711x_write(client, R_D6_B_CHROMA_SATURATION_CNTL, 904 saa711x_write(sd, R_D6_B_CHROMA_SATURATION_CNTL,
895 (u8) (64 * 1024 / VSCY)); 905 (u8) (64 * 1024 / VSCY));
896 906
897 /* write V fine-scaling (luminance) */ 907 /* write V fine-scaling (luminance) */
898 saa711x_write(client, R_E0_B_VERT_LUMA_SCALING_INC, 908 saa711x_write(sd, R_E0_B_VERT_LUMA_SCALING_INC,
899 (u8) (VSCY & 0xff)); 909 (u8) (VSCY & 0xff));
900 saa711x_write(client, R_E1_B_VERT_LUMA_SCALING_INC_MSB, 910 saa711x_write(sd, R_E1_B_VERT_LUMA_SCALING_INC_MSB,
901 (u8) ((VSCY >> 8) & 0xff)); 911 (u8) ((VSCY >> 8) & 0xff));
902 /* write V fine-scaling (chrominance) */ 912 /* write V fine-scaling (chrominance) */
903 saa711x_write(client, R_E2_B_VERT_CHROMA_SCALING_INC, 913 saa711x_write(sd, R_E2_B_VERT_CHROMA_SCALING_INC,
904 (u8) (VSCY & 0xff)); 914 (u8) (VSCY & 0xff));
905 saa711x_write(client, R_E3_B_VERT_CHROMA_SCALING_INC_MSB, 915 saa711x_write(sd, R_E3_B_VERT_CHROMA_SCALING_INC_MSB,
906 (u8) ((VSCY >> 8) & 0xff)); 916 (u8) ((VSCY >> 8) & 0xff));
907 917
908 saa711x_writeregs(client, saa7115_cfg_reset_scaler); 918 saa711x_writeregs(sd, saa7115_cfg_reset_scaler);
909 919
910 /* Activates task "B" */ 920 /* Activates task "B" */
911 saa711x_write(client, R_80_GLOBAL_CNTL_1, 921 saa711x_write(sd, R_80_GLOBAL_CNTL_1,
912 saa711x_read(client,R_80_GLOBAL_CNTL_1) | 0x20); 922 saa711x_read(sd, R_80_GLOBAL_CNTL_1) | 0x20);
913 923
914 return 0; 924 return 0;
915} 925}
916 926
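The scaler programming above is fixed-point with 1024 meaning 1:1: HFSC = (1024 * 720) / (HPSC * width) is the horizontal fine-scale increment against the 720-pixel source line (HPSC being the integer prescale chosen earlier in the function, not shown in this hunk), and VSCY = (1024 * Vsrc) / height is the vertical increment against the 480- or 576-line source. A minimal user-space sketch of the same arithmetic, with illustrative names only:

#include <stdio.h>

/* Sketch only: mirrors the fixed-point scale math used above (1024 == 1:1). */
static void scale_example(int width, int height, int hpsc, int is_50hz)
{
	int vsrc = is_50hz ? 576 : 480;
	int hfsc = (1024 * 720) / (hpsc * width);	/* horizontal fine scale */
	int vscy = (1024 * vsrc) / height;		/* vertical scale */

	printf("%dx%d: HFSC=0x%05x VSCY=0x%05x\n", width, height, hfsc, vscy);
}

int main(void)
{
	scale_example(720, 576, 1, 1);	/* full size, 50 Hz: both 0x00400 (1:1) */
	scale_example(360, 288, 2, 1);	/* half size, 50 Hz: 0x00400 and 0x00800 */
	return 0;
}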
917static void saa711x_set_v4lstd(struct i2c_client *client, v4l2_std_id std) 927static void saa711x_set_v4lstd(struct v4l2_subdev *sd, v4l2_std_id std)
918{ 928{
919 struct saa711x_state *state = i2c_get_clientdata(client); 929 struct saa711x_state *state = to_state(sd);
920 930
921 /* Prevent unnecessary standard changes. During a standard 931 /* Prevent unnecessary standard changes. During a standard
922 change the I-Port is temporarily disabled. Any devices 932 change the I-Port is temporarily disabled. Any devices
@@ -932,13 +942,13 @@ static void saa711x_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
932 942
933 // This works for NTSC-M, SECAM-L and the 50Hz PAL variants. 943 // This works for NTSC-M, SECAM-L and the 50Hz PAL variants.
934 if (std & V4L2_STD_525_60) { 944 if (std & V4L2_STD_525_60) {
935 v4l_dbg(1, debug, client, "decoder set standard 60 Hz\n"); 945 v4l2_dbg(1, debug, sd, "decoder set standard 60 Hz\n");
936 saa711x_writeregs(client, saa7115_cfg_60hz_video); 946 saa711x_writeregs(sd, saa7115_cfg_60hz_video);
937 saa711x_set_size(client, 720, 480); 947 saa711x_set_size(sd, 720, 480);
938 } else { 948 } else {
939 v4l_dbg(1, debug, client, "decoder set standard 50 Hz\n"); 949 v4l2_dbg(1, debug, sd, "decoder set standard 50 Hz\n");
940 saa711x_writeregs(client, saa7115_cfg_50hz_video); 950 saa711x_writeregs(sd, saa7115_cfg_50hz_video);
941 saa711x_set_size(client, 720, 576); 951 saa711x_set_size(sd, 720, 576);
942 } 952 }
943 953
944 /* Register 0E - Bits D6-D4 on NO-AUTO mode 954 /* Register 0E - Bits D6-D4 on NO-AUTO mode
@@ -952,7 +962,7 @@ static void saa711x_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
952 */ 962 */
953 if (state->ident == V4L2_IDENT_SAA7111 || 963 if (state->ident == V4L2_IDENT_SAA7111 ||
954 state->ident == V4L2_IDENT_SAA7113) { 964 state->ident == V4L2_IDENT_SAA7113) {
955 u8 reg = saa711x_read(client, R_0E_CHROMA_CNTL_1) & 0x8f; 965 u8 reg = saa711x_read(sd, R_0E_CHROMA_CNTL_1) & 0x8f;
956 966
957 if (std == V4L2_STD_PAL_M) { 967 if (std == V4L2_STD_PAL_M) {
958 reg |= 0x30; 968 reg |= 0x30;
@@ -965,87 +975,31 @@ static void saa711x_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
965 } else if (std & V4L2_STD_SECAM) { 975 } else if (std & V4L2_STD_SECAM) {
966 reg |= 0x50; 976 reg |= 0x50;
967 } 977 }
968 saa711x_write(client, R_0E_CHROMA_CNTL_1, reg); 978 saa711x_write(sd, R_0E_CHROMA_CNTL_1, reg);
969 } else { 979 } else {
970 /* restart task B if needed */ 980 /* restart task B if needed */
971 int taskb = saa711x_read(client, R_80_GLOBAL_CNTL_1) & 0x10; 981 int taskb = saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10;
972 982
973 if (taskb && state->ident == V4L2_IDENT_SAA7114) { 983 if (taskb && state->ident == V4L2_IDENT_SAA7114) {
974 saa711x_writeregs(client, saa7115_cfg_vbi_on); 984 saa711x_writeregs(sd, saa7115_cfg_vbi_on);
975 } 985 }
976 986
977 /* switch audio mode too! */ 987 /* switch audio mode too! */
978 saa711x_set_audio_clock_freq(client, state->audclk_freq); 988 saa711x_s_clock_freq(sd, state->audclk_freq);
979 }
980}
981
982static v4l2_std_id saa711x_get_v4lstd(struct i2c_client *client)
983{
984 struct saa711x_state *state = i2c_get_clientdata(client);
985
986 return state->std;
987}
988
989static void saa711x_log_status(struct i2c_client *client)
990{
991 struct saa711x_state *state = i2c_get_clientdata(client);
992 int reg1e, reg1f;
993 int signalOk;
994 int vcr;
995
996 v4l_info(client, "Audio frequency: %d Hz\n", state->audclk_freq);
997 if (state->ident != V4L2_IDENT_SAA7115) {
998 /* status for the saa7114 */
999 reg1f = saa711x_read(client, R_1F_STATUS_BYTE_2_VD_DEC);
1000 signalOk = (reg1f & 0xc1) == 0x81;
1001 v4l_info(client, "Video signal: %s\n", signalOk ? "ok" : "bad");
1002 v4l_info(client, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
1003 return;
1004 }
1005
1006 /* status for the saa7115 */
1007 reg1e = saa711x_read(client, R_1E_STATUS_BYTE_1_VD_DEC);
1008 reg1f = saa711x_read(client, R_1F_STATUS_BYTE_2_VD_DEC);
1009
1010 signalOk = (reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80;
1011 vcr = !(reg1f & 0x10);
1012
1013 if (state->input >= 6) {
1014 v4l_info(client, "Input: S-Video %d\n", state->input - 6);
1015 } else {
1016 v4l_info(client, "Input: Composite %d\n", state->input);
1017 } 989 }
1018 v4l_info(client, "Video signal: %s\n", signalOk ? (vcr ? "VCR" : "broadcast/DVD") : "bad");
1019 v4l_info(client, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
1020
1021 switch (reg1e & 0x03) {
1022 case 1:
1023 v4l_info(client, "Detected format: NTSC\n");
1024 break;
1025 case 2:
1026 v4l_info(client, "Detected format: PAL\n");
1027 break;
1028 case 3:
1029 v4l_info(client, "Detected format: SECAM\n");
1030 break;
1031 default:
1032 v4l_info(client, "Detected format: BW/No color\n");
1033 break;
1034 }
1035 v4l_info(client, "Width, Height: %d, %d\n", state->width, state->height);
1036} 990}
1037 991
1038/* setup the sliced VBI lcr registers according to the sliced VBI format */ 992/* setup the sliced VBI lcr registers according to the sliced VBI format */
1039static void saa711x_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_format *fmt) 993static void saa711x_set_lcr(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
1040{ 994{
1041 struct saa711x_state *state = i2c_get_clientdata(client); 995 struct saa711x_state *state = to_state(sd);
1042 int is_50hz = (state->std & V4L2_STD_625_50); 996 int is_50hz = (state->std & V4L2_STD_625_50);
1043 u8 lcr[24]; 997 u8 lcr[24];
1044 int i, x; 998 int i, x;
1045 999
1046#if 1 1000#if 1
1047 /* saa7113/7114/7118 VBI support are experimental */ 1001 /* saa7113/7114/7118 VBI support are experimental */
1048 if (!saa711x_has_reg(state->ident,R_41_LCR_BASE)) 1002 if (!saa711x_has_reg(state->ident, R_41_LCR_BASE))
1049 return; 1003 return;
1050 1004
1051#else 1005#else
@@ -1109,16 +1063,16 @@ static void saa711x_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
1109 1063
1110 /* write the lcr registers */ 1064 /* write the lcr registers */
1111 for (i = 2; i <= 23; i++) { 1065 for (i = 2; i <= 23; i++) {
1112 saa711x_write(client, i - 2 + R_41_LCR_BASE, lcr[i]); 1066 saa711x_write(sd, i - 2 + R_41_LCR_BASE, lcr[i]);
1113 } 1067 }
1114 1068
1115 /* enable/disable raw VBI capturing */ 1069 /* enable/disable raw VBI capturing */
1116 saa711x_writeregs(client, fmt == NULL ? 1070 saa711x_writeregs(sd, fmt == NULL ?
1117 saa7115_cfg_vbi_on : 1071 saa7115_cfg_vbi_on :
1118 saa7115_cfg_vbi_off); 1072 saa7115_cfg_vbi_off);
1119} 1073}
1120 1074
1121static int saa711x_get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt) 1075static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
1122{ 1076{
1123 static u16 lcr2vbi[] = { 1077 static u16 lcr2vbi[] = {
1124 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */ 1078 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
@@ -1134,10 +1088,10 @@ static int saa711x_get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt
1134 return -EINVAL; 1088 return -EINVAL;
1135 memset(sliced, 0, sizeof(*sliced)); 1089 memset(sliced, 0, sizeof(*sliced));
1136 /* done if using raw VBI */ 1090 /* done if using raw VBI */
1137 if (saa711x_read(client, R_80_GLOBAL_CNTL_1) & 0x10) 1091 if (saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10)
1138 return 0; 1092 return 0;
1139 for (i = 2; i <= 23; i++) { 1093 for (i = 2; i <= 23; i++) {
1140 u8 v = saa711x_read(client, i - 2 + R_41_LCR_BASE); 1094 u8 v = saa711x_read(sd, i - 2 + R_41_LCR_BASE);
1141 1095
1142 sliced->service_lines[0][i] = lcr2vbi[v >> 4]; 1096 sliced->service_lines[0][i] = lcr2vbi[v >> 4];
1143 sliced->service_lines[1][i] = lcr2vbi[v & 0xf]; 1097 sliced->service_lines[1][i] = lcr2vbi[v & 0xf];
@@ -1147,20 +1101,20 @@ static int saa711x_get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt
1147 return 0; 1101 return 0;
1148} 1102}
1149 1103
1150static int saa711x_set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt) 1104static int saa711x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
1151{ 1105{
1152 if (fmt->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) { 1106 if (fmt->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
1153 saa711x_set_lcr(client, &fmt->fmt.sliced); 1107 saa711x_set_lcr(sd, &fmt->fmt.sliced);
1154 return 0; 1108 return 0;
1155 } 1109 }
1156 if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) { 1110 if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
1157 saa711x_set_lcr(client, NULL); 1111 saa711x_set_lcr(sd, NULL);
1158 return 0; 1112 return 0;
1159 } 1113 }
1160 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1114 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1161 return -EINVAL; 1115 return -EINVAL;
1162 1116
1163 return saa711x_set_size(client,fmt->fmt.pix.width,fmt->fmt.pix.height); 1117 return saa711x_set_size(sd, fmt->fmt.pix.width, fmt->fmt.pix.height);
1164} 1118}
1165 1119
1166/* Decode the sliced VBI data stream as created by the saa7115. 1120/* Decode the sliced VBI data stream as created by the saa7115.
@@ -1169,13 +1123,12 @@ static int saa711x_set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt
1169 The current implementation uses SAV/EAV codes and not the ancillary data 1123 The current implementation uses SAV/EAV codes and not the ancillary data
1170 headers. The vbi->p pointer points to the R_5E_SDID byte right after the SAV 1124 headers. The vbi->p pointer points to the R_5E_SDID byte right after the SAV
1171 code. */ 1125 code. */
1172static void saa711x_decode_vbi_line(struct i2c_client *client, 1126static int saa711x_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi)
1173 struct v4l2_decode_vbi_line *vbi)
1174{ 1127{
1128 struct saa711x_state *state = to_state(sd);
1175 static const char vbi_no_data_pattern[] = { 1129 static const char vbi_no_data_pattern[] = {
1176 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0 1130 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0
1177 }; 1131 };
1178 struct saa711x_state *state = i2c_get_clientdata(client);
1179 u8 *p = vbi->p; 1132 u8 *p = vbi->p;
1180 u32 wss; 1133 u32 wss;
1181 int id1, id2; /* the ID1 and ID2 bytes from the internal header */ 1134 int id1, id2; /* the ID1 and ID2 bytes from the internal header */
@@ -1202,7 +1155,7 @@ static void saa711x_decode_vbi_line(struct i2c_client *client,
1202 /* If the VBI slicer does not detect any signal it will fill up 1155 /* If the VBI slicer does not detect any signal it will fill up
1203 the payload buffer with 0xa0 bytes. */ 1156 the payload buffer with 0xa0 bytes. */
1204 if (!memcmp(p, vbi_no_data_pattern, sizeof(vbi_no_data_pattern))) 1157 if (!memcmp(p, vbi_no_data_pattern, sizeof(vbi_no_data_pattern)))
1205 return; 1158 return 0;
1206 1159
1207 /* decode payloads */ 1160 /* decode payloads */
1208 switch (id2) { 1161 switch (id2) {
@@ -1211,275 +1164,352 @@ static void saa711x_decode_vbi_line(struct i2c_client *client,
1211 break; 1164 break;
1212 case 4: 1165 case 4:
1213 if (!saa711x_odd_parity(p[0]) || !saa711x_odd_parity(p[1])) 1166 if (!saa711x_odd_parity(p[0]) || !saa711x_odd_parity(p[1]))
1214 return; 1167 return 0;
1215 vbi->type = V4L2_SLICED_CAPTION_525; 1168 vbi->type = V4L2_SLICED_CAPTION_525;
1216 break; 1169 break;
1217 case 5: 1170 case 5:
1218 wss = saa711x_decode_wss(p); 1171 wss = saa711x_decode_wss(p);
1219 if (wss == -1) 1172 if (wss == -1)
1220 return; 1173 return 0;
1221 p[0] = wss & 0xff; 1174 p[0] = wss & 0xff;
1222 p[1] = wss >> 8; 1175 p[1] = wss >> 8;
1223 vbi->type = V4L2_SLICED_WSS_625; 1176 vbi->type = V4L2_SLICED_WSS_625;
1224 break; 1177 break;
1225 case 7: 1178 case 7:
1226 if (saa711x_decode_vps(p, p) != 0) 1179 if (saa711x_decode_vps(p, p) != 0)
1227 return; 1180 return 0;
1228 vbi->type = V4L2_SLICED_VPS; 1181 vbi->type = V4L2_SLICED_VPS;
1229 break; 1182 break;
1230 default: 1183 default:
1231 return; 1184 break;
1232 } 1185 }
1186 return 0;
1233} 1187}
1234 1188
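In case 4 above a line is accepted as closed captions only if both payload bytes pass an odd-parity check; saa711x_odd_parity() itself is defined earlier in the file and not shown in this hunk. A hedged sketch of a typical XOR-fold parity test with the same meaning (name and implementation are illustrative, not taken from this diff):

/* Returns 1 when the byte contains an odd number of set bits, as CC
 * payload bytes (7 data bits plus odd parity) are required to have. */
static int odd_parity_example(unsigned char c)
{
	c ^= c >> 4;
	c ^= c >> 2;
	c ^= c >> 1;
	return c & 1;
}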
1235/* ============ SAA7115 AUDIO settings (end) ============= */ 1189/* ============ SAA7115 AUDIO settings (end) ============= */
1236 1190
1237static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *arg) 1191static int saa711x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1238{ 1192{
1239 struct saa711x_state *state = i2c_get_clientdata(client); 1193 struct saa711x_state *state = to_state(sd);
1194 int status;
1240 1195
1241 /* ioctls to allow direct access to the saa7115 registers for testing */ 1196 if (state->radio)
1242 switch (cmd) { 1197 return 0;
1243 case VIDIOC_S_FMT: 1198 status = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
1244 return saa711x_set_v4lfmt(client, (struct v4l2_format *)arg);
1245 1199
1246 case VIDIOC_G_FMT: 1200 v4l2_dbg(1, debug, sd, "status: 0x%02x\n", status);
1247 return saa711x_get_v4lfmt(client, (struct v4l2_format *)arg); 1201 vt->signal = ((status & (1 << 6)) == 0) ? 0xffff : 0x0;
1202 return 0;
1203}
1248 1204
1249 case VIDIOC_INT_AUDIO_CLOCK_FREQ: 1205static int saa711x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
1250 return saa711x_set_audio_clock_freq(client, *(u32 *)arg); 1206{
1207 switch (qc->id) {
1208 case V4L2_CID_BRIGHTNESS:
1209 case V4L2_CID_CONTRAST:
1210 case V4L2_CID_SATURATION:
1211 case V4L2_CID_HUE:
1212 return v4l2_ctrl_query_fill_std(qc);
1213 default:
1214 return -EINVAL;
1215 }
1216}
1251 1217
1252 case VIDIOC_G_TUNER: 1218static int saa711x_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
1253 { 1219{
1254 struct v4l2_tuner *vt = arg; 1220 struct saa711x_state *state = to_state(sd);
1255 int status;
1256 1221
1257 if (state->radio) 1222 state->radio = 0;
1258 break; 1223 saa711x_set_v4lstd(sd, std);
1259 status = saa711x_read(client, R_1F_STATUS_BYTE_2_VD_DEC); 1224 return 0;
1225}
1260 1226
1261 v4l_dbg(1, debug, client, "status: 0x%02x\n", status); 1227static int saa711x_s_radio(struct v4l2_subdev *sd)
1262 vt->signal = ((status & (1 << 6)) == 0) ? 0xffff : 0x0; 1228{
1263 break; 1229 struct saa711x_state *state = to_state(sd);
1264 }
1265 1230
1266 case VIDIOC_LOG_STATUS: 1231 state->radio = 1;
1267 saa711x_log_status(client); 1232 return 0;
1268 break; 1233}
1269 1234
1270 case VIDIOC_G_CTRL: 1235static int saa711x_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
1271 return saa711x_get_v4lctrl(client, (struct v4l2_control *)arg); 1236{
1237 struct saa711x_state *state = to_state(sd);
1238 u32 input = route->input;
1239 u8 mask = (state->ident == V4L2_IDENT_SAA7111) ? 0xf8 : 0xf0;
1240
1241 v4l2_dbg(1, debug, sd, "decoder set input %d output %d\n", route->input, route->output);
1242 /* saa7111/3 does not have these inputs */
1243 if ((state->ident == V4L2_IDENT_SAA7113 ||
1244 state->ident == V4L2_IDENT_SAA7111) &&
1245 (route->input == SAA7115_COMPOSITE4 ||
1246 route->input == SAA7115_COMPOSITE5)) {
1247 return -EINVAL;
1248 }
1249 if (route->input > SAA7115_SVIDEO3)
1250 return -EINVAL;
1251 if (route->output > SAA7115_IPORT_ON)
1252 return -EINVAL;
1253 if (state->input == route->input && state->output == route->output)
1254 return 0;
1255 v4l2_dbg(1, debug, sd, "now setting %s input %s output\n",
1256 (route->input >= SAA7115_SVIDEO0) ? "S-Video" : "Composite",
1257 (route->output == SAA7115_IPORT_ON) ? "iport on" : "iport off");
1258 state->input = route->input;
1259
1260 /* saa7111 has slightly different input numbering */
1261 if (state->ident == V4L2_IDENT_SAA7111) {
1262 if (input >= SAA7115_COMPOSITE4)
1263 input -= 2;
1264 /* saa7111 specific */
1265 saa711x_write(sd, R_10_CHROMA_CNTL_2,
1266 (saa711x_read(sd, R_10_CHROMA_CNTL_2) & 0x3f) |
1267 ((route->output & 0xc0) ^ 0x40));
1268 saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL,
1269 (saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL) & 0xf0) |
1270 ((route->output & 2) ? 0x0a : 0));
1271 }
1272 1272
1273 case VIDIOC_S_CTRL: 1273 /* select mode */
1274 return saa711x_set_v4lctrl(client, (struct v4l2_control *)arg); 1274 saa711x_write(sd, R_02_INPUT_CNTL_1,
1275 (saa711x_read(sd, R_02_INPUT_CNTL_1) & mask) |
1276 input);
1275 1277
1276 case VIDIOC_QUERYCTRL: 1278 /* bypass chrominance trap for S-Video modes */
1277 { 1279 saa711x_write(sd, R_09_LUMA_CNTL,
1278 struct v4l2_queryctrl *qc = arg; 1280 (saa711x_read(sd, R_09_LUMA_CNTL) & 0x7f) |
1281 (state->input >= SAA7115_SVIDEO0 ? 0x80 : 0x0));
1279 1282
1280 switch (qc->id) { 1283 state->output = route->output;
1281 case V4L2_CID_BRIGHTNESS: 1284 if (state->ident == V4L2_IDENT_SAA7114 ||
1282 case V4L2_CID_CONTRAST: 1285 state->ident == V4L2_IDENT_SAA7115) {
1283 case V4L2_CID_SATURATION: 1286 saa711x_write(sd, R_83_X_PORT_I_O_ENA_AND_OUT_CLK,
1284 case V4L2_CID_HUE: 1287 (saa711x_read(sd, R_83_X_PORT_I_O_ENA_AND_OUT_CLK) & 0xfe) |
1285 return v4l2_ctrl_query_fill_std(qc); 1288 (state->output & 0x01));
1286 default:
1287 return -EINVAL;
1288 }
1289 } 1289 }
1290 return 0;
1291}
1290 1292
1291 case VIDIOC_G_STD: 1293static int saa711x_s_gpio(struct v4l2_subdev *sd, u32 val)
1292 *(v4l2_std_id *)arg = saa711x_get_v4lstd(client); 1294{
1293 break; 1295 struct saa711x_state *state = to_state(sd);
1294 1296
1295 case VIDIOC_S_STD: 1297 if (state->ident != V4L2_IDENT_SAA7111)
1296 state->radio = 0; 1298 return -EINVAL;
1297 saa711x_set_v4lstd(client, *(v4l2_std_id *)arg); 1299 saa711x_write(sd, 0x11, (saa711x_read(sd, 0x11) & 0x7f) |
1298 break; 1300 (val ? 0x80 : 0));
1301 return 0;
1302}
1299 1303
1300 case AUDC_SET_RADIO: 1304static int saa711x_s_stream(struct v4l2_subdev *sd, int enable)
1301 state->radio = 1; 1305{
1302 break; 1306 struct saa711x_state *state = to_state(sd);
1303 1307
1304 case VIDIOC_INT_G_VIDEO_ROUTING: 1308 v4l2_dbg(1, debug, sd, "%s output\n",
1305 { 1309 enable ? "enable" : "disable");
1306 struct v4l2_routing *route = arg;
1307 1310
1308 route->input = state->input; 1311 if (state->enable != enable) {
1309 route->output = state->output; 1312 state->enable = enable;
1310 break; 1313 saa711x_write(sd, R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED,
1314 state->enable);
1311 } 1315 }
1316 return 0;
1317}
1312 1318
1313 case VIDIOC_INT_S_VIDEO_ROUTING: 1319static int saa711x_s_crystal_freq(struct v4l2_subdev *sd, struct v4l2_crystal_freq *freq)
1314 { 1320{
1315 struct v4l2_routing *route = arg; 1321 struct saa711x_state *state = to_state(sd);
1316 u32 input = route->input;
1317 u8 mask = (state->ident == V4L2_IDENT_SAA7111) ? 0xf8 : 0xf0;
1318
1319 v4l_dbg(1, debug, client, "decoder set input %d output %d\n", route->input, route->output);
1320 /* saa7111/3 does not have these inputs */
1321 if ((state->ident == V4L2_IDENT_SAA7113 ||
1322 state->ident == V4L2_IDENT_SAA7111) &&
1323 (route->input == SAA7115_COMPOSITE4 ||
1324 route->input == SAA7115_COMPOSITE5)) {
1325 return -EINVAL;
1326 }
1327 if (route->input > SAA7115_SVIDEO3)
1328 return -EINVAL;
1329 if (route->output > SAA7115_IPORT_ON)
1330 return -EINVAL;
1331 if (state->input == route->input && state->output == route->output)
1332 break;
1333 v4l_dbg(1, debug, client, "now setting %s input %s output\n",
1334 (route->input >= SAA7115_SVIDEO0) ? "S-Video" : "Composite", (route->output == SAA7115_IPORT_ON) ? "iport on" : "iport off");
1335 state->input = route->input;
1336
1337 /* saa7111 has slightly different input numbering */
1338 if (state->ident == V4L2_IDENT_SAA7111) {
1339 if (input >= SAA7115_COMPOSITE4)
1340 input -= 2;
1341 /* saa7111 specific */
1342 saa711x_write(client, R_10_CHROMA_CNTL_2,
1343 (saa711x_read(client, R_10_CHROMA_CNTL_2) & 0x3f) |
1344 ((route->output & 0xc0) ^ 0x40));
1345 saa711x_write(client, R_13_RT_X_PORT_OUT_CNTL,
1346 (saa711x_read(client, R_13_RT_X_PORT_OUT_CNTL) & 0xf0) |
1347 ((route->output & 2) ? 0x0a : 0));
1348 }
1349 1322
1350 /* select mode */ 1323 if (freq->freq != SAA7115_FREQ_32_11_MHZ &&
1351 saa711x_write(client, R_02_INPUT_CNTL_1, 1324 freq->freq != SAA7115_FREQ_24_576_MHZ)
1352 (saa711x_read(client, R_02_INPUT_CNTL_1) & mask) | 1325 return -EINVAL;
1353 input); 1326 state->crystal_freq = freq->freq;
1327 state->cgcdiv = (freq->flags & SAA7115_FREQ_FL_CGCDIV) ? 3 : 4;
1328 state->ucgc = (freq->flags & SAA7115_FREQ_FL_UCGC) ? 1 : 0;
1329 state->apll = (freq->flags & SAA7115_FREQ_FL_APLL) ? 1 : 0;
1330 saa711x_s_clock_freq(sd, state->audclk_freq);
1331 return 0;
1332}
1354 1333
1355 /* bypass chrominance trap for S-Video modes */ 1334static int saa711x_reset(struct v4l2_subdev *sd, u32 val)
1356 saa711x_write(client, R_09_LUMA_CNTL, 1335{
1357 (saa711x_read(client, R_09_LUMA_CNTL) & 0x7f) | 1336 v4l2_dbg(1, debug, sd, "decoder RESET\n");
1358 (state->input >= SAA7115_SVIDEO0 ? 0x80 : 0x0)); 1337 saa711x_writeregs(sd, saa7115_cfg_reset_scaler);
1338 return 0;
1339}
1359 1340
1360 state->output = route->output; 1341static int saa711x_g_vbi_data(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *data)
1361 if (state->ident == V4L2_IDENT_SAA7114 || 1342{
1362 state->ident == V4L2_IDENT_SAA7115) { 1343 /* Note: the internal field ID is inverted for NTSC,
1363 saa711x_write(client, R_83_X_PORT_I_O_ENA_AND_OUT_CLK, 1344 so data->field 0 maps to the saa7115 even field,
1364 (saa711x_read(client, R_83_X_PORT_I_O_ENA_AND_OUT_CLK) & 0xfe) | 1345 whereas for PAL it maps to the saa7115 odd field. */
1365 (state->output & 0x01)); 1346 switch (data->id) {
1347 case V4L2_SLICED_WSS_625:
1348 if (saa711x_read(sd, 0x6b) & 0xc0)
1349 return -EIO;
1350 data->data[0] = saa711x_read(sd, 0x6c);
1351 data->data[1] = saa711x_read(sd, 0x6d);
1352 return 0;
1353 case V4L2_SLICED_CAPTION_525:
1354 if (data->field == 0) {
1355 /* CC */
1356 if (saa711x_read(sd, 0x66) & 0x30)
1357 return -EIO;
1358 data->data[0] = saa711x_read(sd, 0x69);
1359 data->data[1] = saa711x_read(sd, 0x6a);
1360 return 0;
1366 } 1361 }
1367 break; 1362 /* XDS */
1363 if (saa711x_read(sd, 0x66) & 0xc0)
1364 return -EIO;
1365 data->data[0] = saa711x_read(sd, 0x67);
1366 data->data[1] = saa711x_read(sd, 0x68);
1367 return 0;
1368 default:
1369 return -EINVAL;
1368 } 1370 }
1371}
1369 1372
1370 case VIDIOC_STREAMON: 1373#ifdef CONFIG_VIDEO_ADV_DEBUG
1371 case VIDIOC_STREAMOFF: 1374static int saa711x_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
1372 v4l_dbg(1, debug, client, "%s output\n", 1375{
1373 (cmd == VIDIOC_STREAMON) ? "enable" : "disable"); 1376 struct i2c_client *client = v4l2_get_subdevdata(sd);
1374
1375 if (state->enable != (cmd == VIDIOC_STREAMON)) {
1376 state->enable = (cmd == VIDIOC_STREAMON);
1377 saa711x_write(client,
1378 R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED,
1379 state->enable);
1380 }
1381 break;
1382 1377
1383 case VIDIOC_INT_S_CRYSTAL_FREQ: 1378 if (!v4l2_chip_match_i2c_client(client,
1384 { 1379 reg->match_type, reg->match_chip))
1385 struct v4l2_crystal_freq *freq = arg; 1380 return -EINVAL;
1381 if (!capable(CAP_SYS_ADMIN))
1382 return -EPERM;
1383 reg->val = saa711x_read(sd, reg->reg & 0xff);
1384 return 0;
1385}
1386 1386
1387 if (freq->freq != SAA7115_FREQ_32_11_MHZ && 1387static int saa711x_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
1388 freq->freq != SAA7115_FREQ_24_576_MHZ) 1388{
1389 return -EINVAL; 1389 struct i2c_client *client = v4l2_get_subdevdata(sd);
1390 state->crystal_freq = freq->freq;
1391 state->cgcdiv = (freq->flags & SAA7115_FREQ_FL_CGCDIV) ? 3 : 4;
1392 state->ucgc = (freq->flags & SAA7115_FREQ_FL_UCGC) ? 1 : 0;
1393 state->apll = (freq->flags & SAA7115_FREQ_FL_APLL) ? 1 : 0;
1394 saa711x_set_audio_clock_freq(client, state->audclk_freq);
1395 break;
1396 }
1397 1390
1398 case VIDIOC_INT_DECODE_VBI_LINE: 1391 if (!v4l2_chip_match_i2c_client(client,
1399 saa711x_decode_vbi_line(client, arg); 1392 reg->match_type, reg->match_chip))
1400 break; 1393 return -EINVAL;
1394 if (!capable(CAP_SYS_ADMIN))
1395 return -EPERM;
1396 saa711x_write(sd, reg->reg & 0xff, reg->val & 0xff);
1397 return 0;
1398}
1399#endif
1401 1400
1402 case VIDIOC_INT_RESET: 1401static int saa711x_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
1403 v4l_dbg(1, debug, client, "decoder RESET\n"); 1402{
1404 saa711x_writeregs(client, saa7115_cfg_reset_scaler); 1403 struct saa711x_state *state = to_state(sd);
1405 break; 1404 struct i2c_client *client = v4l2_get_subdevdata(sd);
1406 1405
1407 case VIDIOC_INT_S_GPIO: 1406 return v4l2_chip_ident_i2c_client(client, chip, state->ident, 0);
1408 if (state->ident != V4L2_IDENT_SAA7111) 1407}
1409 return -EINVAL;
1410 saa711x_write(client, 0x11, (saa711x_read(client, 0x11) & 0x7f) |
1411 (*(u32 *)arg ? 0x80 : 0));
1412 break;
1413 1408
1414 case VIDIOC_INT_G_VBI_DATA: 1409static int saa711x_log_status(struct v4l2_subdev *sd)
1415 { 1410{
1416 struct v4l2_sliced_vbi_data *data = arg; 1411 struct saa711x_state *state = to_state(sd);
1412 int reg1e, reg1f;
1413 int signalOk;
1414 int vcr;
1417 1415
1418 /* Note: the internal field ID is inverted for NTSC, 1416 v4l2_info(sd, "Audio frequency: %d Hz\n", state->audclk_freq);
1419 so data->field 0 maps to the saa7115 even field, 1417 if (state->ident != V4L2_IDENT_SAA7115) {
1420 whereas for PAL it maps to the saa7115 odd field. */ 1418 /* status for the saa7114 */
1421 switch (data->id) { 1419 reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
1422 case V4L2_SLICED_WSS_625: 1420 signalOk = (reg1f & 0xc1) == 0x81;
1423 if (saa711x_read(client, 0x6b) & 0xc0) 1421 v4l2_info(sd, "Video signal: %s\n", signalOk ? "ok" : "bad");
1424 return -EIO; 1422 v4l2_info(sd, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
1425 data->data[0] = saa711x_read(client, 0x6c); 1423 return 0;
1426 data->data[1] = saa711x_read(client, 0x6d);
1427 return 0;
1428 case V4L2_SLICED_CAPTION_525:
1429 if (data->field == 0) {
1430 /* CC */
1431 if (saa711x_read(client, 0x66) & 0x30)
1432 return -EIO;
1433 data->data[0] = saa711x_read(client, 0x69);
1434 data->data[1] = saa711x_read(client, 0x6a);
1435 return 0;
1436 }
1437 /* XDS */
1438 if (saa711x_read(client, 0x66) & 0xc0)
1439 return -EIO;
1440 data->data[0] = saa711x_read(client, 0x67);
1441 data->data[1] = saa711x_read(client, 0x68);
1442 return 0;
1443 default:
1444 return -EINVAL;
1445 }
1446 break;
1447 } 1424 }
1448 1425
1449#ifdef CONFIG_VIDEO_ADV_DEBUG 1426 /* status for the saa7115 */
1450 case VIDIOC_DBG_G_REGISTER: 1427 reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC);
1451 case VIDIOC_DBG_S_REGISTER: 1428 reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
1452 {
1453 struct v4l2_register *reg = arg;
1454 1429
1455 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip)) 1430 signalOk = (reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80;
1456 return -EINVAL; 1431 vcr = !(reg1f & 0x10);
1457 if (!capable(CAP_SYS_ADMIN))
1458 return -EPERM;
1459 if (cmd == VIDIOC_DBG_G_REGISTER)
1460 reg->val = saa711x_read(client, reg->reg & 0xff);
1461 else
1462 saa711x_write(client, reg->reg & 0xff, reg->val & 0xff);
1463 break;
1464 }
1465#endif
1466 1432
1467 case VIDIOC_G_CHIP_IDENT: 1433 if (state->input >= 6)
1468 return v4l2_chip_ident_i2c_client(client, arg, state->ident, 0); 1434 v4l2_info(sd, "Input: S-Video %d\n", state->input - 6);
1435 else
1436 v4l2_info(sd, "Input: Composite %d\n", state->input);
1437 v4l2_info(sd, "Video signal: %s\n", signalOk ? (vcr ? "VCR" : "broadcast/DVD") : "bad");
1438 v4l2_info(sd, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
1469 1439
1440 switch (reg1e & 0x03) {
1441 case 1:
1442 v4l2_info(sd, "Detected format: NTSC\n");
1443 break;
1444 case 2:
1445 v4l2_info(sd, "Detected format: PAL\n");
1446 break;
1447 case 3:
1448 v4l2_info(sd, "Detected format: SECAM\n");
1449 break;
1470 default: 1450 default:
1471 return -EINVAL; 1451 v4l2_info(sd, "Detected format: BW/No color\n");
1452 break;
1472 } 1453 }
1473 1454 v4l2_info(sd, "Width, Height: %d, %d\n", state->width, state->height);
1474 return 0; 1455 return 0;
1475} 1456}
1476 1457
1458static int saa711x_command(struct i2c_client *client, unsigned cmd, void *arg)
1459{
1460 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
1461}
1462
1463/* ----------------------------------------------------------------------- */
1464
1465static const struct v4l2_subdev_core_ops saa711x_core_ops = {
1466 .log_status = saa711x_log_status,
1467 .g_chip_ident = saa711x_g_chip_ident,
1468 .g_ctrl = saa711x_g_ctrl,
1469 .s_ctrl = saa711x_s_ctrl,
1470 .queryctrl = saa711x_queryctrl,
1471 .reset = saa711x_reset,
1472 .s_gpio = saa711x_s_gpio,
1473#ifdef CONFIG_VIDEO_ADV_DEBUG
1474 .g_register = saa711x_g_register,
1475 .s_register = saa711x_s_register,
1476#endif
1477};
1478
1479static const struct v4l2_subdev_tuner_ops saa711x_tuner_ops = {
1480 .s_std = saa711x_s_std,
1481 .s_radio = saa711x_s_radio,
1482 .g_tuner = saa711x_g_tuner,
1483};
1484
1485static const struct v4l2_subdev_audio_ops saa711x_audio_ops = {
1486 .s_clock_freq = saa711x_s_clock_freq,
1487};
1488
1489static const struct v4l2_subdev_video_ops saa711x_video_ops = {
1490 .s_routing = saa711x_s_routing,
1491 .s_crystal_freq = saa711x_s_crystal_freq,
1492 .g_fmt = saa711x_g_fmt,
1493 .s_fmt = saa711x_s_fmt,
1494 .g_vbi_data = saa711x_g_vbi_data,
1495 .decode_vbi_line = saa711x_decode_vbi_line,
1496 .s_stream = saa711x_s_stream,
1497};
1498
1499static const struct v4l2_subdev_ops saa711x_ops = {
1500 .core = &saa711x_core_ops,
1501 .tuner = &saa711x_tuner_ops,
1502 .audio = &saa711x_audio_ops,
1503 .video = &saa711x_video_ops,
1504};
1505
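With the operations collected into saa711x_ops, a bridge driver no longer issues ioctl-style commands; it calls through the subdev op tables. A minimal sketch of such a caller, assuming the v4l2_subdev_call() macro from this kernel generation's v4l2-subdev API and the SAA7115_COMPOSITE0 input define from media/saa7115.h (neither appears in this diff):

#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-subdev.h>
#include <media/saa7115.h>

/* Sketch only: select PAL on composite input 0 and start streaming. */
static int bridge_select_input_example(struct v4l2_subdev *sd)
{
	struct v4l2_routing route = {
		.input  = SAA7115_COMPOSITE0,
		.output = SAA7115_IPORT_ON,
	};
	int err;

	err = v4l2_subdev_call(sd, tuner, s_std, V4L2_STD_PAL);
	if (!err)
		err = v4l2_subdev_call(sd, video, s_routing, &route);
	if (!err)
		err = v4l2_subdev_call(sd, video, s_stream, 1);
	return err;
}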
1477/* ----------------------------------------------------------------------- */ 1506/* ----------------------------------------------------------------------- */
1478 1507
1479static int saa7115_probe(struct i2c_client *client, 1508static int saa711x_probe(struct i2c_client *client,
1480 const struct i2c_device_id *id) 1509 const struct i2c_device_id *id)
1481{ 1510{
1482 struct saa711x_state *state; 1511 struct saa711x_state *state;
1512 struct v4l2_subdev *sd;
1483 int i; 1513 int i;
1484 char name[17]; 1514 char name[17];
1485 char chip_id; 1515 char chip_id;
@@ -1490,8 +1520,8 @@ static int saa7115_probe(struct i2c_client *client,
1490 return -EIO; 1520 return -EIO;
1491 1521
1492 for (i = 0; i < 0x0f; i++) { 1522 for (i = 0; i < 0x0f; i++) {
1493 saa711x_write(client, 0, i); 1523 i2c_smbus_write_byte_data(client, 0, i);
1494 name[i] = (saa711x_read(client, 0) & 0x0f) + '0'; 1524 name[i] = (i2c_smbus_read_byte_data(client, 0) & 0x0f) + '0';
1495 if (name[i] > '9') 1525 if (name[i] > '9')
1496 name[i] += 'a' - '9' - 1; 1526 name[i] += 'a' - '9' - 1;
1497 } 1527 }
@@ -1518,7 +1548,8 @@ static int saa7115_probe(struct i2c_client *client,
1518 state = kzalloc(sizeof(struct saa711x_state), GFP_KERNEL); 1548 state = kzalloc(sizeof(struct saa711x_state), GFP_KERNEL);
1519 if (state == NULL) 1549 if (state == NULL)
1520 return -ENOMEM; 1550 return -ENOMEM;
1521 i2c_set_clientdata(client, state); 1551 sd = &state->sd;
1552 v4l2_i2c_subdev_init(sd, client, &saa711x_ops);
1522 state->input = -1; 1553 state->input = -1;
1523 state->output = SAA7115_IPORT_ON; 1554 state->output = SAA7115_IPORT_ON;
1524 state->enable = 1; 1555 state->enable = 1;
@@ -1545,41 +1576,45 @@ static int saa7115_probe(struct i2c_client *client,
1545 break; 1576 break;
1546 default: 1577 default:
1547 state->ident = V4L2_IDENT_SAA7111; 1578 state->ident = V4L2_IDENT_SAA7111;
1548 v4l_info(client, "WARNING: Chip is not known - Falling back to saa7111\n"); 1579 v4l2_info(sd, "WARNING: Chip is not known - Falling back to saa7111\n");
1549 1580
1550 } 1581 }
1551 1582
1552 state->audclk_freq = 48000; 1583 state->audclk_freq = 48000;
1553 1584
1554 v4l_dbg(1, debug, client, "writing init values\n"); 1585 v4l2_dbg(1, debug, sd, "writing init values\n");
1555 1586
1556 /* init to 60hz/48khz */ 1587 /* init to 60hz/48khz */
1557 state->crystal_freq = SAA7115_FREQ_24_576_MHZ; 1588 state->crystal_freq = SAA7115_FREQ_24_576_MHZ;
1558 switch (state->ident) { 1589 switch (state->ident) {
1559 case V4L2_IDENT_SAA7111: 1590 case V4L2_IDENT_SAA7111:
1560 saa711x_writeregs(client, saa7111_init); 1591 saa711x_writeregs(sd, saa7111_init);
1561 break; 1592 break;
1562 case V4L2_IDENT_SAA7113: 1593 case V4L2_IDENT_SAA7113:
1563 saa711x_writeregs(client, saa7113_init); 1594 saa711x_writeregs(sd, saa7113_init);
1564 break; 1595 break;
1565 default: 1596 default:
1566 state->crystal_freq = SAA7115_FREQ_32_11_MHZ; 1597 state->crystal_freq = SAA7115_FREQ_32_11_MHZ;
1567 saa711x_writeregs(client, saa7115_init_auto_input); 1598 saa711x_writeregs(sd, saa7115_init_auto_input);
1568 } 1599 }
1569 if (state->ident != V4L2_IDENT_SAA7111) 1600 if (state->ident != V4L2_IDENT_SAA7111)
1570 saa711x_writeregs(client, saa7115_init_misc); 1601 saa711x_writeregs(sd, saa7115_init_misc);
1571 saa711x_set_v4lstd(client, V4L2_STD_NTSC); 1602 saa711x_set_v4lstd(sd, V4L2_STD_NTSC);
1572 1603
1573 v4l_dbg(1, debug, client, "status: (1E) 0x%02x, (1F) 0x%02x\n", 1604 v4l2_dbg(1, debug, sd, "status: (1E) 0x%02x, (1F) 0x%02x\n",
1574 saa711x_read(client, R_1E_STATUS_BYTE_1_VD_DEC), saa711x_read(client, R_1F_STATUS_BYTE_2_VD_DEC)); 1605 saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC),
1606 saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC));
1575 return 0; 1607 return 0;
1576} 1608}
1577 1609
1578/* ----------------------------------------------------------------------- */ 1610/* ----------------------------------------------------------------------- */
1579 1611
1580static int saa7115_remove(struct i2c_client *client) 1612static int saa711x_remove(struct i2c_client *client)
1581{ 1613{
1582 kfree(i2c_get_clientdata(client)); 1614 struct v4l2_subdev *sd = i2c_get_clientdata(client);
1615
1616 v4l2_device_unregister_subdev(sd);
1617 kfree(to_state(sd));
1583 return 0; 1618 return 0;
1584} 1619}
1585 1620
@@ -1597,9 +1632,9 @@ MODULE_DEVICE_TABLE(i2c, saa7115_id);
1597static struct v4l2_i2c_driver_data v4l2_i2c_data = { 1632static struct v4l2_i2c_driver_data v4l2_i2c_data = {
1598 .name = "saa7115", 1633 .name = "saa7115",
1599 .driverid = I2C_DRIVERID_SAA711X, 1634 .driverid = I2C_DRIVERID_SAA711X,
1600 .command = saa7115_command, 1635 .command = saa711x_command,
1601 .probe = saa7115_probe, 1636 .probe = saa711x_probe,
1602 .remove = saa7115_remove, 1637 .remove = saa711x_remove,
1603 .legacy_class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL, 1638 .legacy_class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL,
1604 .id_table = saa7115_id, 1639 .id_table = saa7115_id,
1605}; 1640};
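The whole saa7115 conversion above follows one pattern, repeated next for saa7127: embed a struct v4l2_subdev in the driver state, recover the state with container_of() via a to_state() helper, bind subdev and I2C client with v4l2_i2c_subdev_init() in probe, and fetch the client back with v4l2_get_subdevdata() wherever raw register access is needed. A condensed sketch of that skeleton under a placeholder "foo" driver (names are illustrative, not from this diff):

#include <linux/i2c.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>

struct foo_state {
	struct v4l2_subdev sd;	/* embedded, so container_of() can recover the state */
	int some_setting;
};

static inline struct foo_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct foo_state, sd);
}

static int foo_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_write_byte_data(client, reg, val);
}

static const struct v4l2_subdev_ops foo_ops = {
	/* .core, .video, .audio op tables go here */
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct foo_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state == NULL)
		return -ENOMEM;
	/* Ties the subdev to the client and stores it as the client's
	 * drvdata, which is what the saa711x_command() shim above relies on. */
	v4l2_i2c_subdev_init(&state->sd, client, &foo_ops);
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);

	v4l2_device_unregister_subdev(sd);
	kfree(to_state(sd));
	return 0;
}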
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index cc02fb18efa7..bfc85654795e 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -53,7 +53,7 @@
53#include <linux/slab.h> 53#include <linux/slab.h>
54#include <linux/i2c.h> 54#include <linux/i2c.h>
55#include <linux/videodev2.h> 55#include <linux/videodev2.h>
56#include <media/v4l2-common.h> 56#include <media/v4l2-device.h>
57#include <media/v4l2-chip-ident.h> 57#include <media/v4l2-chip-ident.h>
58#include <media/v4l2-i2c-drv.h> 58#include <media/v4l2-i2c-drv.h>
59#include <media/saa7127.h> 59#include <media/saa7127.h>
@@ -231,6 +231,7 @@ static struct i2c_reg_value saa7127_init_config_50hz[] = {
231 */ 231 */
232 232
233struct saa7127_state { 233struct saa7127_state {
234 struct v4l2_subdev sd;
234 v4l2_std_id std; 235 v4l2_std_id std;
235 u32 ident; 236 u32 ident;
236 enum saa7127_input_type input_type; 237 enum saa7127_input_type input_type;
@@ -250,6 +251,11 @@ struct saa7127_state {
250 u8 reg_61; 251 u8 reg_61;
251}; 252};
252 253
254static inline struct saa7127_state *to_state(struct v4l2_subdev *sd)
255{
256 return container_of(sd, struct saa7127_state, sd);
257}
258
253static const char * const output_strs[] = 259static const char * const output_strs[] =
254{ 260{
255 "S-Video + Composite", 261 "S-Video + Composite",
@@ -281,32 +287,35 @@ static const char * const wss_strs[] = {
281 287
282/* ----------------------------------------------------------------------- */ 288/* ----------------------------------------------------------------------- */
283 289
284static int saa7127_read(struct i2c_client *client, u8 reg) 290static int saa7127_read(struct v4l2_subdev *sd, u8 reg)
285{ 291{
292 struct i2c_client *client = v4l2_get_subdevdata(sd);
293
286 return i2c_smbus_read_byte_data(client, reg); 294 return i2c_smbus_read_byte_data(client, reg);
287} 295}
288 296
289/* ----------------------------------------------------------------------- */ 297/* ----------------------------------------------------------------------- */
290 298
291static int saa7127_write(struct i2c_client *client, u8 reg, u8 val) 299static int saa7127_write(struct v4l2_subdev *sd, u8 reg, u8 val)
292{ 300{
301 struct i2c_client *client = v4l2_get_subdevdata(sd);
293 int i; 302 int i;
294 303
295 for (i = 0; i < 3; i++) { 304 for (i = 0; i < 3; i++) {
296 if (i2c_smbus_write_byte_data(client, reg, val) == 0) 305 if (i2c_smbus_write_byte_data(client, reg, val) == 0)
297 return 0; 306 return 0;
298 } 307 }
299 v4l_err(client, "I2C Write Problem\n"); 308 v4l2_err(sd, "I2C Write Problem\n");
300 return -1; 309 return -1;
301} 310}
302 311
303/* ----------------------------------------------------------------------- */ 312/* ----------------------------------------------------------------------- */
304 313
305static int saa7127_write_inittab(struct i2c_client *client, 314static int saa7127_write_inittab(struct v4l2_subdev *sd,
306 const struct i2c_reg_value *regs) 315 const struct i2c_reg_value *regs)
307{ 316{
308 while (regs->reg != 0) { 317 while (regs->reg != 0) {
309 saa7127_write(client, regs->reg, regs->value); 318 saa7127_write(sd, regs->reg, regs->value);
310 regs++; 319 regs++;
311 } 320 }
312 return 0; 321 return 0;
@@ -314,16 +323,16 @@ static int saa7127_write_inittab(struct i2c_client *client,
314 323
315/* ----------------------------------------------------------------------- */ 324/* ----------------------------------------------------------------------- */
316 325
317static int saa7127_set_vps(struct i2c_client *client, struct v4l2_sliced_vbi_data *data) 326static int saa7127_set_vps(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
318{ 327{
319 struct saa7127_state *state = i2c_get_clientdata(client); 328 struct saa7127_state *state = to_state(sd);
320 int enable = (data->line != 0); 329 int enable = (data->line != 0);
321 330
322 if (enable && (data->field != 0 || data->line != 16)) 331 if (enable && (data->field != 0 || data->line != 16))
323 return -EINVAL; 332 return -EINVAL;
324 if (state->vps_enable != enable) { 333 if (state->vps_enable != enable) {
325 v4l_dbg(1, debug, client, "Turn VPS Signal %s\n", enable ? "on" : "off"); 334 v4l2_dbg(1, debug, sd, "Turn VPS Signal %s\n", enable ? "on" : "off");
326 saa7127_write(client, 0x54, enable << 7); 335 saa7127_write(sd, 0x54, enable << 7);
327 state->vps_enable = enable; 336 state->vps_enable = enable;
328 } 337 }
329 if (!enable) 338 if (!enable)
@@ -334,91 +343,91 @@ static int saa7127_set_vps(struct i2c_client *client, struct v4l2_sliced_vbi_dat
334 state->vps_data[2] = data->data[9]; 343 state->vps_data[2] = data->data[9];
335 state->vps_data[3] = data->data[10]; 344 state->vps_data[3] = data->data[10];
336 state->vps_data[4] = data->data[11]; 345 state->vps_data[4] = data->data[11];
337 v4l_dbg(1, debug, client, "Set VPS data %02x %02x %02x %02x %02x\n", 346 v4l2_dbg(1, debug, sd, "Set VPS data %02x %02x %02x %02x %02x\n",
338 state->vps_data[0], state->vps_data[1], 347 state->vps_data[0], state->vps_data[1],
339 state->vps_data[2], state->vps_data[3], 348 state->vps_data[2], state->vps_data[3],
340 state->vps_data[4]); 349 state->vps_data[4]);
341 saa7127_write(client, 0x55, state->vps_data[0]); 350 saa7127_write(sd, 0x55, state->vps_data[0]);
342 saa7127_write(client, 0x56, state->vps_data[1]); 351 saa7127_write(sd, 0x56, state->vps_data[1]);
343 saa7127_write(client, 0x57, state->vps_data[2]); 352 saa7127_write(sd, 0x57, state->vps_data[2]);
344 saa7127_write(client, 0x58, state->vps_data[3]); 353 saa7127_write(sd, 0x58, state->vps_data[3]);
345 saa7127_write(client, 0x59, state->vps_data[4]); 354 saa7127_write(sd, 0x59, state->vps_data[4]);
346 return 0; 355 return 0;
347} 356}
348 357
349/* ----------------------------------------------------------------------- */ 358/* ----------------------------------------------------------------------- */
350 359
351static int saa7127_set_cc(struct i2c_client *client, struct v4l2_sliced_vbi_data *data) 360static int saa7127_set_cc(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
352{ 361{
353 struct saa7127_state *state = i2c_get_clientdata(client); 362 struct saa7127_state *state = to_state(sd);
354 u16 cc = data->data[1] << 8 | data->data[0]; 363 u16 cc = data->data[1] << 8 | data->data[0];
355 int enable = (data->line != 0); 364 int enable = (data->line != 0);
356 365
357 if (enable && (data->field != 0 || data->line != 21)) 366 if (enable && (data->field != 0 || data->line != 21))
358 return -EINVAL; 367 return -EINVAL;
359 if (state->cc_enable != enable) { 368 if (state->cc_enable != enable) {
360 v4l_dbg(1, debug, client, 369 v4l2_dbg(1, debug, sd,
361 "Turn CC %s\n", enable ? "on" : "off"); 370 "Turn CC %s\n", enable ? "on" : "off");
362 saa7127_write(client, SAA7127_REG_CLOSED_CAPTION, 371 saa7127_write(sd, SAA7127_REG_CLOSED_CAPTION,
363 (state->xds_enable << 7) | (enable << 6) | 0x11); 372 (state->xds_enable << 7) | (enable << 6) | 0x11);
364 state->cc_enable = enable; 373 state->cc_enable = enable;
365 } 374 }
366 if (!enable) 375 if (!enable)
367 return 0; 376 return 0;
368 377
369 v4l_dbg(2, debug, client, "CC data: %04x\n", cc); 378 v4l2_dbg(2, debug, sd, "CC data: %04x\n", cc);
370 saa7127_write(client, SAA7127_REG_LINE_21_ODD_0, cc & 0xff); 379 saa7127_write(sd, SAA7127_REG_LINE_21_ODD_0, cc & 0xff);
371 saa7127_write(client, SAA7127_REG_LINE_21_ODD_1, cc >> 8); 380 saa7127_write(sd, SAA7127_REG_LINE_21_ODD_1, cc >> 8);
372 state->cc_data = cc; 381 state->cc_data = cc;
373 return 0; 382 return 0;
374} 383}
375 384
376/* ----------------------------------------------------------------------- */ 385/* ----------------------------------------------------------------------- */
377 386
378static int saa7127_set_xds(struct i2c_client *client, struct v4l2_sliced_vbi_data *data) 387static int saa7127_set_xds(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
379{ 388{
380 struct saa7127_state *state = i2c_get_clientdata(client); 389 struct saa7127_state *state = to_state(sd);
381 u16 xds = data->data[1] << 8 | data->data[0]; 390 u16 xds = data->data[1] << 8 | data->data[0];
382 int enable = (data->line != 0); 391 int enable = (data->line != 0);
383 392
384 if (enable && (data->field != 1 || data->line != 21)) 393 if (enable && (data->field != 1 || data->line != 21))
385 return -EINVAL; 394 return -EINVAL;
386 if (state->xds_enable != enable) { 395 if (state->xds_enable != enable) {
387 v4l_dbg(1, debug, client, "Turn XDS %s\n", enable ? "on" : "off"); 396 v4l2_dbg(1, debug, sd, "Turn XDS %s\n", enable ? "on" : "off");
388 saa7127_write(client, SAA7127_REG_CLOSED_CAPTION, 397 saa7127_write(sd, SAA7127_REG_CLOSED_CAPTION,
389 (enable << 7) | (state->cc_enable << 6) | 0x11); 398 (enable << 7) | (state->cc_enable << 6) | 0x11);
390 state->xds_enable = enable; 399 state->xds_enable = enable;
391 } 400 }
392 if (!enable) 401 if (!enable)
393 return 0; 402 return 0;
394 403
395 v4l_dbg(2, debug, client, "XDS data: %04x\n", xds); 404 v4l2_dbg(2, debug, sd, "XDS data: %04x\n", xds);
396 saa7127_write(client, SAA7127_REG_LINE_21_EVEN_0, xds & 0xff); 405 saa7127_write(sd, SAA7127_REG_LINE_21_EVEN_0, xds & 0xff);
397 saa7127_write(client, SAA7127_REG_LINE_21_EVEN_1, xds >> 8); 406 saa7127_write(sd, SAA7127_REG_LINE_21_EVEN_1, xds >> 8);
398 state->xds_data = xds; 407 state->xds_data = xds;
399 return 0; 408 return 0;
400} 409}
401 410
402/* ----------------------------------------------------------------------- */ 411/* ----------------------------------------------------------------------- */
403 412
404static int saa7127_set_wss(struct i2c_client *client, struct v4l2_sliced_vbi_data *data) 413static int saa7127_set_wss(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
405{ 414{
406 struct saa7127_state *state = i2c_get_clientdata(client); 415 struct saa7127_state *state = to_state(sd);
407 int enable = (data->line != 0); 416 int enable = (data->line != 0);
408 417
409 if (enable && (data->field != 0 || data->line != 23)) 418 if (enable && (data->field != 0 || data->line != 23))
410 return -EINVAL; 419 return -EINVAL;
411 if (state->wss_enable != enable) { 420 if (state->wss_enable != enable) {
412 v4l_dbg(1, debug, client, "Turn WSS %s\n", enable ? "on" : "off"); 421 v4l2_dbg(1, debug, sd, "Turn WSS %s\n", enable ? "on" : "off");
413 saa7127_write(client, 0x27, enable << 7); 422 saa7127_write(sd, 0x27, enable << 7);
414 state->wss_enable = enable; 423 state->wss_enable = enable;
415 } 424 }
416 if (!enable) 425 if (!enable)
417 return 0; 426 return 0;
418 427
419 saa7127_write(client, 0x26, data->data[0]); 428 saa7127_write(sd, 0x26, data->data[0]);
420 saa7127_write(client, 0x27, 0x80 | (data->data[1] & 0x3f)); 429 saa7127_write(sd, 0x27, 0x80 | (data->data[1] & 0x3f));
421 v4l_dbg(1, debug, client, 430 v4l2_dbg(1, debug, sd,
422 "WSS mode: %s\n", wss_strs[data->data[0] & 0xf]); 431 "WSS mode: %s\n", wss_strs[data->data[0] & 0xf]);
423 state->wss_mode = (data->data[1] & 0x3f) << 8 | data->data[0]; 432 state->wss_mode = (data->data[1] & 0x3f) << 8 | data->data[0];
424 return 0; 433 return 0;
@@ -426,18 +435,18 @@ static int saa7127_set_wss(struct i2c_client *client, struct v4l2_sliced_vbi_dat
426 435
427/* ----------------------------------------------------------------------- */ 436/* ----------------------------------------------------------------------- */
428 437
429static int saa7127_set_video_enable(struct i2c_client *client, int enable) 438static int saa7127_set_video_enable(struct v4l2_subdev *sd, int enable)
430{ 439{
431 struct saa7127_state *state = i2c_get_clientdata(client); 440 struct saa7127_state *state = to_state(sd);
432 441
433 if (enable) { 442 if (enable) {
434 v4l_dbg(1, debug, client, "Enable Video Output\n"); 443 v4l2_dbg(1, debug, sd, "Enable Video Output\n");
435 saa7127_write(client, 0x2d, state->reg_2d); 444 saa7127_write(sd, 0x2d, state->reg_2d);
436 saa7127_write(client, 0x61, state->reg_61); 445 saa7127_write(sd, 0x61, state->reg_61);
437 } else { 446 } else {
438 v4l_dbg(1, debug, client, "Disable Video Output\n"); 447 v4l2_dbg(1, debug, sd, "Disable Video Output\n");
439 saa7127_write(client, 0x2d, (state->reg_2d & 0xf0)); 448 saa7127_write(sd, 0x2d, (state->reg_2d & 0xf0));
440 saa7127_write(client, 0x61, (state->reg_61 | 0xc0)); 449 saa7127_write(sd, 0x61, (state->reg_61 | 0xc0));
441 } 450 }
442 state->video_enable = enable; 451 state->video_enable = enable;
443 return 0; 452 return 0;
@@ -445,32 +454,32 @@ static int saa7127_set_video_enable(struct i2c_client *client, int enable)
445 454
446/* ----------------------------------------------------------------------- */ 455/* ----------------------------------------------------------------------- */
447 456
448static int saa7127_set_std(struct i2c_client *client, v4l2_std_id std) 457static int saa7127_set_std(struct v4l2_subdev *sd, v4l2_std_id std)
449{ 458{
450 struct saa7127_state *state = i2c_get_clientdata(client); 459 struct saa7127_state *state = to_state(sd);
451 const struct i2c_reg_value *inittab; 460 const struct i2c_reg_value *inittab;
452 461
453 if (std & V4L2_STD_525_60) { 462 if (std & V4L2_STD_525_60) {
454 v4l_dbg(1, debug, client, "Selecting 60 Hz video Standard\n"); 463 v4l2_dbg(1, debug, sd, "Selecting 60 Hz video Standard\n");
455 inittab = saa7127_init_config_60hz; 464 inittab = saa7127_init_config_60hz;
456 state->reg_61 = SAA7127_60HZ_DAC_CONTROL; 465 state->reg_61 = SAA7127_60HZ_DAC_CONTROL;
457 } else { 466 } else {
458 v4l_dbg(1, debug, client, "Selecting 50 Hz video Standard\n"); 467 v4l2_dbg(1, debug, sd, "Selecting 50 Hz video Standard\n");
459 inittab = saa7127_init_config_50hz; 468 inittab = saa7127_init_config_50hz;
460 state->reg_61 = SAA7127_50HZ_DAC_CONTROL; 469 state->reg_61 = SAA7127_50HZ_DAC_CONTROL;
461 } 470 }
462 471
463 /* Write Table */ 472 /* Write Table */
464 saa7127_write_inittab(client, inittab); 473 saa7127_write_inittab(sd, inittab);
465 state->std = std; 474 state->std = std;
466 return 0; 475 return 0;
467} 476}
468 477
469/* ----------------------------------------------------------------------- */ 478/* ----------------------------------------------------------------------- */
470 479
471static int saa7127_set_output_type(struct i2c_client *client, int output) 480static int saa7127_set_output_type(struct v4l2_subdev *sd, int output)
472{ 481{
473 struct saa7127_state *state = i2c_get_clientdata(client); 482 struct saa7127_state *state = to_state(sd);
474 483
475 switch (output) { 484 switch (output) {
476 case SAA7127_OUTPUT_TYPE_RGB: 485 case SAA7127_OUTPUT_TYPE_RGB:
@@ -506,165 +515,195 @@ static int saa7127_set_output_type(struct i2c_client *client, int output)
506 default: 515 default:
507 return -EINVAL; 516 return -EINVAL;
508 } 517 }
509 v4l_dbg(1, debug, client, 518 v4l2_dbg(1, debug, sd,
510 "Selecting %s output type\n", output_strs[output]); 519 "Selecting %s output type\n", output_strs[output]);
511 520
512 /* Configure Encoder */ 521 /* Configure Encoder */
513 saa7127_write(client, 0x2d, state->reg_2d); 522 saa7127_write(sd, 0x2d, state->reg_2d);
514 saa7127_write(client, 0x3a, state->reg_3a | state->reg_3a_cb); 523 saa7127_write(sd, 0x3a, state->reg_3a | state->reg_3a_cb);
515 state->output_type = output; 524 state->output_type = output;
516 return 0; 525 return 0;
517} 526}
518 527
519/* ----------------------------------------------------------------------- */ 528/* ----------------------------------------------------------------------- */
520 529
521static int saa7127_set_input_type(struct i2c_client *client, int input) 530static int saa7127_set_input_type(struct v4l2_subdev *sd, int input)
522{ 531{
523 struct saa7127_state *state = i2c_get_clientdata(client); 532 struct saa7127_state *state = to_state(sd);
524 533
525 switch (input) { 534 switch (input) {
526 case SAA7127_INPUT_TYPE_NORMAL: /* avia */ 535 case SAA7127_INPUT_TYPE_NORMAL: /* avia */
527 v4l_dbg(1, debug, client, "Selecting Normal Encoder Input\n"); 536 v4l2_dbg(1, debug, sd, "Selecting Normal Encoder Input\n");
528 state->reg_3a_cb = 0; 537 state->reg_3a_cb = 0;
529 break; 538 break;
530 539
531 case SAA7127_INPUT_TYPE_TEST_IMAGE: /* color bar */ 540 case SAA7127_INPUT_TYPE_TEST_IMAGE: /* color bar */
532 v4l_dbg(1, debug, client, "Selecting Color Bar generator\n"); 541 v4l2_dbg(1, debug, sd, "Selecting Color Bar generator\n");
533 state->reg_3a_cb = 0x80; 542 state->reg_3a_cb = 0x80;
534 break; 543 break;
535 544
536 default: 545 default:
537 return -EINVAL; 546 return -EINVAL;
538 } 547 }
539 saa7127_write(client, 0x3a, state->reg_3a | state->reg_3a_cb); 548 saa7127_write(sd, 0x3a, state->reg_3a | state->reg_3a_cb);
540 state->input_type = input; 549 state->input_type = input;
541 return 0; 550 return 0;
542} 551}
543 552
544/* ----------------------------------------------------------------------- */ 553/* ----------------------------------------------------------------------- */
545 554
546static int saa7127_command(struct i2c_client *client, 555static int saa7127_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
547 unsigned int cmd, void *arg)
548{ 556{
549 struct saa7127_state *state = i2c_get_clientdata(client); 557 struct saa7127_state *state = to_state(sd);
550 struct v4l2_format *fmt = arg;
551 struct v4l2_routing *route = arg;
552
553 switch (cmd) {
554 case VIDIOC_INT_S_STD_OUTPUT:
555 if (state->std == *(v4l2_std_id *)arg)
556 break;
557 return saa7127_set_std(client, *(v4l2_std_id *)arg);
558
559 case VIDIOC_INT_G_STD_OUTPUT:
560 *(v4l2_std_id *)arg = state->std;
561 break;
562 558
563 case VIDIOC_INT_G_VIDEO_ROUTING: 559 if (state->std == std)
564 route->input = state->input_type; 560 return 0;
565 route->output = state->output_type; 561 return saa7127_set_std(sd, std);
566 break; 562}
567 563
568 case VIDIOC_INT_S_VIDEO_ROUTING: 564static int saa7127_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
569 { 565{
570 int rc = 0; 566 struct saa7127_state *state = to_state(sd);
567 int rc = 0;
568
569 if (state->input_type != route->input)
570 rc = saa7127_set_input_type(sd, route->input);
571 if (rc == 0 && state->output_type != route->output)
572 rc = saa7127_set_output_type(sd, route->output);
573 return rc;
574}
571 575
572 if (state->input_type != route->input) 576static int saa7127_s_stream(struct v4l2_subdev *sd, int enable)
573 rc = saa7127_set_input_type(client, route->input); 577{
574 if (rc == 0 && state->output_type != route->output) 578 struct saa7127_state *state = to_state(sd);
575 rc = saa7127_set_output_type(client, route->output);
576 return rc;
577 }
578 579
579 case VIDIOC_STREAMON: 580 if (state->video_enable == enable)
580 case VIDIOC_STREAMOFF: 581 return 0;
581 if (state->video_enable == (cmd == VIDIOC_STREAMON)) 582 return saa7127_set_video_enable(sd, enable);
582 break; 583}
583 return saa7127_set_video_enable(client, cmd == VIDIOC_STREAMON);
584
585 case VIDIOC_G_FMT:
586 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
587 return -EINVAL;
588
589 memset(&fmt->fmt.sliced, 0, sizeof(fmt->fmt.sliced));
590 if (state->vps_enable)
591 fmt->fmt.sliced.service_lines[0][16] = V4L2_SLICED_VPS;
592 if (state->wss_enable)
593 fmt->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
594 if (state->cc_enable) {
595 fmt->fmt.sliced.service_lines[0][21] = V4L2_SLICED_CAPTION_525;
596 fmt->fmt.sliced.service_lines[1][21] = V4L2_SLICED_CAPTION_525;
597 }
598 fmt->fmt.sliced.service_set =
599 (state->vps_enable ? V4L2_SLICED_VPS : 0) |
600 (state->wss_enable ? V4L2_SLICED_WSS_625 : 0) |
601 (state->cc_enable ? V4L2_SLICED_CAPTION_525 : 0);
602 break;
603 584
604 case VIDIOC_LOG_STATUS: 585static int saa7127_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
605 v4l_info(client, "Standard: %s\n", (state->std & V4L2_STD_525_60) ? "60 Hz" : "50 Hz"); 586{
606 v4l_info(client, "Input: %s\n", state->input_type ? "color bars" : "normal"); 587 struct saa7127_state *state = to_state(sd);
607 v4l_info(client, "Output: %s\n", state->video_enable ?
608 output_strs[state->output_type] : "disabled");
609 v4l_info(client, "WSS: %s\n", state->wss_enable ?
610 wss_strs[state->wss_mode] : "disabled");
611 v4l_info(client, "VPS: %s\n", state->vps_enable ? "enabled" : "disabled");
612 v4l_info(client, "CC: %s\n", state->cc_enable ? "enabled" : "disabled");
613 break;
614 588
615#ifdef CONFIG_VIDEO_ADV_DEBUG 589 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
616 case VIDIOC_DBG_G_REGISTER: 590 return -EINVAL;
617 case VIDIOC_DBG_S_REGISTER:
618 {
619 struct v4l2_register *reg = arg;
620
621 if (!v4l2_chip_match_i2c_client(client,
622 reg->match_type, reg->match_chip))
623 return -EINVAL;
624 if (!capable(CAP_SYS_ADMIN))
625 return -EPERM;
626 if (cmd == VIDIOC_DBG_G_REGISTER)
627 reg->val = saa7127_read(client, reg->reg & 0xff);
628 else
629 saa7127_write(client, reg->reg & 0xff, reg->val & 0xff);
630 break;
631 }
632#endif
633 591
634 case VIDIOC_INT_S_VBI_DATA: 592 memset(&fmt->fmt.sliced, 0, sizeof(fmt->fmt.sliced));
635 { 593 if (state->vps_enable)
636 struct v4l2_sliced_vbi_data *data = arg; 594 fmt->fmt.sliced.service_lines[0][16] = V4L2_SLICED_VPS;
637 595 if (state->wss_enable)
638 switch (data->id) { 596 fmt->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
639 case V4L2_SLICED_WSS_625: 597 if (state->cc_enable) {
640 return saa7127_set_wss(client, data); 598 fmt->fmt.sliced.service_lines[0][21] = V4L2_SLICED_CAPTION_525;
641 case V4L2_SLICED_VPS: 599 fmt->fmt.sliced.service_lines[1][21] = V4L2_SLICED_CAPTION_525;
642 return saa7127_set_vps(client, data);
643 case V4L2_SLICED_CAPTION_525:
644 if (data->field == 0)
645 return saa7127_set_cc(client, data);
646 return saa7127_set_xds(client, data);
647 default:
648 return -EINVAL;
649 }
650 break;
651 } 600 }
601 fmt->fmt.sliced.service_set =
602 (state->vps_enable ? V4L2_SLICED_VPS : 0) |
603 (state->wss_enable ? V4L2_SLICED_WSS_625 : 0) |
604 (state->cc_enable ? V4L2_SLICED_CAPTION_525 : 0);
605 return 0;
606}
652 607
653 case VIDIOC_G_CHIP_IDENT: 608static int saa7127_s_vbi_data(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
654 return v4l2_chip_ident_i2c_client(client, arg, state->ident, 0); 609{
655 610 switch (data->id) {
611 case V4L2_SLICED_WSS_625:
612 return saa7127_set_wss(sd, data);
613 case V4L2_SLICED_VPS:
614 return saa7127_set_vps(sd, data);
615 case V4L2_SLICED_CAPTION_525:
616 if (data->field == 0)
617 return saa7127_set_cc(sd, data);
618 return saa7127_set_xds(sd, data);
656 default: 619 default:
657 return -EINVAL; 620 return -EINVAL;
658 } 621 }
659 return 0; 622 return 0;
660} 623}
661 624
625#ifdef CONFIG_VIDEO_ADV_DEBUG
626static int saa7127_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
627{
628 struct i2c_client *client = v4l2_get_subdevdata(sd);
629
630 if (!v4l2_chip_match_i2c_client(client,
631 reg->match_type, reg->match_chip))
632 return -EINVAL;
633 if (!capable(CAP_SYS_ADMIN))
634 return -EPERM;
635 reg->val = saa7127_read(sd, reg->reg & 0xff);
636 return 0;
637}
638
639static int saa7127_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
640{
641 struct i2c_client *client = v4l2_get_subdevdata(sd);
642
643 if (!v4l2_chip_match_i2c_client(client,
644 reg->match_type, reg->match_chip))
645 return -EINVAL;
646 if (!capable(CAP_SYS_ADMIN))
647 return -EPERM;
648 saa7127_write(sd, reg->reg & 0xff, reg->val & 0xff);
649 return 0;
650}
651#endif
652
653static int saa7127_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
654{
655 struct saa7127_state *state = to_state(sd);
656 struct i2c_client *client = v4l2_get_subdevdata(sd);
657
658 return v4l2_chip_ident_i2c_client(client, chip, state->ident, 0);
659}
660
661static int saa7127_log_status(struct v4l2_subdev *sd)
662{
663 struct saa7127_state *state = to_state(sd);
664
665 v4l2_info(sd, "Standard: %s\n", (state->std & V4L2_STD_525_60) ? "60 Hz" : "50 Hz");
666 v4l2_info(sd, "Input: %s\n", state->input_type ? "color bars" : "normal");
667 v4l2_info(sd, "Output: %s\n", state->video_enable ?
668 output_strs[state->output_type] : "disabled");
669 v4l2_info(sd, "WSS: %s\n", state->wss_enable ?
670 wss_strs[state->wss_mode] : "disabled");
671 v4l2_info(sd, "VPS: %s\n", state->vps_enable ? "enabled" : "disabled");
672 v4l2_info(sd, "CC: %s\n", state->cc_enable ? "enabled" : "disabled");
673 return 0;
674}
675
676/* ----------------------------------------------------------------------- */
677
678static const struct v4l2_subdev_core_ops saa7127_core_ops = {
679 .log_status = saa7127_log_status,
680 .g_chip_ident = saa7127_g_chip_ident,
681#ifdef CONFIG_VIDEO_ADV_DEBUG
682 .g_register = saa7127_g_register,
683 .s_register = saa7127_s_register,
684#endif
685};
686
687static const struct v4l2_subdev_video_ops saa7127_video_ops = {
688 .s_vbi_data = saa7127_s_vbi_data,
689 .g_fmt = saa7127_g_fmt,
690 .s_std_output = saa7127_s_std_output,
691 .s_routing = saa7127_s_routing,
692 .s_stream = saa7127_s_stream,
693};
694
695static const struct v4l2_subdev_ops saa7127_ops = {
696 .core = &saa7127_core_ops,
697 .video = &saa7127_video_ops,
698};
699
662/* ----------------------------------------------------------------------- */ 700/* ----------------------------------------------------------------------- */
663 701
664static int saa7127_probe(struct i2c_client *client, 702static int saa7127_probe(struct i2c_client *client,
665 const struct i2c_device_id *id) 703 const struct i2c_device_id *id)
666{ 704{
667 struct saa7127_state *state; 705 struct saa7127_state *state;
706 struct v4l2_subdev *sd;
668 struct v4l2_sliced_vbi_data vbi = { 0, 0, 0, 0 }; /* set to disabled */ 707 struct v4l2_sliced_vbi_data vbi = { 0, 0, 0, 0 }; /* set to disabled */
669 708
670 /* Check if the adapter supports the needed features */ 709 /* Check if the adapter supports the needed features */
@@ -674,40 +713,42 @@ static int saa7127_probe(struct i2c_client *client,
674 v4l_dbg(1, debug, client, "detecting saa7127 client on address 0x%x\n", 713 v4l_dbg(1, debug, client, "detecting saa7127 client on address 0x%x\n",
675 client->addr << 1); 714 client->addr << 1);
676 715
716 state = kzalloc(sizeof(struct saa7127_state), GFP_KERNEL);
717 if (state == NULL)
718 return -ENOMEM;
719
720 sd = &state->sd;
721 v4l2_i2c_subdev_init(sd, client, &saa7127_ops);
722
677 /* First test register 0: Bits 5-7 are a version ID (should be 0), 723 /* First test register 0: Bits 5-7 are a version ID (should be 0),
678 and bit 2 should also be 0. 724 and bit 2 should also be 0.
679 This is rather general, so the second test is more specific and 725 This is rather general, so the second test is more specific and
680 looks at the 'ending point of burst in clock cycles' which is 726 looks at the 'ending point of burst in clock cycles' which is
681 0x1d after a reset and not expected to ever change. */ 727 0x1d after a reset and not expected to ever change. */
682 if ((saa7127_read(client, 0) & 0xe4) != 0 || 728 if ((saa7127_read(sd, 0) & 0xe4) != 0 ||
683 (saa7127_read(client, 0x29) & 0x3f) != 0x1d) { 729 (saa7127_read(sd, 0x29) & 0x3f) != 0x1d) {
684 v4l_dbg(1, debug, client, "saa7127 not found\n"); 730 v4l2_dbg(1, debug, sd, "saa7127 not found\n");
731 kfree(state);
685 return -ENODEV; 732 return -ENODEV;
686 } 733 }
687 state = kzalloc(sizeof(struct saa7127_state), GFP_KERNEL);
688
689 if (state == NULL)
690 return -ENOMEM;
691
692 i2c_set_clientdata(client, state);
693 734
694 /* Configure Encoder */ 735 /* Configure Encoder */
695 736
696 v4l_dbg(1, debug, client, "Configuring encoder\n"); 737 v4l2_dbg(1, debug, sd, "Configuring encoder\n");
697 saa7127_write_inittab(client, saa7127_init_config_common); 738 saa7127_write_inittab(sd, saa7127_init_config_common);
698 saa7127_set_std(client, V4L2_STD_NTSC); 739 saa7127_set_std(sd, V4L2_STD_NTSC);
699 saa7127_set_output_type(client, SAA7127_OUTPUT_TYPE_BOTH); 740 saa7127_set_output_type(sd, SAA7127_OUTPUT_TYPE_BOTH);
700 saa7127_set_vps(client, &vbi); 741 saa7127_set_vps(sd, &vbi);
701 saa7127_set_wss(client, &vbi); 742 saa7127_set_wss(sd, &vbi);
702 saa7127_set_cc(client, &vbi); 743 saa7127_set_cc(sd, &vbi);
703 saa7127_set_xds(client, &vbi); 744 saa7127_set_xds(sd, &vbi);
704 if (test_image == 1) 745 if (test_image == 1)
705 /* The Encoder has an internal Colorbar generator */ 746 /* The Encoder has an internal Colorbar generator */
706 /* This can be used for debugging */ 747 /* This can be used for debugging */
707 saa7127_set_input_type(client, SAA7127_INPUT_TYPE_TEST_IMAGE); 748 saa7127_set_input_type(sd, SAA7127_INPUT_TYPE_TEST_IMAGE);
708 else 749 else
709 saa7127_set_input_type(client, SAA7127_INPUT_TYPE_NORMAL); 750 saa7127_set_input_type(sd, SAA7127_INPUT_TYPE_NORMAL);
710 saa7127_set_video_enable(client, 1); 751 saa7127_set_video_enable(sd, 1);
711 752
712 if (id->driver_data) { /* Chip type is already known */ 753 if (id->driver_data) { /* Chip type is already known */
713 state->ident = id->driver_data; 754 state->ident = id->driver_data;
@@ -715,10 +756,10 @@ static int saa7127_probe(struct i2c_client *client,
715 int read_result; 756 int read_result;
716 757
717 /* Detect if it's an saa7129 */ 758 /* Detect if it's an saa7129 */
718 read_result = saa7127_read(client, SAA7129_REG_FADE_KEY_COL2); 759 read_result = saa7127_read(sd, SAA7129_REG_FADE_KEY_COL2);
719 saa7127_write(client, SAA7129_REG_FADE_KEY_COL2, 0xaa); 760 saa7127_write(sd, SAA7129_REG_FADE_KEY_COL2, 0xaa);
720 if (saa7127_read(client, SAA7129_REG_FADE_KEY_COL2) == 0xaa) { 761 if (saa7127_read(sd, SAA7129_REG_FADE_KEY_COL2) == 0xaa) {
721 saa7127_write(client, SAA7129_REG_FADE_KEY_COL2, 762 saa7127_write(sd, SAA7129_REG_FADE_KEY_COL2,
722 read_result); 763 read_result);
723 state->ident = V4L2_IDENT_SAA7129; 764 state->ident = V4L2_IDENT_SAA7129;
724 strlcpy(client->name, "saa7129", I2C_NAME_SIZE); 765 strlcpy(client->name, "saa7129", I2C_NAME_SIZE);
@@ -728,10 +769,10 @@ static int saa7127_probe(struct i2c_client *client,
728 } 769 }
729 } 770 }
730 771
731 v4l_info(client, "%s found @ 0x%x (%s)\n", client->name, 772 v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
732 client->addr << 1, client->adapter->name); 773 client->addr << 1, client->adapter->name);
733 if (state->ident == V4L2_IDENT_SAA7129) 774 if (state->ident == V4L2_IDENT_SAA7129)
734 saa7127_write_inittab(client, saa7129_init_config_extra); 775 saa7127_write_inittab(sd, saa7129_init_config_extra);
735 return 0; 776 return 0;
736} 777}
737 778
@@ -739,9 +780,12 @@ static int saa7127_probe(struct i2c_client *client,
739 780
740static int saa7127_remove(struct i2c_client *client) 781static int saa7127_remove(struct i2c_client *client)
741{ 782{
783 struct v4l2_subdev *sd = i2c_get_clientdata(client);
784
785 v4l2_device_unregister_subdev(sd);
742 /* Turn off TV output */ 786 /* Turn off TV output */
743 saa7127_set_video_enable(client, 0); 787 saa7127_set_video_enable(sd, 0);
744 kfree(i2c_get_clientdata(client)); 788 kfree(to_state(sd));
745 return 0; 789 return 0;
746} 790}
747 791
@@ -760,7 +804,6 @@ MODULE_DEVICE_TABLE(i2c, saa7127_id);
760static struct v4l2_i2c_driver_data v4l2_i2c_data = { 804static struct v4l2_i2c_driver_data v4l2_i2c_data = {
761 .name = "saa7127", 805 .name = "saa7127",
762 .driverid = I2C_DRIVERID_SAA7127, 806 .driverid = I2C_DRIVERID_SAA7127,
763 .command = saa7127_command,
764 .probe = saa7127_probe, 807 .probe = saa7127_probe,
765 .remove = saa7127_remove, 808 .remove = saa7127_remove,
766 .id_table = saa7127_id, 809 .id_table = saa7127_id,
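Editor's note: the saa7127 hunks above are a mechanical conversion from the old i2c command() interface to v4l2_subdev — the driver state embeds a struct v4l2_subdev, a container_of() helper recovers the state inside each op, and probe() goes through v4l2_i2c_subdev_init(). A minimal sketch of that pattern, using only helpers visible in this diff; foo_state, foo_ops and foo_probe are hypothetical names, and the exact header split varied across this kernel series:

#include <linux/i2c.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

struct foo_state {
	struct v4l2_subdev sd;	/* embedded subdev */
	int some_setting;
};

/* Every subdev op receives the sd pointer; container_of() gets the state back. */
static inline struct foo_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct foo_state, sd);
}

static const struct v4l2_subdev_ops foo_ops = {
	/* core/video/audio op tables are plugged in here */
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct foo_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state == NULL)
		return -ENOMEM;
	/* Initializes the subdev and stores it as the i2c clientdata. */
	v4l2_i2c_subdev_init(&state->sd, client, &foo_ops);
	return 0;
}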
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index ddc5402c5fb0..a2e3f6729c5b 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -4606,6 +4606,43 @@ struct saa7134_board saa7134_boards[] = {
4606 .gpio = 0x0200000, 4606 .gpio = 0x0200000,
4607 }, 4607 },
4608 }, 4608 },
4609 [SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG] = {
4610 .name = "Kworld Plus TV Analog Lite PCI",
4611 .audio_clock = 0x00187de7,
4612 .tuner_type = TUNER_YMEC_TVF_5533MF,
4613 .radio_type = TUNER_TEA5767,
4614 .tuner_addr = ADDR_UNSET,
4615 .radio_addr = ADDR_UNSET,
4616 .gpiomask = 0x80000700,
4617 .inputs = { {
4618 .name = name_tv,
4619 .vmux = 1,
4620 .amux = LINE2,
4621 .tv = 1,
4622 .gpio = 0x100,
4623 }, {
4624 .name = name_comp1,
4625 .vmux = 3,
4626 .amux = LINE1,
4627 .gpio = 0x200,
4628 }, {
4629 .name = name_svideo,
4630 .vmux = 8,
4631 .amux = LINE1,
4632 .gpio = 0x200,
4633 } },
4634 .radio = {
4635 .name = name_radio,
4636 .vmux = 1,
4637 .amux = LINE1,
4638 .gpio = 0x100,
4639 },
4640 .mute = {
4641 .name = name_mute,
4642 .vmux = 8,
4643 .amux = 2,
4644 },
4645 },
4609}; 4646};
4610 4647
4611const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards); 4648const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -4736,6 +4773,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
4736 .driver_data = SAA7134_BOARD_MD7134, 4773 .driver_data = SAA7134_BOARD_MD7134,
4737 },{ 4774 },{
4738 .vendor = PCI_VENDOR_ID_PHILIPS, 4775 .vendor = PCI_VENDOR_ID_PHILIPS,
4776 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
4777 .subvendor = 0x16be, /* CTX946 analog TV, HW mpeg, DVB-T */
4778 .subdevice = 0x5000, /* only analog TV and DVB-T for now */
4779 .driver_data = SAA7134_BOARD_MD7134,
4780 }, {
4781 .vendor = PCI_VENDOR_ID_PHILIPS,
4739 .device = PCI_DEVICE_ID_PHILIPS_SAA7130, 4782 .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
4740 .subvendor = 0x1048, 4783 .subvendor = 0x1048,
4741 .subdevice = 0x226b, 4784 .subdevice = 0x226b,
@@ -5653,6 +5696,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
5653 .subdevice = 0x4878, /* REV:1.02G */ 5696 .subdevice = 0x4878, /* REV:1.02G */
5654 .driver_data = SAA7134_BOARD_ASUSTeK_TIGER_3IN1, 5697 .driver_data = SAA7134_BOARD_ASUSTeK_TIGER_3IN1,
5655 }, { 5698 }, {
5699 .vendor = PCI_VENDOR_ID_PHILIPS,
5700 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
5701 .subvendor = 0x17de,
5702 .subdevice = 0x7128,
5703 .driver_data = SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG,
5704 }, {
5656 /* --- boards without eeprom + subsystem ID --- */ 5705 /* --- boards without eeprom + subsystem ID --- */
5657 .vendor = PCI_VENDOR_ID_PHILIPS, 5706 .vendor = PCI_VENDOR_ID_PHILIPS,
5658 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 5707 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -5880,6 +5929,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
5880 case SAA7134_BOARD_BEHOLD_507_9FM: 5929 case SAA7134_BOARD_BEHOLD_507_9FM:
5881 case SAA7134_BOARD_GENIUS_TVGO_A11MCE: 5930 case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
5882 case SAA7134_BOARD_REAL_ANGEL_220: 5931 case SAA7134_BOARD_REAL_ANGEL_220:
5932 case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
5883 dev->has_remote = SAA7134_REMOTE_GPIO; 5933 dev->has_remote = SAA7134_REMOTE_GPIO;
5884 break; 5934 break;
5885 case SAA7134_BOARD_FLYDVBS_LR300: 5935 case SAA7134_BOARD_FLYDVBS_LR300:
@@ -6048,7 +6098,7 @@ static void saa7134_tuner_setup(struct saa7134_dev *dev)
6048 struct v4l2_priv_tun_config xc2028_cfg; 6098 struct v4l2_priv_tun_config xc2028_cfg;
6049 struct xc2028_ctrl ctl; 6099 struct xc2028_ctrl ctl;
6050 6100
6051 memset(&xc2028_cfg, 0, sizeof(ctl)); 6101 memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
6052 memset(&ctl, 0, sizeof(ctl)); 6102 memset(&ctl, 0, sizeof(ctl));
6053 6103
6054 ctl.fname = XC2028_DEFAULT_FIRMWARE; 6104 ctl.fname = XC2028_DEFAULT_FIRMWARE;
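Editor's note: the final saa7134-cards hunk fixes a classic sizeof slip — xc2028_cfg was cleared with sizeof(ctl), so depending on the two structs' relative sizes the memset either leaves part of the target uninitialized or scribbles past it. The safe idiom, shown as a two-line fragment with hypothetical names:

	memset(&cfg, 0, sizeof(ctl));	/* bug: size taken from a different object */
	memset(&cfg, 0, sizeof(cfg));	/* fix: sizeof the object actually being cleared */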
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 8c46115d4c79..d9a5652595b5 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -954,20 +954,14 @@ static int dvb_init(struct saa7134_dev *dev)
954 /* FIXME: add support for multi-frontend */ 954 /* FIXME: add support for multi-frontend */
955 mutex_init(&dev->frontends.lock); 955 mutex_init(&dev->frontends.lock);
956 INIT_LIST_HEAD(&dev->frontends.felist); 956 INIT_LIST_HEAD(&dev->frontends.felist);
957 dev->frontends.active_fe_id = 0;
958 957
959 printk(KERN_INFO "%s() allocating 1 frontend\n", __func__); 958 printk(KERN_INFO "%s() allocating 1 frontend\n", __func__);
960 959 fe0 = videobuf_dvb_alloc_frontend(&dev->frontends, 1);
961 if (videobuf_dvb_alloc_frontend(&dev->frontends, 1) == NULL) { 960 if (!fe0) {
962 printk(KERN_ERR "%s() failed to alloc\n", __func__); 961 printk(KERN_ERR "%s() failed to alloc\n", __func__);
963 return -ENOMEM; 962 return -ENOMEM;
964 } 963 }
965 964
966 /* Get the first frontend */
967 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
968 if (!fe0)
969 return -EINVAL;
970
971 /* init struct videobuf_dvb */ 965 /* init struct videobuf_dvb */
972 dev->ts.nr_bufs = 32; 966 dev->ts.nr_bufs = 32;
973 dev->ts.nr_packets = 32*4; 967 dev->ts.nr_packets = 32*4;
@@ -1376,7 +1370,7 @@ static int dvb_init(struct saa7134_dev *dev)
1376 }; 1370 };
1377 1371
1378 if (!fe0->dvb.frontend) 1372 if (!fe0->dvb.frontend)
1379 return -1; 1373 goto dettach_frontend;
1380 1374
1381 fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg); 1375 fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg);
1382 if (!fe) { 1376 if (!fe) {
@@ -1388,7 +1382,7 @@ static int dvb_init(struct saa7134_dev *dev)
1388 1382
1389 if (NULL == fe0->dvb.frontend) { 1383 if (NULL == fe0->dvb.frontend) {
1390 printk(KERN_ERR "%s/dvb: frontend initialization failed\n", dev->name); 1384 printk(KERN_ERR "%s/dvb: frontend initialization failed\n", dev->name);
1391 return -1; 1385 goto dettach_frontend;
1392 } 1386 }
1393 /* define general-purpose callback pointer */ 1387 /* define general-purpose callback pointer */
1394 fe0->dvb.frontend->callback = saa7134_tuner_callback; 1388 fe0->dvb.frontend->callback = saa7134_tuner_callback;
@@ -1411,11 +1405,8 @@ static int dvb_init(struct saa7134_dev *dev)
1411 return ret; 1405 return ret;
1412 1406
1413dettach_frontend: 1407dettach_frontend:
1414 if (fe0->dvb.frontend) 1408 videobuf_dvb_dealloc_frontends(&dev->frontends);
1415 dvb_frontend_detach(fe0->dvb.frontend); 1409 return -EINVAL;
1416 fe0->dvb.frontend = NULL;
1417
1418 return -1;
1419} 1410}
1420 1411
1421static int dvb_fini(struct saa7134_dev *dev) 1412static int dvb_fini(struct saa7134_dev *dev)
@@ -1454,8 +1445,7 @@ static int dvb_fini(struct saa7134_dev *dev)
1454 } 1445 }
1455 } 1446 }
1456 } 1447 }
1457 if (fe0->dvb.frontend) 1448 videobuf_dvb_unregister_bus(&dev->frontends);
1458 videobuf_dvb_unregister_bus(&dev->frontends);
1459 return 0; 1449 return 0;
1460} 1450}
1461 1451
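Editor's note: the dvb_init() hunks above simplify the error handling — videobuf_dvb_alloc_frontend() already returns the frontend, so the follow-up videobuf_dvb_get_frontend() lookup is dropped, and every failure after allocation funnels through one label that calls videobuf_dvb_dealloc_frontends(). A condensed sketch of the resulting shape, using only calls that appear in the diff; the attach logic is elided:

	fe0 = videobuf_dvb_alloc_frontend(&dev->frontends, 1);
	if (!fe0) {
		printk(KERN_ERR "%s() failed to alloc\n", __func__);
		return -ENOMEM;
	}

	/* ... demod and tuner attach calls elided ... */

	if (!fe0->dvb.frontend)
		goto dettach_frontend;	/* label spelling kept as in the driver */

	/* ... */
	return ret;

dettach_frontend:
	videobuf_dvb_dealloc_frontends(&dev->frontends);	/* tears down whatever was allocated */
	return -EINVAL;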
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index c53fd5f9f6b5..d2124f64e4e2 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -97,6 +97,15 @@ static int build_key(struct saa7134_dev *dev)
97 dprintk("build_key gpio=0x%x mask=0x%x data=%d\n", 97 dprintk("build_key gpio=0x%x mask=0x%x data=%d\n",
98 gpio, ir->mask_keycode, data); 98 gpio, ir->mask_keycode, data);
99 99
100 switch (dev->board) {
101 case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
102 if (data == ir->mask_keycode)
103 ir_input_nokey(ir->dev, &ir->ir);
104 else
105 ir_input_keydown(ir->dev, &ir->ir, data, data);
106 return 0;
107 }
108
100 if (ir->polling) { 109 if (ir->polling) {
101 if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) || 110 if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) ||
102 (ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) { 111 (ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) {
@@ -586,6 +595,11 @@ int saa7134_input_init1(struct saa7134_dev *dev)
586 mask_keyup = 0x4000; 595 mask_keyup = 0x4000;
587 polling = 50; /* ms */ 596 polling = 50; /* ms */
588 break; 597 break;
598 case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
599 ir_codes = ir_codes_kworld_plus_tv_analog;
600 mask_keycode = 0x7f;
601 polling = 40; /* ms */
602 break;
589 } 603 }
590 if (NULL == ir_codes) { 604 if (NULL == ir_codes) {
591 printk("%s: Oops: IR config error [card=%d]\n", 605 printk("%s: Oops: IR config error [card=%d]\n",
diff --git a/drivers/media/video/saa7134/saa7134-tvaudio.c b/drivers/media/video/saa7134/saa7134-tvaudio.c
index c5d0b44c179e..76b16407b01e 100644
--- a/drivers/media/video/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/video/saa7134/saa7134-tvaudio.c
@@ -159,7 +159,7 @@ static struct saa7134_tvaudio tvaudio[] = {
159 .mode = TVAUDIO_FM_MONO, 159 .mode = TVAUDIO_FM_MONO,
160 } 160 }
161}; 161};
162#define TVAUDIO (sizeof(tvaudio)/sizeof(struct saa7134_tvaudio)) 162#define TVAUDIO ARRAY_SIZE(tvaudio)
163 163
164/* ------------------------------------------------------------------ */ 164/* ------------------------------------------------------------------ */
165 165
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 24096d6e1ef8..f6c1fcc72070 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -275,8 +275,9 @@ struct saa7134_format {
275#define SAA7134_BOARD_REAL_ANGEL_220 150 275#define SAA7134_BOARD_REAL_ANGEL_220 150
276#define SAA7134_BOARD_ADS_INSTANT_HDTV_PCI 151 276#define SAA7134_BOARD_ADS_INSTANT_HDTV_PCI 151
277#define SAA7134_BOARD_ASUSTeK_TIGER 152 277#define SAA7134_BOARD_ASUSTeK_TIGER 152
278#define SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG 153
278 279
279#define SAA7134_MAXBOARDS 8 280#define SAA7134_MAXBOARDS 32
280#define SAA7134_INPUT_MAX 8 281#define SAA7134_INPUT_MAX 8
281 282
282/* ----------------------------------------------------------- */ 283/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/saa717x.c b/drivers/media/video/saa717x.c
index af60ede5310d..9befca65905e 100644
--- a/drivers/media/video/saa717x.c
+++ b/drivers/media/video/saa717x.c
@@ -37,7 +37,7 @@
37 37
38#include <linux/videodev2.h> 38#include <linux/videodev2.h>
39#include <linux/i2c.h> 39#include <linux/i2c.h>
40#include <media/v4l2-common.h> 40#include <media/v4l2-device.h>
41#include <media/v4l2-i2c-drv.h> 41#include <media/v4l2-i2c-drv.h>
42 42
43MODULE_DESCRIPTION("Philips SAA717x audio/video decoder driver"); 43MODULE_DESCRIPTION("Philips SAA717x audio/video decoder driver");
@@ -54,6 +54,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
54 */ 54 */
55 55
56struct saa717x_state { 56struct saa717x_state {
57 struct v4l2_subdev sd;
57 v4l2_std_id std; 58 v4l2_std_id std;
58 int input; 59 int input;
59 int enable; 60 int enable;
@@ -75,6 +76,11 @@ struct saa717x_state {
75 int audio_input; 76 int audio_input;
76}; 77};
77 78
79static inline struct saa717x_state *to_state(struct v4l2_subdev *sd)
80{
81 return container_of(sd, struct saa717x_state, sd);
82}
83
78/* ----------------------------------------------------------------------- */ 84/* ----------------------------------------------------------------------- */
79 85
80/* for audio mode */ 86/* for audio mode */
@@ -88,8 +94,9 @@ struct saa717x_state {
88 94
89/* ----------------------------------------------------------------------- */ 95/* ----------------------------------------------------------------------- */
90 96
91static int saa717x_write(struct i2c_client *client, u32 reg, u32 value) 97static int saa717x_write(struct v4l2_subdev *sd, u32 reg, u32 value)
92{ 98{
99 struct i2c_client *client = v4l2_get_subdevdata(sd);
93 struct i2c_adapter *adap = client->adapter; 100 struct i2c_adapter *adap = client->adapter;
94 int fw_addr = reg == 0x454 || (reg >= 0x464 && reg <= 0x478) || reg == 0x480 || reg == 0x488; 101 int fw_addr = reg == 0x454 || (reg >= 0x464 && reg <= 0x478) || reg == 0x480 || reg == 0x488;
95 unsigned char mm1[6]; 102 unsigned char mm1[6];
@@ -109,20 +116,21 @@ static int saa717x_write(struct i2c_client *client, u32 reg, u32 value)
109 } 116 }
110 msg.len = fw_addr ? 5 : 3; /* Long Registers have *only* three bytes! */ 117 msg.len = fw_addr ? 5 : 3; /* Long Registers have *only* three bytes! */
111 msg.buf = mm1; 118 msg.buf = mm1;
112 v4l_dbg(2, debug, client, "wrote: reg 0x%03x=%08x\n", reg, value); 119 v4l2_dbg(2, debug, sd, "wrote: reg 0x%03x=%08x\n", reg, value);
113 return i2c_transfer(adap, &msg, 1) == 1; 120 return i2c_transfer(adap, &msg, 1) == 1;
114} 121}
115 122
116static void saa717x_write_regs(struct i2c_client *client, u32 *data) 123static void saa717x_write_regs(struct v4l2_subdev *sd, u32 *data)
117{ 124{
118 while (data[0] || data[1]) { 125 while (data[0] || data[1]) {
119 saa717x_write(client, data[0], data[1]); 126 saa717x_write(sd, data[0], data[1]);
120 data += 2; 127 data += 2;
121 } 128 }
122} 129}
123 130
124static u32 saa717x_read(struct i2c_client *client, u32 reg) 131static u32 saa717x_read(struct v4l2_subdev *sd, u32 reg)
125{ 132{
133 struct i2c_client *client = v4l2_get_subdevdata(sd);
126 struct i2c_adapter *adap = client->adapter; 134 struct i2c_adapter *adap = client->adapter;
127 int fw_addr = (reg >= 0x404 && reg <= 0x4b8) || reg == 0x528; 135 int fw_addr = (reg >= 0x404 && reg <= 0x4b8) || reg == 0x528;
128 unsigned char mm1[2]; 136 unsigned char mm1[2];
@@ -146,7 +154,7 @@ static u32 saa717x_read(struct i2c_client *client, u32 reg)
146 else 154 else
147 value = mm2[0] & 0xff; 155 value = mm2[0] & 0xff;
148 156
149 v4l_dbg(2, debug, client, "read: reg 0x%03x=0x%08x\n", reg, value); 157 v4l2_dbg(2, debug, sd, "read: reg 0x%03x=0x%08x\n", reg, value);
150 return value; 158 return value;
151} 159}
152 160
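Editor's note: the two hunks above change the register helpers to take a struct v4l2_subdev and recover the i2c_client with v4l2_get_subdevdata(), so callers can pass the sd they already hold. A stripped-down sketch of such a wrapper for a plain 8-bit register map (the real saa717x helpers handle mixed-width registers); foo_write is a hypothetical name:

#include <linux/i2c.h>
#include <media/v4l2-device.h>

static int foo_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 buf[2] = { reg, value };
	struct i2c_msg msg = {
		.addr  = client->addr,
		.flags = 0,		/* write */
		.len   = 2,
		.buf   = buf,
	};

	/* i2c_transfer() returns the number of messages transferred. */
	return i2c_transfer(client->adapter, &msg, 1) == 1 ? 0 : -EIO;
}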
@@ -680,7 +688,7 @@ static u32 reg_set_audio_template[4][2] =
680 688
681 689
682/* Get detected audio flags (from saa7134 driver) */ 690/* Get detected audio flags (from saa7134 driver) */
683static void get_inf_dev_status(struct i2c_client *client, 691static void get_inf_dev_status(struct v4l2_subdev *sd,
684 int *dual_flag, int *stereo_flag) 692 int *dual_flag, int *stereo_flag)
685{ 693{
686 u32 reg_data3; 694 u32 reg_data3;
@@ -719,13 +727,13 @@ static void get_inf_dev_status(struct i2c_client *client,
719 /* (demdec status: 0x528) */ 727 /* (demdec status: 0x528) */
720 728
721 /* read current status */ 729 /* read current status */
722 reg_data3 = saa717x_read(client, 0x0528); 730 reg_data3 = saa717x_read(sd, 0x0528);
723 731
724 v4l_dbg(1, debug, client, "tvaudio thread status: 0x%x [%s%s%s]\n", 732 v4l2_dbg(1, debug, sd, "tvaudio thread status: 0x%x [%s%s%s]\n",
725 reg_data3, stdres[reg_data3 & 0x1f], 733 reg_data3, stdres[reg_data3 & 0x1f],
726 (reg_data3 & 0x000020) ? ",stereo" : "", 734 (reg_data3 & 0x000020) ? ",stereo" : "",
727 (reg_data3 & 0x000040) ? ",dual" : ""); 735 (reg_data3 & 0x000040) ? ",dual" : "");
728 v4l_dbg(1, debug, client, "detailed status: " 736 v4l2_dbg(1, debug, sd, "detailed status: "
729 "%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s\n", 737 "%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s\n",
730 (reg_data3 & 0x000080) ? " A2/EIAJ pilot tone " : "", 738 (reg_data3 & 0x000080) ? " A2/EIAJ pilot tone " : "",
731 (reg_data3 & 0x000100) ? " A2/EIAJ dual " : "", 739 (reg_data3 & 0x000100) ? " A2/EIAJ dual " : "",
@@ -746,51 +754,51 @@ static void get_inf_dev_status(struct i2c_client *client,
746 (reg_data3 & 0x100000) ? " init done " : ""); 754 (reg_data3 & 0x100000) ? " init done " : "");
747 755
748 if (reg_data3 & 0x000220) { 756 if (reg_data3 & 0x000220) {
749 v4l_dbg(1, debug, client, "ST!!!\n"); 757 v4l2_dbg(1, debug, sd, "ST!!!\n");
750 *stereo_flag = 1; 758 *stereo_flag = 1;
751 } 759 }
752 760
753 if (reg_data3 & 0x000140) { 761 if (reg_data3 & 0x000140) {
754 v4l_dbg(1, debug, client, "DUAL!!!\n"); 762 v4l2_dbg(1, debug, sd, "DUAL!!!\n");
755 *dual_flag = 1; 763 *dual_flag = 1;
756 } 764 }
757} 765}
758 766
759/* regs write to set audio mode */ 767/* regs write to set audio mode */
760static void set_audio_mode(struct i2c_client *client, int audio_mode) 768static void set_audio_mode(struct v4l2_subdev *sd, int audio_mode)
761{ 769{
762 v4l_dbg(1, debug, client, "writing registers to set audio mode by set %d\n", 770 v4l2_dbg(1, debug, sd, "writing registers to set audio mode by set %d\n",
763 audio_mode); 771 audio_mode);
764 772
765 saa717x_write(client, 0x46c, reg_set_audio_template[audio_mode][0]); 773 saa717x_write(sd, 0x46c, reg_set_audio_template[audio_mode][0]);
766 saa717x_write(client, 0x470, reg_set_audio_template[audio_mode][1]); 774 saa717x_write(sd, 0x470, reg_set_audio_template[audio_mode][1]);
767} 775}
768 776
769/* write regs to video output level (bright,contrast,hue,sat) */ 777/* write regs to video output level (bright,contrast,hue,sat) */
770static void set_video_output_level_regs(struct i2c_client *client, 778static void set_video_output_level_regs(struct v4l2_subdev *sd,
771 struct saa717x_state *decoder) 779 struct saa717x_state *decoder)
772{ 780{
773 /* brightness ffh (bright) - 80h (ITU level) - 00h (dark) */ 781 /* brightness ffh (bright) - 80h (ITU level) - 00h (dark) */
774 saa717x_write(client, 0x10a, decoder->bright); 782 saa717x_write(sd, 0x10a, decoder->bright);
775 783
776 /* contrast 7fh (max: 1.984) - 44h (ITU) - 40h (1.0) - 784 /* contrast 7fh (max: 1.984) - 44h (ITU) - 40h (1.0) -
777 0h (luminance off) 40: i2c dump 785 0h (luminance off) 40: i2c dump
778 c0h (-1.0 inverse chrominance) 786 c0h (-1.0 inverse chrominance)
779 80h (-2.0 inverse chrominance) */ 787 80h (-2.0 inverse chrominance) */
780 saa717x_write(client, 0x10b, decoder->contrast); 788 saa717x_write(sd, 0x10b, decoder->contrast);
781 789
782 /* saturation? 7fh(max)-40h(ITU)-0h(color off) 790 /* saturation? 7fh(max)-40h(ITU)-0h(color off)
783 c0h (-1.0 inverse chrominance) 791 c0h (-1.0 inverse chrominance)
784 80h (-2.0 inverse chrominance) */ 792 80h (-2.0 inverse chrominance) */
785 saa717x_write(client, 0x10c, decoder->sat); 793 saa717x_write(sd, 0x10c, decoder->sat);
786 794
787 /* color hue (phase) control 795 /* color hue (phase) control
788 7fh (+178.6) - 0h (0 normal) - 80h (-180.0) */ 796 7fh (+178.6) - 0h (0 normal) - 80h (-180.0) */
789 saa717x_write(client, 0x10d, decoder->hue); 797 saa717x_write(sd, 0x10d, decoder->hue);
790} 798}
791 799
792/* write regs to set audio volume, bass and treble */ 800/* write regs to set audio volume, bass and treble */
793static int set_audio_regs(struct i2c_client *client, 801static int set_audio_regs(struct v4l2_subdev *sd,
794 struct saa717x_state *decoder) 802 struct saa717x_state *decoder)
795{ 803{
796 u8 mute = 0xac; /* -84 dB */ 804 u8 mute = 0xac; /* -84 dB */
@@ -798,8 +806,8 @@ static int set_audio_regs(struct i2c_client *client,
798 unsigned int work_l, work_r; 806 unsigned int work_l, work_r;
799 807
800 /* set SIF analog I/O select */ 808 /* set SIF analog I/O select */
801 saa717x_write(client, 0x0594, decoder->audio_input); 809 saa717x_write(sd, 0x0594, decoder->audio_input);
802 v4l_dbg(1, debug, client, "set audio input %d\n", 810 v4l2_dbg(1, debug, sd, "set audio input %d\n",
803 decoder->audio_input); 811 decoder->audio_input);
804 812
805 /* normalize ( 65535 to 0 -> 24 to -40 (not -84)) */ 813 /* normalize ( 65535 to 0 -> 24 to -40 (not -84)) */
@@ -819,17 +827,17 @@ static int set_audio_regs(struct i2c_client *client,
819 ((u8)decoder->audio_main_vol_r << 8); 827 ((u8)decoder->audio_main_vol_r << 8);
820 } 828 }
821 829
822 saa717x_write(client, 0x480, val); 830 saa717x_write(sd, 0x480, val);
823 831
824 /* bass and treble; go to another function */ 832 /* bass and treble; go to another function */
825 /* set bass and treble */ 833 /* set bass and treble */
826 val = decoder->audio_main_bass | (decoder->audio_main_treble << 8); 834 val = decoder->audio_main_bass | (decoder->audio_main_treble << 8);
827 saa717x_write(client, 0x488, val); 835 saa717x_write(sd, 0x488, val);
828 return 0; 836 return 0;
829} 837}
830 838
831/********** scaling stuff ***********/ 839
832static void set_h_prescale(struct i2c_client *client, 840static void set_h_prescale(struct v4l2_subdev *sd,
833 int task, int prescale) 841 int task, int prescale)
834{ 842{
835 static const struct { 843 static const struct {
@@ -862,107 +870,101 @@ static void set_h_prescale(struct i2c_client *client,
862 return; 870 return;
863 871
864 /* horizontal prescaling */ 872
865 saa717x_write(client, 0x60 + task_shift, vals[i].xpsc); 873 saa717x_write(sd, 0x60 + task_shift, vals[i].xpsc);
866 /* accumulation length */ 874 /* accumulation length */
867 saa717x_write(client, 0x61 + task_shift, vals[i].xacl); 875 saa717x_write(sd, 0x61 + task_shift, vals[i].xacl);
868 /* level control */ 876 /* level control */
869 saa717x_write(client, 0x62 + task_shift, 877 saa717x_write(sd, 0x62 + task_shift,
870 (vals[i].xc2_1 << 3) | vals[i].xdcg); 878 (vals[i].xc2_1 << 3) | vals[i].xdcg);
871 /*FIR prefilter control */ 879 /*FIR prefilter control */
872 saa717x_write(client, 0x63 + task_shift, 880 saa717x_write(sd, 0x63 + task_shift,
873 (vals[i].vpfy << 2) | vals[i].vpfy); 881 (vals[i].vpfy << 2) | vals[i].vpfy);
874} 882}
875 883
876/********** scaling stuff ***********/ 884
877static void set_v_scale(struct i2c_client *client, int task, int yscale) 885static void set_v_scale(struct v4l2_subdev *sd, int task, int yscale)
878{ 886{
879 int task_shift; 887 int task_shift;
880 888
881 task_shift = task * 0x40; 889 task_shift = task * 0x40;
882 /* Vertical scaling ratio (LOW) */ 890 /* Vertical scaling ratio (LOW) */
883 saa717x_write(client, 0x70 + task_shift, yscale & 0xff); 891 saa717x_write(sd, 0x70 + task_shift, yscale & 0xff);
884 /* Vertical scaling ratio (HI) */ 892 /* Vertical scaling ratio (HI) */
885 saa717x_write(client, 0x71 + task_shift, yscale >> 8); 893 saa717x_write(sd, 0x71 + task_shift, yscale >> 8);
886}
887
888static int saa717x_set_audio_clock_freq(struct i2c_client *client, u32 freq)
889{
890 /* not yet implament, so saa717x_cfg_??hz_??_audio is not defined. */
891 return 0;
892} 894}
893 895
894static int saa717x_set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl) 896static int saa717x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
895{ 897{
896 struct saa717x_state *state = i2c_get_clientdata(client); 898 struct saa717x_state *state = to_state(sd);
897 899
898 switch (ctrl->id) { 900 switch (ctrl->id) {
899 case V4L2_CID_BRIGHTNESS: 901 case V4L2_CID_BRIGHTNESS:
900 if (ctrl->value < 0 || ctrl->value > 255) { 902 if (ctrl->value < 0 || ctrl->value > 255) {
901 v4l_err(client, "invalid brightness setting %d\n", ctrl->value); 903 v4l2_err(sd, "invalid brightness setting %d\n", ctrl->value);
902 return -ERANGE; 904 return -ERANGE;
903 } 905 }
904 906
905 state->bright = ctrl->value; 907 state->bright = ctrl->value;
906 v4l_dbg(1, debug, client, "bright:%d\n", state->bright); 908 v4l2_dbg(1, debug, sd, "bright:%d\n", state->bright);
907 saa717x_write(client, 0x10a, state->bright); 909 saa717x_write(sd, 0x10a, state->bright);
908 break; 910 break;
909 911
910 case V4L2_CID_CONTRAST: 912 case V4L2_CID_CONTRAST:
911 if (ctrl->value < 0 || ctrl->value > 127) { 913 if (ctrl->value < 0 || ctrl->value > 127) {
912 v4l_err(client, "invalid contrast setting %d\n", ctrl->value); 914 v4l2_err(sd, "invalid contrast setting %d\n", ctrl->value);
913 return -ERANGE; 915 return -ERANGE;
914 } 916 }
915 917
916 state->contrast = ctrl->value; 918 state->contrast = ctrl->value;
917 v4l_dbg(1, debug, client, "contrast:%d\n", state->contrast); 919 v4l2_dbg(1, debug, sd, "contrast:%d\n", state->contrast);
918 saa717x_write(client, 0x10b, state->contrast); 920 saa717x_write(sd, 0x10b, state->contrast);
919 break; 921 break;
920 922
921 case V4L2_CID_SATURATION: 923 case V4L2_CID_SATURATION:
922 if (ctrl->value < 0 || ctrl->value > 127) { 924 if (ctrl->value < 0 || ctrl->value > 127) {
923 v4l_err(client, "invalid saturation setting %d\n", ctrl->value); 925 v4l2_err(sd, "invalid saturation setting %d\n", ctrl->value);
924 return -ERANGE; 926 return -ERANGE;
925 } 927 }
926 928
927 state->sat = ctrl->value; 929 state->sat = ctrl->value;
928 v4l_dbg(1, debug, client, "sat:%d\n", state->sat); 930 v4l2_dbg(1, debug, sd, "sat:%d\n", state->sat);
929 saa717x_write(client, 0x10c, state->sat); 931 saa717x_write(sd, 0x10c, state->sat);
930 break; 932 break;
931 933
932 case V4L2_CID_HUE: 934 case V4L2_CID_HUE:
933 if (ctrl->value < -127 || ctrl->value > 127) { 935 if (ctrl->value < -127 || ctrl->value > 127) {
934 v4l_err(client, "invalid hue setting %d\n", ctrl->value); 936 v4l2_err(sd, "invalid hue setting %d\n", ctrl->value);
935 return -ERANGE; 937 return -ERANGE;
936 } 938 }
937 939
938 state->hue = ctrl->value; 940 state->hue = ctrl->value;
939 v4l_dbg(1, debug, client, "hue:%d\n", state->hue); 941 v4l2_dbg(1, debug, sd, "hue:%d\n", state->hue);
940 saa717x_write(client, 0x10d, state->hue); 942 saa717x_write(sd, 0x10d, state->hue);
941 break; 943 break;
942 944
943 case V4L2_CID_AUDIO_MUTE: 945 case V4L2_CID_AUDIO_MUTE:
944 state->audio_main_mute = ctrl->value; 946 state->audio_main_mute = ctrl->value;
945 set_audio_regs(client, state); 947 set_audio_regs(sd, state);
946 break; 948 break;
947 949
948 case V4L2_CID_AUDIO_VOLUME: 950 case V4L2_CID_AUDIO_VOLUME:
949 state->audio_main_volume = ctrl->value; 951 state->audio_main_volume = ctrl->value;
950 set_audio_regs(client, state); 952 set_audio_regs(sd, state);
951 break; 953 break;
952 954
953 case V4L2_CID_AUDIO_BALANCE: 955 case V4L2_CID_AUDIO_BALANCE:
954 state->audio_main_balance = ctrl->value; 956 state->audio_main_balance = ctrl->value;
955 set_audio_regs(client, state); 957 set_audio_regs(sd, state);
956 break; 958 break;
957 959
958 case V4L2_CID_AUDIO_TREBLE: 960 case V4L2_CID_AUDIO_TREBLE:
959 state->audio_main_treble = ctrl->value; 961 state->audio_main_treble = ctrl->value;
960 set_audio_regs(client, state); 962 set_audio_regs(sd, state);
961 break; 963 break;
962 964
963 case V4L2_CID_AUDIO_BASS: 965 case V4L2_CID_AUDIO_BASS:
964 state->audio_main_bass = ctrl->value; 966 state->audio_main_bass = ctrl->value;
965 set_audio_regs(client, state); 967 set_audio_regs(sd, state);
966 break; 968 break;
967 969
968 default: 970 default:
@@ -972,9 +974,9 @@ static int saa717x_set_v4lctrl(struct i2c_client *client, struct v4l2_control *c
972 return 0; 974 return 0;
973} 975}
974 976
975static int saa717x_get_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl) 977static int saa717x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
976{ 978{
977 struct saa717x_state *state = i2c_get_clientdata(client); 979 struct saa717x_state *state = to_state(sd);
978 980
979 switch (ctrl->id) { 981 switch (ctrl->id) {
980 case V4L2_CID_BRIGHTNESS: 982 case V4L2_CID_BRIGHTNESS:
@@ -1103,13 +1105,15 @@ static struct v4l2_queryctrl saa717x_qctrl[] = {
1103 }, 1105 },
1104}; 1106};
1105 1107
1106static int saa717x_set_video_input(struct i2c_client *client, struct saa717x_state *decoder, int inp) 1108static int saa717x_s_video_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
1107{ 1109{
1110 struct saa717x_state *decoder = to_state(sd);
1111 int inp = route->input;
1108 int is_tuner = inp & 0x80; /* tuner input flag */ 1112 int is_tuner = inp & 0x80; /* tuner input flag */
1109 1113
1110 inp &= 0x7f; 1114 inp &= 0x7f;
1111 1115
1112 v4l_dbg(1, debug, client, "decoder set input (%d)\n", inp); 1116 v4l2_dbg(1, debug, sd, "decoder set input (%d)\n", inp);
1113 /* inputs from 0-9 are available*/ 1117 /* inputs from 0-9 are available*/
1114 /* saa717x have mode0-mode9 but mode5 is reserved. */ 1118 /* saa717x have mode0-mode9 but mode5 is reserved. */
1115 if (inp < 0 || inp > 9 || inp == 5) 1119 if (inp < 0 || inp > 9 || inp == 5)
@@ -1119,222 +1123,197 @@ static int saa717x_set_video_input(struct i2c_client *client, struct saa717x_sta
1119 int input_line = inp; 1123 int input_line = inp;
1120 1124
1121 decoder->input = input_line; 1125 decoder->input = input_line;
1122 v4l_dbg(1, debug, client, "now setting %s input %d\n", 1126 v4l2_dbg(1, debug, sd, "now setting %s input %d\n",
1123 input_line >= 6 ? "S-Video" : "Composite", 1127 input_line >= 6 ? "S-Video" : "Composite",
1124 input_line); 1128 input_line);
1125 1129
1126 /* select mode */ 1130 /* select mode */
1127 saa717x_write(client, 0x102, 1131 saa717x_write(sd, 0x102,
1128 (saa717x_read(client, 0x102) & 0xf0) | 1132 (saa717x_read(sd, 0x102) & 0xf0) |
1129 input_line); 1133 input_line);
1130 1134
1131 /* bypass chrominance trap for modes 6..9 */ 1135 /* bypass chrominance trap for modes 6..9 */
1132 saa717x_write(client, 0x109, 1136 saa717x_write(sd, 0x109,
1133 (saa717x_read(client, 0x109) & 0x7f) | 1137 (saa717x_read(sd, 0x109) & 0x7f) |
1134 (input_line < 6 ? 0x0 : 0x80)); 1138 (input_line < 6 ? 0x0 : 0x80));
1135 1139
1136 /* change audio_mode */ 1140 /* change audio_mode */
1137 if (is_tuner) { 1141 if (is_tuner) {
1138 /* tuner */ 1142 /* tuner */
1139 set_audio_mode(client, decoder->tuner_audio_mode); 1143 set_audio_mode(sd, decoder->tuner_audio_mode);
1140 } else { 1144 } else {
1141 /* Force to STEREO mode if Composite or 1145 /* Force to STEREO mode if Composite or
1142 * S-Video were chosen */ 1146 * S-Video were chosen */
1143 set_audio_mode(client, TUNER_AUDIO_STEREO); 1147 set_audio_mode(sd, TUNER_AUDIO_STEREO);
1144 } 1148 }
1145 /* change initialize procedure (Composite/S-Video) */ 1149 /* change initialize procedure (Composite/S-Video) */
1146 if (is_tuner) 1150 if (is_tuner)
1147 saa717x_write_regs(client, reg_init_tuner_input); 1151 saa717x_write_regs(sd, reg_init_tuner_input);
1148 else if (input_line >= 6) 1152 else if (input_line >= 6)
1149 saa717x_write_regs(client, reg_init_svideo_input); 1153 saa717x_write_regs(sd, reg_init_svideo_input);
1150 else 1154 else
1151 saa717x_write_regs(client, reg_init_composite_input); 1155 saa717x_write_regs(sd, reg_init_composite_input);
1152 } 1156 }
1153 1157
1154 return 0; 1158 return 0;
1155} 1159}
1156 1160
1157static int saa717x_command(struct i2c_client *client, unsigned cmd, void *arg) 1161static int saa717x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
1158{ 1162{
1159 struct saa717x_state *decoder = i2c_get_clientdata(client); 1163 int i;
1160
1161 v4l_dbg(1, debug, client, "IOCTL: %08x\n", cmd);
1162 1164
1163 switch (cmd) { 1165 for (i = 0; i < ARRAY_SIZE(saa717x_qctrl); i++)
1164 case VIDIOC_INT_AUDIO_CLOCK_FREQ: 1166 if (qc->id && qc->id == saa717x_qctrl[i].id) {
1165 return saa717x_set_audio_clock_freq(client, *(u32 *)arg); 1167 memcpy(qc, &saa717x_qctrl[i], sizeof(*qc));
1168 return 0;
1169 }
1170 return -EINVAL;
1171}
1166 1172
1167 case VIDIOC_G_CTRL: 1173#ifdef CONFIG_VIDEO_ADV_DEBUG
1168 return saa717x_get_v4lctrl(client, (struct v4l2_control *)arg); 1174static int saa717x_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
1175{
1176 struct i2c_client *client = v4l2_get_subdevdata(sd);
1169 1177
1170 case VIDIOC_S_CTRL: 1178 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
1171 return saa717x_set_v4lctrl(client, (struct v4l2_control *)arg); 1179 return -EINVAL;
1180 if (!capable(CAP_SYS_ADMIN))
1181 return -EPERM;
1182 reg->val = saa717x_read(sd, reg->reg);
1183 return 0;
1184}
1172 1185
1173 case VIDIOC_QUERYCTRL: { 1186static int saa717x_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
1174 struct v4l2_queryctrl *qc = arg; 1187{
1175 int i; 1188 struct i2c_client *client = v4l2_get_subdevdata(sd);
1189 u16 addr = reg->reg & 0xffff;
1190 u8 val = reg->val & 0xff;
1176 1191
1177 for (i = 0; i < ARRAY_SIZE(saa717x_qctrl); i++) 1192 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
1178 if (qc->id && qc->id == saa717x_qctrl[i].id) {
1179 memcpy(qc, &saa717x_qctrl[i], sizeof(*qc));
1180 return 0;
1181 }
1182 return -EINVAL; 1193 return -EINVAL;
1183 } 1194 if (!capable(CAP_SYS_ADMIN))
1184 1195 return -EPERM;
1185#ifdef CONFIG_VIDEO_ADV_DEBUG 1196 saa717x_write(sd, addr, val);
1186 case VIDIOC_DBG_G_REGISTER: { 1197 return 0;
1187 struct v4l2_register *reg = arg; 1198}
1188 1199#endif
1189 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip))
1190 return -EINVAL;
1191 if (!capable(CAP_SYS_ADMIN))
1192 return -EPERM;
1193 reg->val = saa717x_read(client, reg->reg);
1194 break;
1195 }
1196 1200
1197 case VIDIOC_DBG_S_REGISTER: { 1201static int saa717x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
1198 struct v4l2_register *reg = arg; 1202{
1199 u16 addr = reg->reg & 0xffff; 1203 struct v4l2_pix_format *pix;
1200 u8 val = reg->val & 0xff; 1204 int prescale, h_scale, v_scale;
1201 1205
1202 if (!v4l2_chip_match_i2c_client(client, reg->match_type, reg->match_chip)) 1206 pix = &fmt->fmt.pix;
1203 return -EINVAL; 1207 v4l2_dbg(1, debug, sd, "decoder set size\n");
1204 if (!capable(CAP_SYS_ADMIN))
1205 return -EPERM;
1206 saa717x_write(client, addr, val);
1207 break;
1208 }
1209#endif
1210 1208
1211 case VIDIOC_S_FMT: { 1209 /* FIXME need better bounds checking here */
1212 struct v4l2_format *fmt = (struct v4l2_format *)arg; 1210 if (pix->width < 1 || pix->width > 1440)
1213 struct v4l2_pix_format *pix; 1211 return -EINVAL;
1214 int prescale, h_scale, v_scale; 1212 if (pix->height < 1 || pix->height > 960)
1215 1213 return -EINVAL;
1216 pix = &fmt->fmt.pix;
1217 v4l_dbg(1, debug, client, "decoder set size\n");
1218
1219 /* FIXME need better bounds checking here */
1220 if (pix->width < 1 || pix->width > 1440)
1221 return -EINVAL;
1222 if (pix->height < 1 || pix->height > 960)
1223 return -EINVAL;
1224
1225 /* scaling setting */
1226 /* NTSC and interlace only */
1227 prescale = SAA717X_NTSC_WIDTH / pix->width;
1228 if (prescale == 0)
1229 prescale = 1;
1230 h_scale = 1024 * SAA717X_NTSC_WIDTH / prescale / pix->width;
1231 /* interlace */
1232 v_scale = 512 * 2 * SAA717X_NTSC_HEIGHT / pix->height;
1233
1234 /* Horizontal prescaling etc */
1235 set_h_prescale(client, 0, prescale);
1236 set_h_prescale(client, 1, prescale);
1237
1238 /* Horizontal scaling increment */
1239 /* TASK A */
1240 saa717x_write(client, 0x6C, (u8)(h_scale & 0xFF));
1241 saa717x_write(client, 0x6D, (u8)((h_scale >> 8) & 0xFF));
1242 /* TASK B */
1243 saa717x_write(client, 0xAC, (u8)(h_scale & 0xFF));
1244 saa717x_write(client, 0xAD, (u8)((h_scale >> 8) & 0xFF));
1245
1246 /* Vertical prescaling etc */
1247 set_v_scale(client, 0, v_scale);
1248 set_v_scale(client, 1, v_scale);
1249
1250 /* set video output size */
1251 /* video number of pixels at output */
1252 /* TASK A */
1253 saa717x_write(client, 0x5C, (u8)(pix->width & 0xFF));
1254 saa717x_write(client, 0x5D, (u8)((pix->width >> 8) & 0xFF));
1255 /* TASK B */
1256 saa717x_write(client, 0x9C, (u8)(pix->width & 0xFF));
1257 saa717x_write(client, 0x9D, (u8)((pix->width >> 8) & 0xFF));
1258
1259 /* video number of lines at output */
1260 /* TASK A */
1261 saa717x_write(client, 0x5E, (u8)(pix->height & 0xFF));
1262 saa717x_write(client, 0x5F, (u8)((pix->height >> 8) & 0xFF));
1263 /* TASK B */
1264 saa717x_write(client, 0x9E, (u8)(pix->height & 0xFF));
1265 saa717x_write(client, 0x9F, (u8)((pix->height >> 8) & 0xFF));
1266 break;
1267 }
1268 1214
1269 case AUDC_SET_RADIO: 1215 /* scaling setting */
1270 decoder->radio = 1; 1216 /* NTSC and interlace only */
1271 break; 1217 prescale = SAA717X_NTSC_WIDTH / pix->width;
1218 if (prescale == 0)
1219 prescale = 1;
1220 h_scale = 1024 * SAA717X_NTSC_WIDTH / prescale / pix->width;
1221 /* interlace */
1222 v_scale = 512 * 2 * SAA717X_NTSC_HEIGHT / pix->height;
1223
1224 /* Horizontal prescaling etc */
1225 set_h_prescale(sd, 0, prescale);
1226 set_h_prescale(sd, 1, prescale);
1227
1228 /* Horizontal scaling increment */
1229 /* TASK A */
1230 saa717x_write(sd, 0x6C, (u8)(h_scale & 0xFF));
1231 saa717x_write(sd, 0x6D, (u8)((h_scale >> 8) & 0xFF));
1232 /* TASK B */
1233 saa717x_write(sd, 0xAC, (u8)(h_scale & 0xFF));
1234 saa717x_write(sd, 0xAD, (u8)((h_scale >> 8) & 0xFF));
1235
1236 /* Vertical prescaling etc */
1237 set_v_scale(sd, 0, v_scale);
1238 set_v_scale(sd, 1, v_scale);
1239
1240 /* set video output size */
1241 /* video number of pixels at output */
1242 /* TASK A */
1243 saa717x_write(sd, 0x5C, (u8)(pix->width & 0xFF));
1244 saa717x_write(sd, 0x5D, (u8)((pix->width >> 8) & 0xFF));
1245 /* TASK B */
1246 saa717x_write(sd, 0x9C, (u8)(pix->width & 0xFF));
1247 saa717x_write(sd, 0x9D, (u8)((pix->width >> 8) & 0xFF));
1248
1249 /* video number of lines at output */
1250 /* TASK A */
1251 saa717x_write(sd, 0x5E, (u8)(pix->height & 0xFF));
1252 saa717x_write(sd, 0x5F, (u8)((pix->height >> 8) & 0xFF));
1253 /* TASK B */
1254 saa717x_write(sd, 0x9E, (u8)(pix->height & 0xFF));
1255 saa717x_write(sd, 0x9F, (u8)((pix->height >> 8) & 0xFF));
1256 return 0;
1257}
1272 1258
1273 case VIDIOC_S_STD: { 1259static int saa717x_s_radio(struct v4l2_subdev *sd)
1274 v4l2_std_id std = *(v4l2_std_id *) arg; 1260{
1261 struct saa717x_state *decoder = to_state(sd);
1275 1262
1276 v4l_dbg(1, debug, client, "decoder set norm "); 1263 decoder->radio = 1;
1277 v4l_dbg(1, debug, client, "(not yet implementd)\n"); 1264 return 0;
1265}
1278 1266
1279 decoder->radio = 0; 1267static int saa717x_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
1280 decoder->std = std; 1268{
1281 break; 1269 struct saa717x_state *decoder = to_state(sd);
1282 }
1283 1270
1284 case VIDIOC_INT_G_AUDIO_ROUTING: { 1271 v4l2_dbg(1, debug, sd, "decoder set norm ");
1285 struct v4l2_routing *route = arg; 1272 v4l2_dbg(1, debug, sd, "(not yet implementd)\n");
1286 1273
1287 route->input = decoder->audio_input; 1274 decoder->radio = 0;
1288 route->output = 0; 1275 decoder->std = std;
1289 break; 1276 return 0;
1290 } 1277}
1291 1278
1292 case VIDIOC_INT_S_AUDIO_ROUTING: { 1279static int saa717x_s_audio_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
1293 struct v4l2_routing *route = arg; 1280{
1281 struct saa717x_state *decoder = to_state(sd);
1294 1282
1295 if (route->input < 3) { /* FIXME! --tadachi */ 1283 if (route->input < 3) { /* FIXME! --tadachi */
1296 decoder->audio_input = route->input; 1284 decoder->audio_input = route->input;
1297 v4l_dbg(1, debug, client, 1285 v4l2_dbg(1, debug, sd,
1298 "set decoder audio input to %d\n", 1286 "set decoder audio input to %d\n",
1299 decoder->audio_input); 1287 decoder->audio_input);
1300 set_audio_regs(client, decoder); 1288 set_audio_regs(sd, decoder);
1301 break; 1289 return 0;
1302 }
1303 return -ERANGE;
1304 }
1305
1306 case VIDIOC_INT_S_VIDEO_ROUTING: {
1307 struct v4l2_routing *route = arg;
1308 int inp = route->input;
1309
1310 return saa717x_set_video_input(client, decoder, inp);
1311 } 1290 }
1291 return -ERANGE;
1292}
1312 1293
1313 case VIDIOC_STREAMON: { 1294static int saa717x_s_stream(struct v4l2_subdev *sd, int enable)
1314 v4l_dbg(1, debug, client, "decoder enable output\n"); 1295{
1315 decoder->enable = 1; 1296 struct saa717x_state *decoder = to_state(sd);
1316 saa717x_write(client, 0x193, 0xa6);
1317 break;
1318 }
1319 1297
1320 case VIDIOC_STREAMOFF: { 1298 v4l2_dbg(1, debug, sd, "decoder %s output\n",
1321 v4l_dbg(1, debug, client, "decoder disable output\n"); 1299 enable ? "enable" : "disable");
1322 decoder->enable = 0; 1300 decoder->enable = enable;
1323 saa717x_write(client, 0x193, 0x26); /* right? FIXME!--tadachi */ 1301 saa717x_write(sd, 0x193, enable ? 0xa6 : 0x26);
1324 break; 1302 return 0;
1325 } 1303}
1326 1304
1327 /* change audio mode */ 1305/* change audio mode */
1328 case VIDIOC_S_TUNER: { 1306static int saa717x_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1329 struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; 1307{
1330 int audio_mode; 1308 struct saa717x_state *decoder = to_state(sd);
1331 char *mes[4] = { 1309 int audio_mode;
1332 "MONO", "STEREO", "LANG1", "LANG2/SAP" 1310 char *mes[4] = {
1333 }; 1311 "MONO", "STEREO", "LANG1", "LANG2/SAP"
1312 };
1334 1313
1335 audio_mode = V4L2_TUNER_MODE_STEREO; 1314 audio_mode = V4L2_TUNER_MODE_STEREO;
1336 1315
1337 switch (vt->audmode) { 1316 switch (vt->audmode) {
1338 case V4L2_TUNER_MODE_MONO: 1317 case V4L2_TUNER_MODE_MONO:
1339 audio_mode = TUNER_AUDIO_MONO; 1318 audio_mode = TUNER_AUDIO_MONO;
1340 break; 1319 break;
@@ -1347,70 +1326,101 @@ static int saa717x_command(struct i2c_client *client, unsigned cmd, void *arg)
1347 case V4L2_TUNER_MODE_LANG1: 1326 case V4L2_TUNER_MODE_LANG1:
1348 audio_mode = TUNER_AUDIO_LANG1; 1327 audio_mode = TUNER_AUDIO_LANG1;
1349 break; 1328 break;
1350 }
1351
1352 v4l_dbg(1, debug, client, "change audio mode to %s\n",
1353 mes[audio_mode]);
1354 decoder->tuner_audio_mode = audio_mode;
1355 /* The registers are not changed here. */
1356 /* See DECODER_ENABLE_OUTPUT section. */
1357 set_audio_mode(client, decoder->tuner_audio_mode);
1358 break;
1359 } 1329 }
1360 1330
1361 case VIDIOC_G_TUNER: { 1331 v4l2_dbg(1, debug, sd, "change audio mode to %s\n",
1362 struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; 1332 mes[audio_mode]);
1363 int dual_f, stereo_f; 1333 decoder->tuner_audio_mode = audio_mode;
1334 /* The registers are not changed here. */
1335 /* See DECODER_ENABLE_OUTPUT section. */
1336 set_audio_mode(sd, decoder->tuner_audio_mode);
1337 return 0;
1338}
1364 1339
1365 if (decoder->radio) 1340static int saa717x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1366 break; 1341{
1367 get_inf_dev_status(client, &dual_f, &stereo_f); 1342 struct saa717x_state *decoder = to_state(sd);
1343 int dual_f, stereo_f;
1368 1344
1369 v4l_dbg(1, debug, client, "DETECT==st:%d dual:%d\n", 1345 if (decoder->radio)
1370 stereo_f, dual_f); 1346 return 0;
1347 get_inf_dev_status(sd, &dual_f, &stereo_f);
1371 1348
1372 /* mono */ 1349 v4l2_dbg(1, debug, sd, "DETECT==st:%d dual:%d\n",
1373 if ((dual_f == 0) && (stereo_f == 0)) { 1350 stereo_f, dual_f);
1374 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1375 v4l_dbg(1, debug, client, "DETECT==MONO\n");
1376 }
1377 1351
1378 /* stereo */ 1352 /* mono */
1379 if (stereo_f == 1) { 1353 if ((dual_f == 0) && (stereo_f == 0)) {
1380 if (vt->audmode == V4L2_TUNER_MODE_STEREO || 1354 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1381 vt->audmode == V4L2_TUNER_MODE_LANG1) { 1355 v4l2_dbg(1, debug, sd, "DETECT==MONO\n");
1382 vt->rxsubchans = V4L2_TUNER_SUB_STEREO; 1356 }
1383 v4l_dbg(1, debug, client, "DETECT==ST(ST)\n");
1384 } else {
1385 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1386 v4l_dbg(1, debug, client, "DETECT==ST(MONO)\n");
1387 }
1388 }
1389 1357
1390 /* dual */ 1358 /* stereo */
1391 if (dual_f == 1) { 1359 if (stereo_f == 1) {
1392 if (vt->audmode == V4L2_TUNER_MODE_LANG2) { 1360 if (vt->audmode == V4L2_TUNER_MODE_STEREO ||
1393 vt->rxsubchans = V4L2_TUNER_SUB_LANG2 | V4L2_TUNER_SUB_MONO; 1361 vt->audmode == V4L2_TUNER_MODE_LANG1) {
1394 v4l_dbg(1, debug, client, "DETECT==DUAL1\n"); 1362 vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
1395 } else { 1363 v4l2_dbg(1, debug, sd, "DETECT==ST(ST)\n");
1396 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_MONO; 1364 } else {
1397 v4l_dbg(1, debug, client, "DETECT==DUAL2\n"); 1365 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1398 } 1366 v4l2_dbg(1, debug, sd, "DETECT==ST(MONO)\n");
1399 } 1367 }
1400 break;
1401 } 1368 }
1402 1369
1403 case VIDIOC_LOG_STATUS: 1370 /* dual */
1404 /* not yet implemented */ 1371 if (dual_f == 1) {
1405 break; 1372 if (vt->audmode == V4L2_TUNER_MODE_LANG2) {
1406 1373 vt->rxsubchans = V4L2_TUNER_SUB_LANG2 | V4L2_TUNER_SUB_MONO;
1407 default: 1374 v4l2_dbg(1, debug, sd, "DETECT==DUAL1\n");
1408 return -EINVAL; 1375 } else {
1376 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_MONO;
1377 v4l2_dbg(1, debug, sd, "DETECT==DUAL2\n");
1378 }
1409 } 1379 }
1410
1411 return 0; 1380 return 0;
1412} 1381}
1413 1382
1383static int saa717x_command(struct i2c_client *client, unsigned cmd, void *arg)
1384{
1385 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
1386}
1387
1388/* ----------------------------------------------------------------------- */
1389
1390static const struct v4l2_subdev_core_ops saa717x_core_ops = {
1391#ifdef CONFIG_VIDEO_ADV_DEBUG
1392 .g_register = saa717x_g_register,
1393 .s_register = saa717x_s_register,
1394#endif
1395 .queryctrl = saa717x_queryctrl,
1396 .g_ctrl = saa717x_g_ctrl,
1397 .s_ctrl = saa717x_s_ctrl,
1398};
1399
1400static const struct v4l2_subdev_tuner_ops saa717x_tuner_ops = {
1401 .g_tuner = saa717x_g_tuner,
1402 .s_tuner = saa717x_s_tuner,
1403 .s_std = saa717x_s_std,
1404 .s_radio = saa717x_s_radio,
1405};
1406
1407static const struct v4l2_subdev_video_ops saa717x_video_ops = {
1408 .s_routing = saa717x_s_video_routing,
1409 .s_fmt = saa717x_s_fmt,
1410 .s_stream = saa717x_s_stream,
1411};
1412
1413static const struct v4l2_subdev_audio_ops saa717x_audio_ops = {
1414 .s_routing = saa717x_s_audio_routing,
1415};
1416
1417static const struct v4l2_subdev_ops saa717x_ops = {
1418 .core = &saa717x_core_ops,
1419 .tuner = &saa717x_tuner_ops,
1420 .audio = &saa717x_audio_ops,
1421 .video = &saa717x_video_ops,
1422};
1423
1414/* ----------------------------------------------------------------------- */ 1424/* ----------------------------------------------------------------------- */
1415 1425
1416 1426
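Editor's note: the block above is the other half of the conversion — the old saa717x_command() switch is split into per-ioctl subdev ops grouped into core/tuner/audio/video tables, while a thin command() hook remains so legacy bridge drivers keep working, simply forwarding into the subdev dispatcher. A hedged sketch of that wiring with hypothetical foo_* names (header placement of these helpers varied in this series):

#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

static int foo_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	return 0;	/* stub: a real driver programs the hardware here */
}

static const struct v4l2_subdev_core_ops foo_core_ops = {
	.s_ctrl = foo_s_ctrl,
};

static const struct v4l2_subdev_ops foo_ops = {
	.core = &foo_core_ops,
};

/* Transitional glue: the i2c clientdata now holds the subdev, so the old
 * command() entry point forwards into the subdev framework. */
static int foo_command(struct i2c_client *client, unsigned cmd, void *arg)
{
	return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
}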
@@ -1421,6 +1431,7 @@ static int saa717x_probe(struct i2c_client *client,
1421 const struct i2c_device_id *did) 1431 const struct i2c_device_id *did)
1422{ 1432{
1423 struct saa717x_state *decoder; 1433 struct saa717x_state *decoder;
1434 struct v4l2_subdev *sd;
1424 u8 id = 0; 1435 u8 id = 0;
1425 char *p = ""; 1436 char *p = "";
1426 1437
@@ -1428,13 +1439,21 @@ static int saa717x_probe(struct i2c_client *client,
1428 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 1439 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
1429 return -EIO; 1440 return -EIO;
1430 1441
1431 if (saa717x_write(client, 0x5a4, 0xfe) && 1442 decoder = kzalloc(sizeof(struct saa717x_state), GFP_KERNEL);
1432 saa717x_write(client, 0x5a5, 0x0f) && 1443 if (decoder == NULL)
1433 saa717x_write(client, 0x5a6, 0x00) && 1444 return -ENOMEM;
1434 saa717x_write(client, 0x5a7, 0x01)) 1445
1435 id = saa717x_read(client, 0x5a0); 1446 sd = &decoder->sd;
1447 v4l2_i2c_subdev_init(sd, client, &saa717x_ops);
1448
1449 if (saa717x_write(sd, 0x5a4, 0xfe) &&
1450 saa717x_write(sd, 0x5a5, 0x0f) &&
1451 saa717x_write(sd, 0x5a6, 0x00) &&
1452 saa717x_write(sd, 0x5a7, 0x01))
1453 id = saa717x_read(sd, 0x5a0);
1436 if (id != 0xc2 && id != 0x32 && id != 0xf2 && id != 0x6c) { 1454 if (id != 0xc2 && id != 0x32 && id != 0xf2 && id != 0x6c) {
1437 v4l_dbg(1, debug, client, "saa717x not found (id=%02x)\n", id); 1455 v4l2_dbg(1, debug, sd, "saa717x not found (id=%02x)\n", id);
1456 kfree(decoder);
1438 return -ENODEV; 1457 return -ENODEV;
1439 } 1458 }
1440 if (id == 0xc2) 1459 if (id == 0xc2)
@@ -1445,14 +1464,8 @@ static int saa717x_probe(struct i2c_client *client,
1445 p = "saa7174HL"; 1464 p = "saa7174HL";
1446 else 1465 else
1447 p = "saa7171"; 1466 p = "saa7171";
1448 v4l_info(client, "%s found @ 0x%x (%s)\n", p, 1467 v4l2_info(sd, "%s found @ 0x%x (%s)\n", p,
1449 client->addr << 1, client->adapter->name); 1468 client->addr << 1, client->adapter->name);
1450
1451 decoder = kzalloc(sizeof(struct saa717x_state), GFP_KERNEL);
1452 i2c_set_clientdata(client, decoder);
1453
1454 if (decoder == NULL)
1455 return -ENOMEM;
1456 decoder->std = V4L2_STD_NTSC; 1469 decoder->std = V4L2_STD_NTSC;
1457 decoder->input = -1; 1470 decoder->input = -1;
1458 decoder->enable = 1; 1471 decoder->enable = 1;
@@ -1481,15 +1494,15 @@ static int saa717x_probe(struct i2c_client *client,
1481 decoder->audio_main_volume = 1494 decoder->audio_main_volume =
1482 (decoder->audio_main_vol_r + 41) * 65535 / (24 - (-40)); 1495 (decoder->audio_main_vol_r + 41) * 65535 / (24 - (-40));
1483 1496
1484 v4l_dbg(1, debug, client, "writing init values\n"); 1497 v4l2_dbg(1, debug, sd, "writing init values\n");
1485 1498
1486 /* FIXME!! */ 1499 /* FIXME!! */
1487 saa717x_write_regs(client, reg_init_initialize); 1500 saa717x_write_regs(sd, reg_init_initialize);
1488 set_video_output_level_regs(client, decoder); 1501 set_video_output_level_regs(sd, decoder);
1489 /* set bass,treble to 0db 20041101 K.Ohta */ 1502 /* set bass,treble to 0db 20041101 K.Ohta */
1490 decoder->audio_main_bass = 0; 1503 decoder->audio_main_bass = 0;
1491 decoder->audio_main_treble = 0; 1504 decoder->audio_main_treble = 0;
1492 set_audio_regs(client, decoder); 1505 set_audio_regs(sd, decoder);
1493 1506
1494 set_current_state(TASK_INTERRUPTIBLE); 1507 set_current_state(TASK_INTERRUPTIBLE);
1495 schedule_timeout(2*HZ); 1508 schedule_timeout(2*HZ);
@@ -1498,7 +1511,10 @@ static int saa717x_probe(struct i2c_client *client,
1498 1511
1499static int saa717x_remove(struct i2c_client *client) 1512static int saa717x_remove(struct i2c_client *client)
1500{ 1513{
1501 kfree(i2c_get_clientdata(client)); 1514 struct v4l2_subdev *sd = i2c_get_clientdata(client);
1515
1516 v4l2_device_unregister_subdev(sd);
1517 kfree(to_state(sd));
1502 return 0; 1518 return 0;
1503} 1519}
1504 1520
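The kfree(to_state(sd)) in saa717x_remove() above assumes the driver state embeds its struct v4l2_subdev, which matches the probe path (sd = &decoder->sd). A minimal sketch of such a helper, with the field name taken from that assumption:

static inline struct saa717x_state *to_state(struct v4l2_subdev *sd)
{
	/* recover the enclosing state structure from the embedded subdev */
	return container_of(sd, struct saa717x_state, sd);
}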
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 044a2e94c34d..d652f25eef0e 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -975,8 +975,7 @@ static int se401_close(struct inode *inode, struct file *file)
975 return 0; 975 return 0;
976} 976}
977 977
978static int se401_do_ioctl(struct inode *inode, struct file *file, 978static int se401_do_ioctl(struct file *file, unsigned int cmd, void *arg)
979 unsigned int cmd, void *arg)
980{ 979{
981 struct video_device *vdev = file->private_data; 980 struct video_device *vdev = file->private_data;
982 struct usb_se401 *se401 = (struct usb_se401 *)vdev; 981 struct usb_se401 *se401 = (struct usb_se401 *)vdev;
@@ -1142,7 +1141,7 @@ static int se401_do_ioctl(struct inode *inode, struct file *file,
1142static int se401_ioctl(struct inode *inode, struct file *file, 1141static int se401_ioctl(struct inode *inode, struct file *file,
1143 unsigned int cmd, unsigned long arg) 1142 unsigned int cmd, unsigned long arg)
1144{ 1143{
1145 return video_usercopy(inode, file, cmd, arg, se401_do_ioctl); 1144 return video_usercopy(file, cmd, arg, se401_do_ioctl);
1146} 1145}
1147 1146
1148static ssize_t se401_read(struct file *file, char __user *buf, 1147static ssize_t se401_read(struct file *file, char __user *buf,
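The se401 hunks above are one instance of the tree-wide video_usercopy() conversion: the ioctl helper no longer receives a struct inode. Reduced to bare prototypes (taken from the diff, shown only for contrast):

/* before */
static int se401_do_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, void *arg);
/* after */
static int se401_do_ioctl(struct file *file, unsigned int cmd, void *arg);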
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 536b1a9b310c..9a2586b07a05 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -29,7 +29,6 @@
29#include <linux/version.h> 29#include <linux/version.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/mutex.h>
33#include <linux/videodev2.h> 32#include <linux/videodev2.h>
34#include <linux/clk.h> 33#include <linux/clk.h>
35 34
@@ -75,8 +74,6 @@
75#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */ 74#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
76#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */ 75#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
77 76
78static DEFINE_MUTEX(camera_lock);
79
80/* per video frame buffer */ 77/* per video frame buffer */
81struct sh_mobile_ceu_buffer { 78struct sh_mobile_ceu_buffer {
82 struct videobuf_buffer vb; /* v4l buffer must be first */ 79 struct videobuf_buffer vb; /* v4l buffer must be first */
@@ -97,18 +94,20 @@ struct sh_mobile_ceu_dev {
97 spinlock_t lock; 94 spinlock_t lock;
98 struct list_head capture; 95 struct list_head capture;
99 struct videobuf_buffer *active; 96 struct videobuf_buffer *active;
97 int is_interlace;
100 98
101 struct sh_mobile_ceu_info *pdata; 99 struct sh_mobile_ceu_info *pdata;
100
101 const struct soc_camera_data_format *camera_fmt;
102}; 102};
103 103
104static void ceu_write(struct sh_mobile_ceu_dev *priv, 104static void ceu_write(struct sh_mobile_ceu_dev *priv,
105 unsigned long reg_offs, unsigned long data) 105 unsigned long reg_offs, u32 data)
106{ 106{
107 iowrite32(data, priv->base + reg_offs); 107 iowrite32(data, priv->base + reg_offs);
108} 108}
109 109
110static unsigned long ceu_read(struct sh_mobile_ceu_dev *priv, 110static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
111 unsigned long reg_offs)
112{ 111{
113 return ioread32(priv->base + reg_offs); 112 return ioread32(priv->base + reg_offs);
114} 113}
@@ -156,21 +155,52 @@ static void free_buffer(struct videobuf_queue *vq,
156 buf->vb.state = VIDEOBUF_NEEDS_INIT; 155 buf->vb.state = VIDEOBUF_NEEDS_INIT;
157} 156}
158 157
158#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
159#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
160#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
161#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
162
163
159static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) 164static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
160{ 165{
161 ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~1); 166 struct soc_camera_device *icd = pcdev->icd;
162 ceu_write(pcdev, CETCR, ~ceu_read(pcdev, CETCR) & 0x0317f313); 167 dma_addr_t phys_addr_top, phys_addr_bottom;
163 ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | 1);
164
165 ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~0x10000);
166 168
167 ceu_write(pcdev, CETCR, 0x0317f313 ^ 0x10); 169 /* The hardware is _very_ picky about this sequence. Especially
170 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
171 * several not-so-well documented interrupt sources in CETCR.
172 */
173 ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_CPEIE);
174 ceu_write(pcdev, CETCR, ~ceu_read(pcdev, CETCR) & CEU_CETCR_MAGIC);
175 ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_CPEIE);
176 ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
177 ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);
178
179 if (!pcdev->active)
180 return;
181
182 phys_addr_top = videobuf_to_dma_contig(pcdev->active);
183 ceu_write(pcdev, CDAYR, phys_addr_top);
184 if (pcdev->is_interlace) {
185 phys_addr_bottom = phys_addr_top + icd->width;
186 ceu_write(pcdev, CDBYR, phys_addr_bottom);
187 }
168 188
169 if (pcdev->active) { 189 switch (icd->current_fmt->fourcc) {
170 pcdev->active->state = VIDEOBUF_ACTIVE; 190 case V4L2_PIX_FMT_NV12:
171 ceu_write(pcdev, CDAYR, videobuf_to_dma_contig(pcdev->active)); 191 case V4L2_PIX_FMT_NV21:
172 ceu_write(pcdev, CAPSR, 0x1); /* start capture */ 192 case V4L2_PIX_FMT_NV16:
193 case V4L2_PIX_FMT_NV61:
194 phys_addr_top += icd->width * icd->height;
195 ceu_write(pcdev, CDACR, phys_addr_top);
196 if (pcdev->is_interlace) {
197 phys_addr_bottom = phys_addr_top + icd->width;
198 ceu_write(pcdev, CDBCR, phys_addr_bottom);
199 }
173 } 200 }
201
202 pcdev->active->state = VIDEOBUF_ACTIVE;
203 ceu_write(pcdev, CAPSR, 0x1); /* start capture */
174} 204}
175 205
176static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq, 206static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq,
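The CDAYR/CDACR/CDBYR/CDBCR writes above follow the semi-planar NV12/NV21/NV16/NV61 layout: a full-resolution Y plane followed immediately by an interleaved chroma plane, with the bottom-field addresses offset by one line width when capturing interlaced material. A rough sketch of the address arithmetic, reusing the names from the function above:

	dma_addr_t y_top = phys_addr_top;			/* CDAYR */
	dma_addr_t c_top = y_top + icd->width * icd->height;	/* CDACR */
	dma_addr_t y_bot = y_top + icd->width;			/* CDBYR (interlaced only) */
	dma_addr_t c_bot = c_top + icd->width;			/* CDBCR (interlaced only) */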
@@ -292,14 +322,13 @@ static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
292 return IRQ_HANDLED; 322 return IRQ_HANDLED;
293} 323}
294 324
325/* Called with .video_lock held */
295static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) 326static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
296{ 327{
297 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 328 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
298 struct sh_mobile_ceu_dev *pcdev = ici->priv; 329 struct sh_mobile_ceu_dev *pcdev = ici->priv;
299 int ret = -EBUSY; 330 int ret = -EBUSY;
300 331
301 mutex_lock(&camera_lock);
302
303 if (pcdev->icd) 332 if (pcdev->icd)
304 goto err; 333 goto err;
305 334
@@ -319,11 +348,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
319 348
320 pcdev->icd = icd; 349 pcdev->icd = icd;
321err: 350err:
322 mutex_unlock(&camera_lock);
323
324 return ret; 351 return ret;
325} 352}
326 353
354/* Called with .video_lock held */
327static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) 355static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
328{ 356{
329 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 357 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
@@ -362,8 +390,9 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
362{ 390{
363 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 391 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
364 struct sh_mobile_ceu_dev *pcdev = ici->priv; 392 struct sh_mobile_ceu_dev *pcdev = ici->priv;
365 int ret, buswidth, width, cfszr_width, cdwdr_width; 393 int ret, buswidth, width, height, cfszr_width, cdwdr_width;
366 unsigned long camera_flags, common_flags, value; 394 unsigned long camera_flags, common_flags, value;
395 int yuv_mode, yuv_lineskip;
367 396
368 camera_flags = icd->ops->query_bus_param(icd); 397 camera_flags = icd->ops->query_bus_param(icd);
369 common_flags = soc_camera_bus_param_compatible(camera_flags, 398 common_flags = soc_camera_bus_param_compatible(camera_flags,
@@ -389,27 +418,71 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
389 ceu_write(pcdev, CRCNTR, 0); 418 ceu_write(pcdev, CRCNTR, 0);
390 ceu_write(pcdev, CRCMPR, 0); 419 ceu_write(pcdev, CRCMPR, 0);
391 420
392 value = 0x00000010; 421 value = 0x00000010; /* data fetch by default */
393 value |= (common_flags & SOCAM_VSYNC_ACTIVE_LOW) ? (1 << 1) : 0; 422 yuv_mode = yuv_lineskip = 0;
394 value |= (common_flags & SOCAM_HSYNC_ACTIVE_LOW) ? (1 << 0) : 0; 423
395 value |= (buswidth == 16) ? (1 << 12) : 0; 424 switch (icd->current_fmt->fourcc) {
425 case V4L2_PIX_FMT_NV12:
426 case V4L2_PIX_FMT_NV21:
427 yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */
428 /* fall-through */
429 case V4L2_PIX_FMT_NV16:
430 case V4L2_PIX_FMT_NV61:
431 yuv_mode = 1;
432 switch (pcdev->camera_fmt->fourcc) {
433 case V4L2_PIX_FMT_UYVY:
434 value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
435 break;
436 case V4L2_PIX_FMT_VYUY:
437 value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
438 break;
439 case V4L2_PIX_FMT_YUYV:
440 value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
441 break;
442 case V4L2_PIX_FMT_YVYU:
443 value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
444 break;
445 default:
446 BUG();
447 }
448 }
449
450 if (icd->current_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
451 icd->current_fmt->fourcc == V4L2_PIX_FMT_NV61)
452 value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
453
454 value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
455 value |= common_flags & SOCAM_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
456 value |= buswidth == 16 ? 1 << 12 : 0;
396 ceu_write(pcdev, CAMCR, value); 457 ceu_write(pcdev, CAMCR, value);
397 458
398 ceu_write(pcdev, CAPCR, 0x00300000); 459 ceu_write(pcdev, CAPCR, 0x00300000);
399 ceu_write(pcdev, CAIFR, 0); 460 ceu_write(pcdev, CAIFR, (pcdev->is_interlace) ? 0x101 : 0);
400 461
401 mdelay(1); 462 mdelay(1);
402 463
403 width = icd->width * (icd->current_fmt->depth / 8); 464 if (yuv_mode) {
404 width = (buswidth == 16) ? width / 2 : width; 465 width = icd->width * 2;
405 cfszr_width = (buswidth == 8) ? width / 2 : width; 466 width = buswidth == 16 ? width / 2 : width;
406 cdwdr_width = (buswidth == 16) ? width * 2 : width; 467 cfszr_width = cdwdr_width = icd->width;
468 } else {
469 width = icd->width * ((icd->current_fmt->depth + 7) >> 3);
470 width = buswidth == 16 ? width / 2 : width;
471 cfszr_width = buswidth == 8 ? width / 2 : width;
472 cdwdr_width = buswidth == 16 ? width * 2 : width;
473 }
474
475 height = icd->height;
476 if (pcdev->is_interlace) {
477 height /= 2;
478 cdwdr_width *= 2;
479 }
407 480
408 ceu_write(pcdev, CAMOR, 0); 481 ceu_write(pcdev, CAMOR, 0);
409 ceu_write(pcdev, CAPWR, (icd->height << 16) | width); 482 ceu_write(pcdev, CAPWR, (height << 16) | width);
410 ceu_write(pcdev, CFLCR, 0); /* data fetch mode - no scaling */ 483 ceu_write(pcdev, CFLCR, 0); /* no scaling */
411 ceu_write(pcdev, CFSZR, (icd->height << 16) | cfszr_width); 484 ceu_write(pcdev, CFSZR, (height << 16) | cfszr_width);
412 ceu_write(pcdev, CLFCR, 0); /* data fetch mode - no lowpass filter */ 485 ceu_write(pcdev, CLFCR, 0); /* no lowpass filter */
413 486
414 /* A few words about byte order (observed in Big Endian mode) 487 /* A few words about byte order (observed in Big Endian mode)
415 * 488 *
@@ -423,19 +496,20 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
423 * using 7 we swap the data bytes to match the incoming order: 496 * using 7 we swap the data bytes to match the incoming order:
424 * D0, D1, D2, D3, D4, D5, D6, D7 497 * D0, D1, D2, D3, D4, D5, D6, D7
425 */ 498 */
426 ceu_write(pcdev, CDOCR, 0x00000017); 499 value = 0x00000017;
500 if (yuv_lineskip)
501 value &= ~0x00000010; /* convert 4:2:2 -> 4:2:0 */
502
503 ceu_write(pcdev, CDOCR, value);
427 504
428 ceu_write(pcdev, CDWDR, cdwdr_width); 505 ceu_write(pcdev, CDWDR, cdwdr_width);
429 ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */ 506 ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
430 507
431 /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */ 508 /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
432 /* in data fetch mode: no need for CDACR, CDBYR, CDBCR */
433
434 return 0; 509 return 0;
435} 510}
436 511
437static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd, 512static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd)
438 __u32 pixfmt)
439{ 513{
440 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 514 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
441 struct sh_mobile_ceu_dev *pcdev = ici->priv; 515 struct sh_mobile_ceu_dev *pcdev = ici->priv;
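Plugging concrete numbers into the capture-window computation added a hunk earlier helps check it. For an illustrative configuration not taken from the patch, a progressive 640x480 NV16 capture over an 8-bit bus:

	width       = 640 * 2 = 1280	/* yuv_mode; 8-bit bus, so no halving */
	cfszr_width = cdwdr_width = 640
	CAPWR       = (480 << 16) | 1280
	CFSZR       = (480 << 16) | 640
	CDWDR       = 640

With an interlaced source, height drops to 240 per field and CDWDR doubles to 1280, presumably so the two fields land line-interleaved in the destination buffer.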
@@ -450,15 +524,123 @@ static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
450 return 0; 524 return 0;
451} 525}
452 526
453static int sh_mobile_ceu_set_fmt_cap(struct soc_camera_device *icd, 527static const struct soc_camera_data_format sh_mobile_ceu_formats[] = {
454 __u32 pixfmt, struct v4l2_rect *rect) 528 {
529 .name = "NV12",
530 .depth = 12,
531 .fourcc = V4L2_PIX_FMT_NV12,
532 .colorspace = V4L2_COLORSPACE_JPEG,
533 },
534 {
535 .name = "NV21",
536 .depth = 12,
537 .fourcc = V4L2_PIX_FMT_NV21,
538 .colorspace = V4L2_COLORSPACE_JPEG,
539 },
540 {
541 .name = "NV16",
542 .depth = 16,
543 .fourcc = V4L2_PIX_FMT_NV16,
544 .colorspace = V4L2_COLORSPACE_JPEG,
545 },
546 {
547 .name = "NV61",
548 .depth = 16,
549 .fourcc = V4L2_PIX_FMT_NV61,
550 .colorspace = V4L2_COLORSPACE_JPEG,
551 },
552};
553
554static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
555 struct soc_camera_format_xlate *xlate)
556{
557 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
558 int ret, k, n;
559 int formats = 0;
560
561 ret = sh_mobile_ceu_try_bus_param(icd);
562 if (ret < 0)
563 return 0;
564
565 switch (icd->formats[idx].fourcc) {
566 case V4L2_PIX_FMT_UYVY:
567 case V4L2_PIX_FMT_VYUY:
568 case V4L2_PIX_FMT_YUYV:
569 case V4L2_PIX_FMT_YVYU:
570 n = ARRAY_SIZE(sh_mobile_ceu_formats);
571 formats += n;
572 for (k = 0; xlate && k < n; k++) {
573 xlate->host_fmt = &sh_mobile_ceu_formats[k];
574 xlate->cam_fmt = icd->formats + idx;
575 xlate->buswidth = icd->formats[idx].depth;
576 xlate++;
577 dev_dbg(&ici->dev, "Providing format %s using %s\n",
578 sh_mobile_ceu_formats[k].name,
579 icd->formats[idx].name);
580 }
581 default:
582 /* Generic pass-through */
583 formats++;
584 if (xlate) {
585 xlate->host_fmt = icd->formats + idx;
586 xlate->cam_fmt = icd->formats + idx;
587 xlate->buswidth = icd->formats[idx].depth;
588 xlate++;
589 dev_dbg(&ici->dev,
590 "Providing format %s in pass-through mode\n",
591 icd->formats[idx].name);
592 }
593 }
594
595 return formats;
596}
597
598static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
599 __u32 pixfmt, struct v4l2_rect *rect)
455{ 600{
456 return icd->ops->set_fmt_cap(icd, pixfmt, rect); 601 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
602 struct sh_mobile_ceu_dev *pcdev = ici->priv;
603 const struct soc_camera_format_xlate *xlate;
604 int ret;
605
606 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
607 if (!xlate) {
608 dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
609 return -EINVAL;
610 }
611
612 switch (pixfmt) {
613 case 0: /* Only geometry change */
614 ret = icd->ops->set_fmt(icd, pixfmt, rect);
615 break;
616 default:
617 ret = icd->ops->set_fmt(icd, xlate->cam_fmt->fourcc, rect);
618 }
619
620 if (pixfmt && !ret) {
621 icd->buswidth = xlate->buswidth;
622 icd->current_fmt = xlate->host_fmt;
623 pcdev->camera_fmt = xlate->cam_fmt;
624 }
625
626 return ret;
457} 627}
458 628
459static int sh_mobile_ceu_try_fmt_cap(struct soc_camera_device *icd, 629static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
460 struct v4l2_format *f) 630 struct v4l2_format *f)
461{ 631{
632 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
633 struct sh_mobile_ceu_dev *pcdev = ici->priv;
634 const struct soc_camera_format_xlate *xlate;
635 __u32 pixfmt = f->fmt.pix.pixelformat;
636 int ret;
637
638 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
639 if (!xlate) {
640 dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
641 return -EINVAL;
642 }
643
462 /* FIXME: calculate using depth and bus width */ 644 /* FIXME: calculate using depth and bus width */
463 645
464 if (f->fmt.pix.height < 4) 646 if (f->fmt.pix.height < 4)
@@ -472,8 +654,31 @@ static int sh_mobile_ceu_try_fmt_cap(struct soc_camera_device *icd,
472 f->fmt.pix.width &= ~0x01; 654 f->fmt.pix.width &= ~0x01;
473 f->fmt.pix.height &= ~0x03; 655 f->fmt.pix.height &= ~0x03;
474 656
657 f->fmt.pix.bytesperline = f->fmt.pix.width *
658 DIV_ROUND_UP(xlate->host_fmt->depth, 8);
659 f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
660
475 /* limit to sensor capabilities */ 661 /* limit to sensor capabilities */
476 return icd->ops->try_fmt_cap(icd, f); 662 ret = icd->ops->try_fmt(icd, f);
663 if (ret < 0)
664 return ret;
665
666 switch (f->fmt.pix.field) {
667 case V4L2_FIELD_INTERLACED:
668 pcdev->is_interlace = 1;
669 break;
670 case V4L2_FIELD_ANY:
671 f->fmt.pix.field = V4L2_FIELD_NONE;
672 /* fall-through */
673 case V4L2_FIELD_NONE:
674 pcdev->is_interlace = 0;
675 break;
676 default:
677 ret = -EINVAL;
678 break;
679 }
680
681 return ret;
477} 682}
478 683
479static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf, 684static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
@@ -532,7 +737,7 @@ static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q,
532 &sh_mobile_ceu_videobuf_ops, 737 &sh_mobile_ceu_videobuf_ops,
533 &ici->dev, &pcdev->lock, 738 &ici->dev, &pcdev->lock,
534 V4L2_BUF_TYPE_VIDEO_CAPTURE, 739 V4L2_BUF_TYPE_VIDEO_CAPTURE,
535 V4L2_FIELD_NONE, 740 V4L2_FIELD_ANY,
536 sizeof(struct sh_mobile_ceu_buffer), 741 sizeof(struct sh_mobile_ceu_buffer),
537 icd); 742 icd);
538} 743}
@@ -541,12 +746,12 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
541 .owner = THIS_MODULE, 746 .owner = THIS_MODULE,
542 .add = sh_mobile_ceu_add_device, 747 .add = sh_mobile_ceu_add_device,
543 .remove = sh_mobile_ceu_remove_device, 748 .remove = sh_mobile_ceu_remove_device,
544 .set_fmt_cap = sh_mobile_ceu_set_fmt_cap, 749 .get_formats = sh_mobile_ceu_get_formats,
545 .try_fmt_cap = sh_mobile_ceu_try_fmt_cap, 750 .set_fmt = sh_mobile_ceu_set_fmt,
751 .try_fmt = sh_mobile_ceu_try_fmt,
546 .reqbufs = sh_mobile_ceu_reqbufs, 752 .reqbufs = sh_mobile_ceu_reqbufs,
547 .poll = sh_mobile_ceu_poll, 753 .poll = sh_mobile_ceu_poll,
548 .querycap = sh_mobile_ceu_querycap, 754 .querycap = sh_mobile_ceu_querycap,
549 .try_bus_param = sh_mobile_ceu_try_bus_param,
550 .set_bus_param = sh_mobile_ceu_set_bus_param, 755 .set_bus_param = sh_mobile_ceu_set_bus_param,
551 .init_videobuf = sh_mobile_ceu_init_videobuf, 756 .init_videobuf = sh_mobile_ceu_init_videobuf,
552}; 757};
@@ -616,7 +821,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
616 821
617 /* request irq */ 822 /* request irq */
618 err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED, 823 err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED,
619 pdev->dev.bus_id, pcdev); 824 dev_name(&pdev->dev), pcdev);
620 if (err) { 825 if (err) {
621 dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); 826 dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
622 goto exit_release_mem; 827 goto exit_release_mem;
@@ -633,8 +838,8 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
633 pcdev->ici.priv = pcdev; 838 pcdev->ici.priv = pcdev;
634 pcdev->ici.dev.parent = &pdev->dev; 839 pcdev->ici.dev.parent = &pdev->dev;
635 pcdev->ici.nr = pdev->id; 840 pcdev->ici.nr = pdev->id;
636 pcdev->ici.drv_name = pdev->dev.bus_id, 841 pcdev->ici.drv_name = dev_name(&pdev->dev);
637 pcdev->ici.ops = &sh_mobile_ceu_host_ops, 842 pcdev->ici.ops = &sh_mobile_ceu_host_ops;
638 843
639 err = soc_camera_host_register(&pcdev->ici); 844 err = soc_camera_host_register(&pcdev->ici);
640 if (err) 845 if (err)
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index fcd2b62f92c4..01a8efb8deb1 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -2162,7 +2162,7 @@ sn9c102_vidioc_querycap(struct sn9c102_device* cam, void __user * arg)
2162 2162
2163 strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card)); 2163 strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card));
2164 if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0) 2164 if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0)
2165 strlcpy(cap.bus_info, cam->usbdev->dev.bus_id, 2165 strlcpy(cap.bus_info, dev_name(&cam->usbdev->dev),
2166 sizeof(cap.bus_info)); 2166 sizeof(cap.bus_info));
2167 2167
2168 if (copy_to_user(arg, &cap, sizeof(cap))) 2168 if (copy_to_user(arg, &cap, sizeof(cap)))
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index e23734f6d6e2..8cb3457e778d 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -55,7 +55,9 @@ static const struct usb_device_id sn9c102_id_table[] = {
55 { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), }, 55 { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
56 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), }, 56 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
57 { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), }, 57 { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
58#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
58 { SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), }, 59 { SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
60#endif
59/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */ 61/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
60 { SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), }, 62 { SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), },
61 { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), }, 63 { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), },
@@ -91,10 +93,14 @@ static const struct usb_device_id sn9c102_id_table[] = {
91 { SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), }, 93 { SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
92 { SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), }, 94 { SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
93 /* SN9C105 */ 95 /* SN9C105 */
96#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
94 { SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), }, 97 { SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
95 { SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), }, 98 { SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
99#endif
96 { SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), }, 100 { SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
101#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
97 { SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), }, 102 { SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), },
103#endif
98 { SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), }, 104 { SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), },
99 { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), }, 105 { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), },
100 { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), }, 106 { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), },
@@ -113,7 +119,9 @@ static const struct usb_device_id sn9c102_id_table[] = {
113 { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), }, 119 { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
114 { SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), }, 120 { SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
115/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */ 121/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
122#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
116 { SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), }, 123 { SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
124#endif
117 { SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), }, 125 { SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
118 { SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), }, 126 { SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
119 { SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), }, 127 { SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
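A note on the guards added above: Kconfig defines CONFIG_USB_GSPCA when the gspca driver is built in and CONFIG_USB_GSPCA_MODULE when it is built as a module, so the contested device IDs stay in sn9c102 only when gspca is completely disabled:

#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
	/* sn9c102 binds the contested USB IDs only in this case */
#endif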
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 66ebe5956a87..90077cb4fe66 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -33,10 +33,9 @@
33static LIST_HEAD(hosts); 33static LIST_HEAD(hosts);
34static LIST_HEAD(devices); 34static LIST_HEAD(devices);
35static DEFINE_MUTEX(list_lock); 35static DEFINE_MUTEX(list_lock);
36static DEFINE_MUTEX(video_lock);
37 36
38const static struct soc_camera_data_format* 37const struct soc_camera_data_format *soc_camera_format_by_fourcc(
39format_by_fourcc(struct soc_camera_device *icd, unsigned int fourcc) 38 struct soc_camera_device *icd, unsigned int fourcc)
40{ 39{
41 unsigned int i; 40 unsigned int i;
42 41
@@ -45,67 +44,87 @@ format_by_fourcc(struct soc_camera_device *icd, unsigned int fourcc)
45 return icd->formats + i; 44 return icd->formats + i;
46 return NULL; 45 return NULL;
47} 46}
47EXPORT_SYMBOL(soc_camera_format_by_fourcc);
48 48
49static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, 49const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
50 struct v4l2_format *f) 50 struct soc_camera_device *icd, unsigned int fourcc)
51{ 51{
52 struct soc_camera_file *icf = file->private_data; 52 unsigned int i;
53 struct soc_camera_device *icd = icf->icd;
54 struct soc_camera_host *ici =
55 to_soc_camera_host(icd->dev.parent);
56 enum v4l2_field field;
57 const struct soc_camera_data_format *fmt;
58 int ret;
59 53
60 WARN_ON(priv != file->private_data); 54 for (i = 0; i < icd->num_user_formats; i++)
55 if (icd->user_formats[i].host_fmt->fourcc == fourcc)
56 return icd->user_formats + i;
57 return NULL;
58}
59EXPORT_SYMBOL(soc_camera_xlate_by_fourcc);
61 60
62 fmt = format_by_fourcc(icd, f->fmt.pix.pixelformat); 61/**
63 if (!fmt) { 62 * soc_camera_apply_sensor_flags() - apply platform SOCAM_SENSOR_INVERT_* flags
64 dev_dbg(&icd->dev, "invalid format 0x%08x\n", 63 * @icl: camera platform parameters
65 f->fmt.pix.pixelformat); 64 * @flags: flags to be inverted according to platform configuration
66 return -EINVAL; 65 * @return: resulting flags
67 } 66 */
67unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
68 unsigned long flags)
69{
70 unsigned long f;
68 71
69 dev_dbg(&icd->dev, "fmt: 0x%08x\n", fmt->fourcc); 72 /* If only one of the two polarities is supported, switch to the opposite */
73 if (icl->flags & SOCAM_SENSOR_INVERT_HSYNC) {
74 f = flags & (SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW);
75 if (f == SOCAM_HSYNC_ACTIVE_HIGH || f == SOCAM_HSYNC_ACTIVE_LOW)
76 flags ^= SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW;
77 }
70 78
71 field = f->fmt.pix.field; 79 if (icl->flags & SOCAM_SENSOR_INVERT_VSYNC) {
80 f = flags & (SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW);
81 if (f == SOCAM_VSYNC_ACTIVE_HIGH || f == SOCAM_VSYNC_ACTIVE_LOW)
82 flags ^= SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW;
83 }
72 84
73 if (field == V4L2_FIELD_ANY) { 85 if (icl->flags & SOCAM_SENSOR_INVERT_PCLK) {
74 field = V4L2_FIELD_NONE; 86 f = flags & (SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING);
75 } else if (V4L2_FIELD_NONE != field) { 87 if (f == SOCAM_PCLK_SAMPLE_RISING || f == SOCAM_PCLK_SAMPLE_FALLING)
76 dev_err(&icd->dev, "Field type invalid.\n"); 88 flags ^= SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING;
77 return -EINVAL;
78 } 89 }
79 90
80 /* test physical bus parameters */ 91 return flags;
81 ret = ici->ops->try_bus_param(icd, f->fmt.pix.pixelformat); 92}
82 if (ret) 93EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
83 return ret;
84 94
85 /* limit format to hardware capabilities */ 95static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
86 ret = ici->ops->try_fmt_cap(icd, f); 96 struct v4l2_format *f)
97{
98 struct soc_camera_file *icf = file->private_data;
99 struct soc_camera_device *icd = icf->icd;
100 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
87 101
88 /* calculate missing fields */ 102 WARN_ON(priv != file->private_data);
89 f->fmt.pix.field = field;
90 f->fmt.pix.bytesperline =
91 (f->fmt.pix.width * fmt->depth) >> 3;
92 f->fmt.pix.sizeimage =
93 f->fmt.pix.height * f->fmt.pix.bytesperline;
94 103
95 return ret; 104 /* limit format to hardware capabilities */
105 return ici->ops->try_fmt(icd, f);
96} 106}
97 107
98static int soc_camera_enum_input(struct file *file, void *priv, 108static int soc_camera_enum_input(struct file *file, void *priv,
99 struct v4l2_input *inp) 109 struct v4l2_input *inp)
100{ 110{
111 struct soc_camera_file *icf = file->private_data;
112 struct soc_camera_device *icd = icf->icd;
113 int ret = 0;
114
101 if (inp->index != 0) 115 if (inp->index != 0)
102 return -EINVAL; 116 return -EINVAL;
103 117
104 inp->type = V4L2_INPUT_TYPE_CAMERA; 118 if (icd->ops->enum_input)
105 inp->std = V4L2_STD_UNKNOWN; 119 ret = icd->ops->enum_input(icd, inp);
106 strcpy(inp->name, "Camera"); 120 else {
121 /* default is camera */
122 inp->type = V4L2_INPUT_TYPE_CAMERA;
123 inp->std = V4L2_STD_UNKNOWN;
124 strcpy(inp->name, "Camera");
125 }
107 126
108 return 0; 127 return ret;
109} 128}
110 129
111static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i) 130static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i)
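soc_camera_apply_sensor_flags(), added above, lets board code invert HSYNC/VSYNC/PCLK polarity where the wiring requires it: a driver with the board's soc_camera_link at hand runs its negotiated bus flags through it. A hedged usage sketch (icl and flags are assumed to be in scope of a bus-parameter handler):

	flags = soc_camera_apply_sensor_flags(icl, flags);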
@@ -125,7 +144,14 @@ static int soc_camera_s_input(struct file *file, void *priv, unsigned int i)
125 144
126static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a) 145static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a)
127{ 146{
128 return 0; 147 struct soc_camera_file *icf = file->private_data;
148 struct soc_camera_device *icd = icf->icd;
149 int ret = 0;
150
151 if (icd->ops->set_std)
152 ret = icd->ops->set_std(icd, a);
153
154 return ret;
129} 155}
130 156
131static int soc_camera_reqbufs(struct file *file, void *priv, 157static int soc_camera_reqbufs(struct file *file, void *priv,
@@ -134,8 +160,7 @@ static int soc_camera_reqbufs(struct file *file, void *priv,
134 int ret; 160 int ret;
135 struct soc_camera_file *icf = file->private_data; 161 struct soc_camera_file *icf = file->private_data;
136 struct soc_camera_device *icd = icf->icd; 162 struct soc_camera_device *icd = icf->icd;
137 struct soc_camera_host *ici = 163 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
138 to_soc_camera_host(icd->dev.parent);
139 164
140 WARN_ON(priv != file->private_data); 165 WARN_ON(priv != file->private_data);
141 166
@@ -178,6 +203,59 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
178 return videobuf_dqbuf(&icf->vb_vidq, p, file->f_flags & O_NONBLOCK); 203 return videobuf_dqbuf(&icf->vb_vidq, p, file->f_flags & O_NONBLOCK);
179} 204}
180 205
206static int soc_camera_init_user_formats(struct soc_camera_device *icd)
207{
208 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
209 int i, fmts = 0;
210
211 if (!ici->ops->get_formats)
212 /*
213 * Fallback mode - the host will have to serve all
214 * sensor-provided formats one-to-one to the user
215 */
216 fmts = icd->num_formats;
217 else
218 /*
219 * First pass - only count formats this host-sensor
220 * configuration can provide
221 */
222 for (i = 0; i < icd->num_formats; i++)
223 fmts += ici->ops->get_formats(icd, i, NULL);
224
225 if (!fmts)
226 return -ENXIO;
227
228 icd->user_formats =
229 vmalloc(fmts * sizeof(struct soc_camera_format_xlate));
230 if (!icd->user_formats)
231 return -ENOMEM;
232
233 icd->num_user_formats = fmts;
234 fmts = 0;
235
236 dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts);
237
238 /* Second pass - actually fill data formats */
239 for (i = 0; i < icd->num_formats; i++)
240 if (!ici->ops->get_formats) {
241 icd->user_formats[i].host_fmt = icd->formats + i;
242 icd->user_formats[i].cam_fmt = icd->formats + i;
243 icd->user_formats[i].buswidth = icd->formats[i].depth;
244 } else {
245 fmts += ici->ops->get_formats(icd, i,
246 &icd->user_formats[fmts]);
247 }
248
249 icd->current_fmt = icd->user_formats[0].host_fmt;
250
251 return 0;
252}
253
254static void soc_camera_free_user_formats(struct soc_camera_device *icd)
255{
256 vfree(icd->user_formats);
257}
258
181static int soc_camera_open(struct inode *inode, struct file *file) 259static int soc_camera_open(struct inode *inode, struct file *file)
182{ 260{
183 struct video_device *vdev; 261 struct video_device *vdev;
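soc_camera_init_user_formats() above relies on a simple two-pass contract with the host's get_formats(): called with xlate == NULL it only returns how many entries it would produce for sensor format idx; called again with a real pointer it fills them in. A host that merely passes sensor formats straight through could implement the hook roughly as below (example_get_formats is a hypothetical name; the body mirrors the fallback branch above and the generic case in sh_mobile_ceu_get_formats()):

static int example_get_formats(struct soc_camera_device *icd, int idx,
			       struct soc_camera_format_xlate *xlate)
{
	if (xlate) {
		/* expose the sensor format unchanged to user space */
		xlate->host_fmt = icd->formats + idx;
		xlate->cam_fmt  = icd->formats + idx;
		xlate->buswidth = icd->formats[idx].depth;
	}
	return 1;	/* one translation entry per sensor format */
}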
@@ -190,8 +268,10 @@ static int soc_camera_open(struct inode *inode, struct file *file)
190 if (!icf) 268 if (!icf)
191 return -ENOMEM; 269 return -ENOMEM;
192 270
193 /* Protect against icd->remove() until we module_get() both drivers. */ 271 /*
194 mutex_lock(&video_lock); 272 * It is safe to dereference these pointers now as long as a user has
273 * the video device open - we are protected by the held cdev reference.
274 */
195 275
196 vdev = video_devdata(file); 276 vdev = video_devdata(file);
197 icd = container_of(vdev->parent, struct soc_camera_device, dev); 277 icd = container_of(vdev->parent, struct soc_camera_device, dev);
@@ -209,20 +289,25 @@ static int soc_camera_open(struct inode *inode, struct file *file)
209 goto emgi; 289 goto emgi;
210 } 290 }
211 291
292 /* Protect against icd->remove() until we module_get() both drivers. */
293 mutex_lock(&icd->video_lock);
294
212 icf->icd = icd; 295 icf->icd = icd;
213 icd->use_count++; 296 icd->use_count++;
214 297
215 /* Now we really have to activate the camera */ 298 /* Now we really have to activate the camera */
216 if (icd->use_count == 1) { 299 if (icd->use_count == 1) {
300 ret = soc_camera_init_user_formats(icd);
301 if (ret < 0)
302 goto eiufmt;
217 ret = ici->ops->add(icd); 303 ret = ici->ops->add(icd);
218 if (ret < 0) { 304 if (ret < 0) {
219 dev_err(&icd->dev, "Couldn't activate the camera: %d\n", ret); 305 dev_err(&icd->dev, "Couldn't activate the camera: %d\n", ret);
220 icd->use_count--;
221 goto eiciadd; 306 goto eiciadd;
222 } 307 }
223 } 308 }
224 309
225 mutex_unlock(&video_lock); 310 mutex_unlock(&icd->video_lock);
226 311
227 file->private_data = icf; 312 file->private_data = icf;
228 dev_dbg(&icd->dev, "camera device open\n"); 313 dev_dbg(&icd->dev, "camera device open\n");
@@ -231,13 +316,16 @@ static int soc_camera_open(struct inode *inode, struct file *file)
231 316
232 return 0; 317 return 0;
233 318
234 /* All errors are entered with the video_lock held */ 319 /* First two errors are entered with the .video_lock held */
235eiciadd: 320eiciadd:
321 soc_camera_free_user_formats(icd);
322eiufmt:
323 icd->use_count--;
324 mutex_unlock(&icd->video_lock);
236 module_put(ici->ops->owner); 325 module_put(ici->ops->owner);
237emgi: 326emgi:
238 module_put(icd->ops->owner); 327 module_put(icd->ops->owner);
239emgd: 328emgd:
240 mutex_unlock(&video_lock);
241 vfree(icf); 329 vfree(icf);
242 return ret; 330 return ret;
243} 331}
@@ -249,13 +337,16 @@ static int soc_camera_close(struct inode *inode, struct file *file)
249 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 337 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
250 struct video_device *vdev = icd->vdev; 338 struct video_device *vdev = icd->vdev;
251 339
252 mutex_lock(&video_lock); 340 mutex_lock(&icd->video_lock);
253 icd->use_count--; 341 icd->use_count--;
254 if (!icd->use_count) 342 if (!icd->use_count) {
255 ici->ops->remove(icd); 343 ici->ops->remove(icd);
344 soc_camera_free_user_formats(icd);
345 }
346 mutex_unlock(&icd->video_lock);
347
256 module_put(icd->ops->owner); 348 module_put(icd->ops->owner);
257 module_put(ici->ops->owner); 349 module_put(ici->ops->owner);
258 mutex_unlock(&video_lock);
259 350
260 vfree(icf); 351 vfree(icf);
261 352
@@ -265,7 +356,7 @@ static int soc_camera_close(struct inode *inode, struct file *file)
265} 356}
266 357
267static ssize_t soc_camera_read(struct file *file, char __user *buf, 358static ssize_t soc_camera_read(struct file *file, char __user *buf,
268 size_t count, loff_t *ppos) 359 size_t count, loff_t *ppos)
269{ 360{
270 struct soc_camera_file *icf = file->private_data; 361 struct soc_camera_file *icf = file->private_data;
271 struct soc_camera_device *icd = icf->icd; 362 struct soc_camera_device *icd = icf->icd;
@@ -299,8 +390,7 @@ static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
299{ 390{
300 struct soc_camera_file *icf = file->private_data; 391 struct soc_camera_file *icf = file->private_data;
301 struct soc_camera_device *icd = icf->icd; 392 struct soc_camera_device *icd = icf->icd;
302 struct soc_camera_host *ici = 393 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
303 to_soc_camera_host(icd->dev.parent);
304 394
305 if (list_empty(&icf->vb_vidq.stream)) { 395 if (list_empty(&icf->vb_vidq.stream)) {
306 dev_err(&icd->dev, "Trying to poll with no queued buffers!\n"); 396 dev_err(&icd->dev, "Trying to poll with no queued buffers!\n");
@@ -310,7 +400,6 @@ static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
310 return ici->ops->poll(file, pt); 400 return ici->ops->poll(file, pt);
311} 401}
312 402
313
314static struct file_operations soc_camera_fops = { 403static struct file_operations soc_camera_fops = {
315 .owner = THIS_MODULE, 404 .owner = THIS_MODULE,
316 .open = soc_camera_open, 405 .open = soc_camera_open,
@@ -322,44 +411,50 @@ static struct file_operations soc_camera_fops = {
322 .llseek = no_llseek, 411 .llseek = no_llseek,
323}; 412};
324 413
325
326static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv, 414static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
327 struct v4l2_format *f) 415 struct v4l2_format *f)
328{ 416{
329 struct soc_camera_file *icf = file->private_data; 417 struct soc_camera_file *icf = file->private_data;
330 struct soc_camera_device *icd = icf->icd; 418 struct soc_camera_device *icd = icf->icd;
331 struct soc_camera_host *ici = 419 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
332 to_soc_camera_host(icd->dev.parent); 420 struct v4l2_pix_format *pix = &f->fmt.pix;
421 __u32 pixfmt = pix->pixelformat;
333 int ret; 422 int ret;
334 struct v4l2_rect rect; 423 struct v4l2_rect rect;
335 const static struct soc_camera_data_format *data_fmt;
336 424
337 WARN_ON(priv != file->private_data); 425 WARN_ON(priv != file->private_data);
338 426
339 data_fmt = format_by_fourcc(icd, f->fmt.pix.pixelformat); 427 ret = soc_camera_try_fmt_vid_cap(file, priv, f);
340 if (!data_fmt)
341 return -EINVAL;
342
343 /* buswidth may be further adjusted by the ici */
344 icd->buswidth = data_fmt->depth;
345
346 ret = soc_camera_try_fmt_vid_cap(file, icf, f);
347 if (ret < 0) 428 if (ret < 0)
348 return ret; 429 return ret;
349 430
431 mutex_lock(&icf->vb_vidq.vb_lock);
432
433 if (videobuf_queue_is_busy(&icf->vb_vidq)) {
434 dev_err(&icd->dev, "S_FMT denied: queue busy\n");
435 ret = -EBUSY;
436 goto unlock;
437 }
438
350 rect.left = icd->x_current; 439 rect.left = icd->x_current;
351 rect.top = icd->y_current; 440 rect.top = icd->y_current;
352 rect.width = f->fmt.pix.width; 441 rect.width = pix->width;
353 rect.height = f->fmt.pix.height; 442 rect.height = pix->height;
354 ret = ici->ops->set_fmt_cap(icd, f->fmt.pix.pixelformat, &rect); 443 ret = ici->ops->set_fmt(icd, pix->pixelformat, &rect);
355 if (ret < 0) 444 if (ret < 0) {
356 return ret; 445 goto unlock;
446 } else if (!icd->current_fmt ||
447 icd->current_fmt->fourcc != pixfmt) {
448 dev_err(&ici->dev,
449 "Host driver hasn't set up current format correctly!\n");
450 ret = -EINVAL;
451 goto unlock;
452 }
357 453
358 icd->current_fmt = data_fmt;
359 icd->width = rect.width; 454 icd->width = rect.width;
360 icd->height = rect.height; 455 icd->height = rect.height;
361 icf->vb_vidq.field = f->fmt.pix.field; 456 icf->vb_vidq.field = pix->field;
362 if (V4L2_BUF_TYPE_VIDEO_CAPTURE != f->type) 457 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
363 dev_warn(&icd->dev, "Attention! Wrong buf-type %d\n", 458 dev_warn(&icd->dev, "Attention! Wrong buf-type %d\n",
364 f->type); 459 f->type);
365 460
@@ -367,11 +462,16 @@ static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
367 icd->width, icd->height); 462 icd->width, icd->height);
368 463
369 /* set physical bus parameters */ 464 /* set physical bus parameters */
370 return ici->ops->set_bus_param(icd, f->fmt.pix.pixelformat); 465 ret = ici->ops->set_bus_param(icd, pixfmt);
466
467unlock:
468 mutex_unlock(&icf->vb_vidq.vb_lock);
469
470 return ret;
371} 471}
372 472
373static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv, 473static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
374 struct v4l2_fmtdesc *f) 474 struct v4l2_fmtdesc *f)
375{ 475{
376 struct soc_camera_file *icf = file->private_data; 476 struct soc_camera_file *icf = file->private_data;
377 struct soc_camera_device *icd = icf->icd; 477 struct soc_camera_device *icd = icf->icd;
@@ -379,10 +479,10 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
379 479
380 WARN_ON(priv != file->private_data); 480 WARN_ON(priv != file->private_data);
381 481
382 if (f->index >= icd->num_formats) 482 if (f->index >= icd->num_user_formats)
383 return -EINVAL; 483 return -EINVAL;
384 484
385 format = &icd->formats[f->index]; 485 format = icd->user_formats[f->index].host_fmt;
386 486
387 strlcpy(f->description, format->name, sizeof(f->description)); 487 strlcpy(f->description, format->name, sizeof(f->description));
388 f->pixelformat = format->fourcc; 488 f->pixelformat = format->fourcc;
@@ -390,21 +490,21 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
390} 490}
391 491
392static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv, 492static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
393 struct v4l2_format *f) 493 struct v4l2_format *f)
394{ 494{
395 struct soc_camera_file *icf = file->private_data; 495 struct soc_camera_file *icf = file->private_data;
396 struct soc_camera_device *icd = icf->icd; 496 struct soc_camera_device *icd = icf->icd;
497 struct v4l2_pix_format *pix = &f->fmt.pix;
397 498
398 WARN_ON(priv != file->private_data); 499 WARN_ON(priv != file->private_data);
399 500
400 f->fmt.pix.width = icd->width; 501 pix->width = icd->width;
401 f->fmt.pix.height = icd->height; 502 pix->height = icd->height;
402 f->fmt.pix.field = icf->vb_vidq.field; 503 pix->field = icf->vb_vidq.field;
403 f->fmt.pix.pixelformat = icd->current_fmt->fourcc; 504 pix->pixelformat = icd->current_fmt->fourcc;
404 f->fmt.pix.bytesperline = 505 pix->bytesperline = pix->width *
405 (f->fmt.pix.width * icd->current_fmt->depth) >> 3; 506 DIV_ROUND_UP(icd->current_fmt->depth, 8);
406 f->fmt.pix.sizeimage = 507 pix->sizeimage = pix->height * pix->bytesperline;
407 f->fmt.pix.height * f->fmt.pix.bytesperline;
408 dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n", 508 dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n",
409 icd->current_fmt->fourcc); 509 icd->current_fmt->fourcc);
410 return 0; 510 return 0;
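A quick check of the bytesperline/sizeimage arithmetic introduced above, with purely illustrative numbers: for a 640x480 YUYV frame the depth is 16, so

	bytesperline = 640 * DIV_ROUND_UP(16, 8) = 640 * 2 = 1280
	sizeimage    = 480 * 1280                = 614400

the same result as the old "(width * depth) >> 3" expression for byte-multiple depths, while avoiding fractional-byte truncation for depths such as 12 (NV12/NV21), where DIV_ROUND_UP(12, 8) yields 2 bytes per pixel.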
@@ -415,8 +515,7 @@ static int soc_camera_querycap(struct file *file, void *priv,
415{ 515{
416 struct soc_camera_file *icf = file->private_data; 516 struct soc_camera_file *icf = file->private_data;
417 struct soc_camera_device *icd = icf->icd; 517 struct soc_camera_device *icd = icf->icd;
418 struct soc_camera_host *ici = 518 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
419 to_soc_camera_host(icd->dev.parent);
420 519
421 WARN_ON(priv != file->private_data); 520 WARN_ON(priv != file->private_data);
422 521
@@ -429,6 +528,7 @@ static int soc_camera_streamon(struct file *file, void *priv,
429{ 528{
430 struct soc_camera_file *icf = file->private_data; 529 struct soc_camera_file *icf = file->private_data;
431 struct soc_camera_device *icd = icf->icd; 530 struct soc_camera_device *icd = icf->icd;
531 int ret;
432 532
433 WARN_ON(priv != file->private_data); 533 WARN_ON(priv != file->private_data);
434 534
@@ -437,10 +537,16 @@ static int soc_camera_streamon(struct file *file, void *priv,
437 if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) 537 if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
438 return -EINVAL; 538 return -EINVAL;
439 539
540 mutex_lock(&icd->video_lock);
541
440 icd->ops->start_capture(icd); 542 icd->ops->start_capture(icd);
441 543
442 /* This calls buf_queue from host driver's videobuf_queue_ops */ 544 /* This calls buf_queue from host driver's videobuf_queue_ops */
443 return videobuf_streamon(&icf->vb_vidq); 545 ret = videobuf_streamon(&icf->vb_vidq);
546
547 mutex_unlock(&icd->video_lock);
548
549 return ret;
444} 550}
445 551
446static int soc_camera_streamoff(struct file *file, void *priv, 552static int soc_camera_streamoff(struct file *file, void *priv,
@@ -456,12 +562,16 @@ static int soc_camera_streamoff(struct file *file, void *priv,
456 if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) 562 if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
457 return -EINVAL; 563 return -EINVAL;
458 564
565 mutex_lock(&icd->video_lock);
566
459 /* This calls buf_release from host driver's videobuf_queue_ops for all 567 /* This calls buf_release from host driver's videobuf_queue_ops for all
460 * remaining buffers. When the last buffer is freed, stop capture */ 568 * remaining buffers. When the last buffer is freed, stop capture */
461 videobuf_streamoff(&icf->vb_vidq); 569 videobuf_streamoff(&icf->vb_vidq);
462 570
463 icd->ops->stop_capture(icd); 571 icd->ops->stop_capture(icd);
464 572
573 mutex_unlock(&icd->video_lock);
574
465 return 0; 575 return 0;
466} 576}
467 577
@@ -567,14 +677,16 @@ static int soc_camera_s_crop(struct file *file, void *fh,
567{ 677{
568 struct soc_camera_file *icf = file->private_data; 678 struct soc_camera_file *icf = file->private_data;
569 struct soc_camera_device *icd = icf->icd; 679 struct soc_camera_device *icd = icf->icd;
570 struct soc_camera_host *ici = 680 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
571 to_soc_camera_host(icd->dev.parent);
572 int ret; 681 int ret;
573 682
574 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 683 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
575 return -EINVAL; 684 return -EINVAL;
576 685
577 ret = ici->ops->set_fmt_cap(icd, 0, &a->c); 686 /* Cropping is allowed during a running capture, guard consistency */
687 mutex_lock(&icf->vb_vidq.vb_lock);
688
689 ret = ici->ops->set_fmt(icd, 0, &a->c);
578 if (!ret) { 690 if (!ret) {
579 icd->width = a->c.width; 691 icd->width = a->c.width;
580 icd->height = a->c.height; 692 icd->height = a->c.height;
@@ -582,6 +694,8 @@ static int soc_camera_s_crop(struct file *file, void *fh,
582 icd->y_current = a->c.top; 694 icd->y_current = a->c.top;
583 } 695 }
584 696
697 mutex_unlock(&icf->vb_vidq.vb_lock);
698
585 return ret; 699 return ret;
586} 700}
587 701
@@ -692,18 +806,35 @@ static int scan_add_device(struct soc_camera_device *icd)
692static int soc_camera_probe(struct device *dev) 806static int soc_camera_probe(struct device *dev)
693{ 807{
694 struct soc_camera_device *icd = to_soc_camera_dev(dev); 808 struct soc_camera_device *icd = to_soc_camera_dev(dev);
695 struct soc_camera_host *ici = 809 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
696 to_soc_camera_host(icd->dev.parent);
697 int ret; 810 int ret;
698 811
699 if (!icd->ops->probe) 812 /*
700 return -ENODEV; 813 * Possible race scenario:
814 * modprobe <camera-host-driver> triggers __func__
815 * at this moment respective <camera-sensor-driver> gets rmmod'ed
816 * to protect take module references.
817 */
818
819 if (!try_module_get(icd->ops->owner)) {
820 dev_err(&icd->dev, "Couldn't lock sensor driver.\n");
821 ret = -EINVAL;
822 goto emgd;
823 }
824
825 if (!try_module_get(ici->ops->owner)) {
826 dev_err(&icd->dev, "Couldn't lock capture bus driver.\n");
827 ret = -EINVAL;
828 goto emgi;
829 }
830
831 mutex_lock(&icd->video_lock);
701 832
702 /* We only call ->add() here to activate and probe the camera. 833 /* We only call ->add() here to activate and probe the camera.
703 * We shall ->remove() and deactivate it immediately afterwards. */ 834 * We shall ->remove() and deactivate it immediately afterwards. */
704 ret = ici->ops->add(icd); 835 ret = ici->ops->add(icd);
705 if (ret < 0) 836 if (ret < 0)
706 return ret; 837 goto eiadd;
707 838
708 ret = icd->ops->probe(icd); 839 ret = icd->ops->probe(icd);
709 if (ret >= 0) { 840 if (ret >= 0) {
@@ -717,6 +848,12 @@ static int soc_camera_probe(struct device *dev)
717 } 848 }
718 ici->ops->remove(icd); 849 ici->ops->remove(icd);
719 850
851eiadd:
852 mutex_unlock(&icd->video_lock);
853 module_put(ici->ops->owner);
854emgi:
855 module_put(icd->ops->owner);
856emgd:
720 return ret; 857 return ret;
721} 858}
722 859
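The comment at the top of soc_camera_probe() above describes the race the try_module_get() calls close: the sensor (or host) module could be unloaded while probe is still calling into it. The shape of the protection, extracted from the function above, is the usual pin/release pair in reverse order:

	if (!try_module_get(icd->ops->owner))	/* pin the sensor driver */
		goto emgd;
	if (!try_module_get(ici->ops->owner))	/* pin the host driver */
		goto emgi;
	/* ... safe to call into both drivers here ... */
	module_put(ici->ops->owner);		/* release in reverse order */
	module_put(icd->ops->owner);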
@@ -779,11 +916,20 @@ int soc_camera_host_register(struct soc_camera_host *ici)
779 int ret; 916 int ret;
780 struct soc_camera_host *ix; 917 struct soc_camera_host *ix;
781 918
782 if (!ici->ops->init_videobuf || !ici->ops->add || !ici->ops->remove) 919 if (!ici || !ici->ops ||
920 !ici->ops->try_fmt ||
921 !ici->ops->set_fmt ||
922 !ici->ops->set_bus_param ||
923 !ici->ops->querycap ||
924 !ici->ops->init_videobuf ||
925 !ici->ops->reqbufs ||
926 !ici->ops->add ||
927 !ici->ops->remove ||
928 !ici->ops->poll)
783 return -EINVAL; 929 return -EINVAL;
784 930
785 /* Number might be equal to the platform device ID */ 931 /* Number might be equal to the platform device ID */
786 sprintf(ici->dev.bus_id, "camera_host%d", ici->nr); 932 dev_set_name(&ici->dev, "camera_host%d", ici->nr);
787 933
788 mutex_lock(&list_lock); 934 mutex_lock(&list_lock);
789 list_for_each_entry(ix, &hosts, list) { 935 list_for_each_entry(ix, &hosts, list) {
@@ -847,7 +993,16 @@ int soc_camera_device_register(struct soc_camera_device *icd)
847 struct soc_camera_device *ix; 993 struct soc_camera_device *ix;
848 int num = -1, i; 994 int num = -1, i;
849 995
850 if (!icd) 996 if (!icd || !icd->ops ||
997 !icd->ops->probe ||
998 !icd->ops->init ||
999 !icd->ops->release ||
1000 !icd->ops->start_capture ||
1001 !icd->ops->stop_capture ||
1002 !icd->ops->set_fmt ||
1003 !icd->ops->try_fmt ||
1004 !icd->ops->query_bus_param ||
1005 !icd->ops->set_bus_param)
851 return -EINVAL; 1006 return -EINVAL;
852 1007
853 for (i = 0; i < 256 && num < 0; i++) { 1008 for (i = 0; i < 256 && num < 0; i++) {
@@ -867,10 +1022,12 @@ int soc_camera_device_register(struct soc_camera_device *icd)
867 1022
868 icd->devnum = num; 1023 icd->devnum = num;
869 icd->dev.bus = &soc_camera_bus_type; 1024 icd->dev.bus = &soc_camera_bus_type;
870 snprintf(icd->dev.bus_id, sizeof(icd->dev.bus_id), 1025 dev_set_name(&icd->dev, "%u-%u", icd->iface, icd->devnum);
871 "%u-%u", icd->iface, icd->devnum);
872 1026
873 icd->dev.release = dummy_release; 1027 icd->dev.release = dummy_release;
1028 icd->use_count = 0;
1029 icd->host_priv = NULL;
1030 mutex_init(&icd->video_lock);
874 1031
875 return scan_add_device(icd); 1032 return scan_add_device(icd);
876} 1033}
@@ -917,6 +1074,10 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
917#endif 1074#endif
918}; 1075};
919 1076
1077/*
1078 * Usually called from the struct soc_camera_ops .probe() method, i.e., from
1079 * soc_camera_probe() above with .video_lock held
1080 */
920int soc_camera_video_start(struct soc_camera_device *icd) 1081int soc_camera_video_start(struct soc_camera_device *icd)
921{ 1082{
922 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1083 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
@@ -932,7 +1093,7 @@ int soc_camera_video_start(struct soc_camera_device *icd)
932 dev_dbg(&ici->dev, "Allocated video_device %p\n", vdev); 1093 dev_dbg(&ici->dev, "Allocated video_device %p\n", vdev);
933 1094
934 strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name)); 1095 strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
935 /* Maybe better &ici->dev */ 1096
936 vdev->parent = &icd->dev; 1097 vdev->parent = &icd->dev;
937 vdev->current_norm = V4L2_STD_UNKNOWN; 1098 vdev->current_norm = V4L2_STD_UNKNOWN;
938 vdev->fops = &soc_camera_fops; 1099 vdev->fops = &soc_camera_fops;
@@ -941,8 +1102,6 @@ int soc_camera_video_start(struct soc_camera_device *icd)
941 vdev->minor = -1; 1102 vdev->minor = -1;
942 vdev->tvnorms = V4L2_STD_UNKNOWN, 1103 vdev->tvnorms = V4L2_STD_UNKNOWN,
943 1104
944 icd->current_fmt = &icd->formats[0];
945
946 err = video_register_device(vdev, VFL_TYPE_GRABBER, vdev->minor); 1105 err = video_register_device(vdev, VFL_TYPE_GRABBER, vdev->minor);
947 if (err < 0) { 1106 if (err < 0) {
948 dev_err(vdev->parent, "video_register_device failed\n"); 1107 dev_err(vdev->parent, "video_register_device failed\n");
@@ -968,10 +1127,10 @@ void soc_camera_video_stop(struct soc_camera_device *icd)
968 if (!icd->dev.parent || !vdev) 1127 if (!icd->dev.parent || !vdev)
969 return; 1128 return;
970 1129
971 mutex_lock(&video_lock); 1130 mutex_lock(&icd->video_lock);
972 video_unregister_device(vdev); 1131 video_unregister_device(vdev);
973 icd->vdev = NULL; 1132 icd->vdev = NULL;
974 mutex_unlock(&video_lock); 1133 mutex_unlock(&icd->video_lock);
975} 1134}
976EXPORT_SYMBOL(soc_camera_video_stop); 1135EXPORT_SYMBOL(soc_camera_video_stop);
977 1136
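
The soc_camera.c hunks above make host and device registration fail fast: soc_camera_host_register() and soc_camera_device_register() now return -EINVAL when any mandatory callback is missing, instead of dereferencing a NULL op later, and both switch from writing bus_id directly to dev_set_name(). As a rough illustration of what a host driver must now supply, here is a hypothetical ops table (the my_*() callbacks are placeholders; only the field names are taken from the new checks, and .owner is assumed from existing host drivers):

	static struct soc_camera_host_ops my_host_ops = {
		.owner		= THIS_MODULE,	/* assumed field, see note above */
		.add		= my_add_device,
		.remove		= my_remove_device,
		.set_fmt	= my_set_fmt,
		.try_fmt	= my_try_fmt,
		.set_bus_param	= my_set_bus_param,
		.querycap	= my_querycap,
		.init_videobuf	= my_init_videobuf,
		.reqbufs	= my_reqbufs,
		.poll		= my_poll,
	};

Leaving any of the checked entries NULL now makes registration fail with -EINVAL before the host is added to the list.
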
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index bb7a9d480e8f..013ab06e3180 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -79,19 +79,20 @@ soc_camera_platform_query_bus_param(struct soc_camera_device *icd)
79 return p->bus_param; 79 return p->bus_param;
80} 80}
81 81
82static int soc_camera_platform_set_fmt_cap(struct soc_camera_device *icd, 82static int soc_camera_platform_set_fmt(struct soc_camera_device *icd,
83 __u32 pixfmt, struct v4l2_rect *rect) 83 __u32 pixfmt, struct v4l2_rect *rect)
84{ 84{
85 return 0; 85 return 0;
86} 86}
87 87
88static int soc_camera_platform_try_fmt_cap(struct soc_camera_device *icd, 88static int soc_camera_platform_try_fmt(struct soc_camera_device *icd,
89 struct v4l2_format *f) 89 struct v4l2_format *f)
90{ 90{
91 struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd); 91 struct soc_camera_platform_info *p = soc_camera_platform_get_info(icd);
92 struct v4l2_pix_format *pix = &f->fmt.pix;
92 93
93 f->fmt.pix.width = p->format.width; 94 pix->width = p->format.width;
94 f->fmt.pix.height = p->format.height; 95 pix->height = p->format.height;
95 return 0; 96 return 0;
96} 97}
97 98
@@ -124,8 +125,8 @@ static struct soc_camera_ops soc_camera_platform_ops = {
124 .release = soc_camera_platform_release, 125 .release = soc_camera_platform_release,
125 .start_capture = soc_camera_platform_start_capture, 126 .start_capture = soc_camera_platform_start_capture,
126 .stop_capture = soc_camera_platform_stop_capture, 127 .stop_capture = soc_camera_platform_stop_capture,
127 .set_fmt_cap = soc_camera_platform_set_fmt_cap, 128 .set_fmt = soc_camera_platform_set_fmt,
128 .try_fmt_cap = soc_camera_platform_try_fmt_cap, 129 .try_fmt = soc_camera_platform_try_fmt,
129 .set_bus_param = soc_camera_platform_set_bus_param, 130 .set_bus_param = soc_camera_platform_set_bus_param,
130 .query_bus_param = soc_camera_platform_query_bus_param, 131 .query_bus_param = soc_camera_platform_query_bus_param,
131}; 132};
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index e9eb6d754d5c..f9516d0f3c11 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1262,6 +1262,25 @@ static int stk_vidioc_g_parm(struct file *filp,
1262 return 0; 1262 return 0;
1263} 1263}
1264 1264
1265static int stk_vidioc_enum_framesizes(struct file *filp,
1266 void *priv, struct v4l2_frmsizeenum *frms)
1267{
1268 if (frms->index >= ARRAY_SIZE(stk_sizes))
1269 return -EINVAL;
1270 switch (frms->pixel_format) {
1271 case V4L2_PIX_FMT_RGB565:
1272 case V4L2_PIX_FMT_RGB565X:
1273 case V4L2_PIX_FMT_UYVY:
1274 case V4L2_PIX_FMT_YUYV:
1275 case V4L2_PIX_FMT_SBGGR8:
1276 frms->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1277 frms->discrete.width = stk_sizes[frms->index].w;
1278 frms->discrete.height = stk_sizes[frms->index].h;
1279 return 0;
1280 default: return -EINVAL;
1281 }
1282}
1283
1265static struct file_operations v4l_stk_fops = { 1284static struct file_operations v4l_stk_fops = {
1266 .owner = THIS_MODULE, 1285 .owner = THIS_MODULE,
1267 .open = v4l_stk_open, 1286 .open = v4l_stk_open,
@@ -1296,6 +1315,7 @@ static const struct v4l2_ioctl_ops v4l_stk_ioctl_ops = {
1296 .vidioc_g_ctrl = stk_vidioc_g_ctrl, 1315 .vidioc_g_ctrl = stk_vidioc_g_ctrl,
1297 .vidioc_s_ctrl = stk_vidioc_s_ctrl, 1316 .vidioc_s_ctrl = stk_vidioc_s_ctrl,
1298 .vidioc_g_parm = stk_vidioc_g_parm, 1317 .vidioc_g_parm = stk_vidioc_g_parm,
1318 .vidioc_enum_framesizes = stk_vidioc_enum_framesizes,
1299}; 1319};
1300 1320
1301static void stk_v4l_dev_release(struct video_device *vd) 1321static void stk_v4l_dev_release(struct video_device *vd)
@@ -1376,12 +1396,9 @@ static int stk_camera_probe(struct usb_interface *interface,
1376 endpoint = &iface_desc->endpoint[i].desc; 1396 endpoint = &iface_desc->endpoint[i].desc;
1377 1397
1378 if (!dev->isoc_ep 1398 if (!dev->isoc_ep
1379 && ((endpoint->bEndpointAddress 1399 && usb_endpoint_is_isoc_in(endpoint)) {
1380 & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
1381 && ((endpoint->bmAttributes
1382 & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC)) {
1383 /* we found an isoc in endpoint */ 1400 /* we found an isoc in endpoint */
1384 dev->isoc_ep = (endpoint->bEndpointAddress & 0xF); 1401 dev->isoc_ep = usb_endpoint_num(endpoint);
1385 break; 1402 break;
1386 } 1403 }
1387 } 1404 }
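
The stk-webcam changes above add VIDIOC_ENUM_FRAMESIZES and replace the open-coded endpoint tests with usb_endpoint_is_isoc_in()/usb_endpoint_num(). For context, a small self-contained userspace sketch of how an application would enumerate the frame sizes the driver now reports; /dev/video0 is only an example node:

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_frmsizeenum frms;
		int fd = open("/dev/video0", O_RDWR);

		if (fd < 0)
			return 1;
		memset(&frms, 0, sizeof(frms));
		frms.pixel_format = V4L2_PIX_FMT_YUYV;
		/* The driver returns -EINVAL once the index runs past its size table. */
		while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frms) == 0) {
			if (frms.type == V4L2_FRMSIZE_TYPE_DISCRETE)
				printf("%ux%u\n", frms.discrete.width,
				       frms.discrete.height);
			frms.index++;
		}
		close(fd);
		return 0;
	}
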
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 328c41b1517d..42acc92c182d 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1132,8 +1132,7 @@ static int stv_close (struct inode *inode, struct file *file)
1132 return 0; 1132 return 0;
1133} 1133}
1134 1134
1135static int stv680_do_ioctl (struct inode *inode, struct file *file, 1135static int stv680_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1136 unsigned int cmd, void *arg)
1137{ 1136{
1138 struct video_device *vdev = file->private_data; 1137 struct video_device *vdev = file->private_data;
1139 struct usb_stv *stv680 = video_get_drvdata(vdev); 1138 struct usb_stv *stv680 = video_get_drvdata(vdev);
@@ -1303,7 +1302,7 @@ static int stv680_do_ioctl (struct inode *inode, struct file *file,
1303static int stv680_ioctl(struct inode *inode, struct file *file, 1302static int stv680_ioctl(struct inode *inode, struct file *file,
1304 unsigned int cmd, unsigned long arg) 1303 unsigned int cmd, unsigned long arg)
1305{ 1304{
1306 return video_usercopy(inode, file, cmd, arg, stv680_do_ioctl); 1305 return video_usercopy(file, cmd, arg, stv680_do_ioctl);
1307} 1306}
1308 1307
1309static int stv680_mmap (struct file *file, struct vm_area_struct *vma) 1308static int stv680_mmap (struct file *file, struct vm_area_struct *vma)
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 4963d4264880..0c020585fffb 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -47,9 +47,10 @@
47#include <linux/videodev2.h> 47#include <linux/videodev2.h>
48#include <linux/i2c.h> 48#include <linux/i2c.h>
49 49
50#include <media/v4l2-common.h> 50#include <media/v4l2-device.h>
51#include <media/v4l2-ioctl.h> 51#include <media/v4l2-ioctl.h>
52#include <media/i2c-addr.h> 52#include <media/i2c-addr.h>
53#include <media/v4l2-i2c-drv-legacy.h>
53 54
54#ifndef VIDEO_AUDIO_BALANCE 55#ifndef VIDEO_AUDIO_BALANCE
55# define VIDEO_AUDIO_BALANCE 32 56# define VIDEO_AUDIO_BALANCE 32
@@ -79,6 +80,7 @@ I2C_CLIENT_INSMOD;
79/* Structure of address and subaddresses for the tda7432 */ 80/* Structure of address and subaddresses for the tda7432 */
80 81
81struct tda7432 { 82struct tda7432 {
83 struct v4l2_subdev sd;
82 int addr; 84 int addr;
83 int input; 85 int input;
84 int volume; 86 int volume;
@@ -86,10 +88,12 @@ struct tda7432 {
86 int bass, treble; 88 int bass, treble;
87 int lf, lr, rf, rr; 89 int lf, lr, rf, rr;
88 int loud; 90 int loud;
89 struct i2c_client c;
90}; 91};
91static struct i2c_driver driver; 92
92static struct i2c_client client_template; 93static inline struct tda7432 *to_state(struct v4l2_subdev *sd)
94{
95 return container_of(sd, struct tda7432, sd);
96}
93 97
94/* The TDA7432 is made by STS-Thompson 98/* The TDA7432 is made by STS-Thompson
95 * http://www.st.com 99 * http://www.st.com
@@ -224,32 +228,33 @@ static struct i2c_client client_template;
224 228
225/* Begin code */ 229/* Begin code */
226 230
227static int tda7432_write(struct i2c_client *client, int subaddr, int val) 231static int tda7432_write(struct v4l2_subdev *sd, int subaddr, int val)
228{ 232{
233 struct i2c_client *client = v4l2_get_subdevdata(sd);
229 unsigned char buffer[2]; 234 unsigned char buffer[2];
230 v4l_dbg(2, debug,client,"In tda7432_write\n"); 235
231 v4l_dbg(1, debug,client,"Writing %d 0x%x\n", subaddr, val); 236 v4l2_dbg(2, debug, sd, "In tda7432_write\n");
237 v4l2_dbg(1, debug, sd, "Writing %d 0x%x\n", subaddr, val);
232 buffer[0] = subaddr; 238 buffer[0] = subaddr;
233 buffer[1] = val; 239 buffer[1] = val;
234 if (2 != i2c_master_send(client,buffer,2)) { 240 if (2 != i2c_master_send(client, buffer, 2)) {
235 v4l_err(client,"I/O error, trying (write %d 0x%x)\n", 241 v4l2_err(sd, "I/O error, trying (write %d 0x%x)\n",
236 subaddr, val); 242 subaddr, val);
237 return -1; 243 return -1;
238 } 244 }
239 return 0; 245 return 0;
240} 246}
241 247
242/* I don't think we ever actually _read_ the chip... */ 248static int tda7432_set(struct v4l2_subdev *sd)
243
244static int tda7432_set(struct i2c_client *client)
245{ 249{
246 struct tda7432 *t = i2c_get_clientdata(client); 250 struct i2c_client *client = v4l2_get_subdevdata(sd);
251 struct tda7432 *t = to_state(sd);
247 unsigned char buf[16]; 252 unsigned char buf[16];
248 v4l_dbg(2, debug,client,"In tda7432_set\n");
249 253
250 v4l_dbg(1, debug,client, 254 v4l2_dbg(1, debug, sd,
251 "tda7432: 7432_set(0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x)\n", 255 "tda7432: 7432_set(0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x)\n",
252 t->input,t->volume,t->bass,t->treble,t->lf,t->lr,t->rf,t->rr,t->loud); 256 t->input, t->volume, t->bass, t->treble, t->lf, t->lr,
257 t->rf, t->rr, t->loud);
253 buf[0] = TDA7432_IN; 258 buf[0] = TDA7432_IN;
254 buf[1] = t->input; 259 buf[1] = t->input;
255 buf[2] = t->volume; 260 buf[2] = t->volume;
@@ -260,18 +265,19 @@ static int tda7432_set(struct i2c_client *client)
260 buf[7] = t->rf; 265 buf[7] = t->rf;
261 buf[8] = t->rr; 266 buf[8] = t->rr;
262 buf[9] = t->loud; 267 buf[9] = t->loud;
263 if (10 != i2c_master_send(client,buf,10)) { 268 if (10 != i2c_master_send(client, buf, 10)) {
264 v4l_err(client,"I/O error, trying tda7432_set\n"); 269 v4l2_err(sd, "I/O error, trying tda7432_set\n");
265 return -1; 270 return -1;
266 } 271 }
267 272
268 return 0; 273 return 0;
269} 274}
270 275
271static void do_tda7432_init(struct i2c_client *client) 276static void do_tda7432_init(struct v4l2_subdev *sd)
272{ 277{
273 struct tda7432 *t = i2c_get_clientdata(client); 278 struct tda7432 *t = to_state(sd);
274 v4l_dbg(2, debug,client,"In tda7432_init\n"); 279
280 v4l2_dbg(2, debug, sd, "In tda7432_init\n");
275 281
276 t->input = TDA7432_STEREO_IN | /* Main (stereo) input */ 282 t->input = TDA7432_STEREO_IN | /* Main (stereo) input */
277 TDA7432_BASS_SYM | /* Symmetric bass cut */ 283 TDA7432_BASS_SYM | /* Symmetric bass cut */
@@ -288,57 +294,12 @@ static void do_tda7432_init(struct i2c_client *client)
288 t->rr = TDA7432_ATTEN_0DB; /* 0dB attenuation */ 294 t->rr = TDA7432_ATTEN_0DB; /* 0dB attenuation */
289 t->loud = loudness; /* insmod parameter */ 295 t->loud = loudness; /* insmod parameter */
290 296
291 tda7432_set(client); 297 tda7432_set(sd);
292} 298}
293 299
294/* *********************** * 300static int tda7432_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
295 * i2c interface functions *
296 * *********************** */
297
298static int tda7432_attach(struct i2c_adapter *adap, int addr, int kind)
299{ 301{
300 struct tda7432 *t; 302 struct tda7432 *t = to_state(sd);
301 struct i2c_client *client;
302
303 t = kzalloc(sizeof *t,GFP_KERNEL);
304 if (!t)
305 return -ENOMEM;
306
307 client = &t->c;
308 memcpy(client,&client_template,sizeof(struct i2c_client));
309 client->adapter = adap;
310 client->addr = addr;
311 i2c_set_clientdata(client, t);
312
313 do_tda7432_init(client);
314 i2c_attach_client(client);
315
316 v4l_info(client, "chip found @ 0x%x (%s)\n", addr << 1, adap->name);
317 return 0;
318}
319
320static int tda7432_probe(struct i2c_adapter *adap)
321{
322 if (adap->class & I2C_CLASS_TV_ANALOG)
323 return i2c_probe(adap, &addr_data, tda7432_attach);
324 return 0;
325}
326
327static int tda7432_detach(struct i2c_client *client)
328{
329 struct tda7432 *t = i2c_get_clientdata(client);
330
331 do_tda7432_init(client);
332 i2c_detach_client(client);
333
334 kfree(t);
335 return 0;
336}
337
338static int tda7432_get_ctrl(struct i2c_client *client,
339 struct v4l2_control *ctrl)
340{
341 struct tda7432 *t = i2c_get_clientdata(client);
342 303
343 switch (ctrl->id) { 304 switch (ctrl->id) {
344 case V4L2_CID_AUDIO_MUTE: 305 case V4L2_CID_AUDIO_MUTE:
@@ -382,10 +343,9 @@ static int tda7432_get_ctrl(struct i2c_client *client,
382 return -EINVAL; 343 return -EINVAL;
383} 344}
384 345
385static int tda7432_set_ctrl(struct i2c_client *client, 346static int tda7432_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
386 struct v4l2_control *ctrl)
387{ 347{
388 struct tda7432 *t = i2c_get_clientdata(client); 348 struct tda7432 *t = to_state(sd);
389 349
390 switch (ctrl->id) { 350 switch (ctrl->id) {
391 case V4L2_CID_AUDIO_MUTE: 351 case V4L2_CID_AUDIO_MUTE:
@@ -400,7 +360,7 @@ static int tda7432_set_ctrl(struct i2c_client *client,
400 if (loudness) /* Turn on the loudness bit */ 360 if (loudness) /* Turn on the loudness bit */
401 t->volume |= TDA7432_LD_ON; 361 t->volume |= TDA7432_LD_ON;
402 362
403 tda7432_write(client,TDA7432_VL, t->volume); 363 tda7432_write(sd, TDA7432_VL, t->volume);
404 return 0; 364 return 0;
405 case V4L2_CID_AUDIO_BALANCE: 365 case V4L2_CID_AUDIO_BALANCE:
406 if (ctrl->value < 32768) { 366 if (ctrl->value < 32768) {
@@ -428,14 +388,14 @@ static int tda7432_set_ctrl(struct i2c_client *client,
428 if(t->bass>= 0x8) 388 if(t->bass>= 0x8)
429 t->bass = (~t->bass & 0xf) + 0x8 ; 389 t->bass = (~t->bass & 0xf) + 0x8 ;
430 390
431 tda7432_write(client,TDA7432_TN, 0x10 | (t->bass << 4) | t->treble ); 391 tda7432_write(sd, TDA7432_TN, 0x10 | (t->bass << 4) | t->treble);
432 return 0; 392 return 0;
433 case V4L2_CID_AUDIO_TREBLE: 393 case V4L2_CID_AUDIO_TREBLE:
434 t->treble= ctrl->value >> 12; 394 t->treble= ctrl->value >> 12;
435 if(t->treble>= 0x8) 395 if(t->treble>= 0x8)
436 t->treble = (~t->treble & 0xf) + 0x8 ; 396 t->treble = (~t->treble & 0xf) + 0x8 ;
437 397
438 tda7432_write(client,TDA7432_TN, 0x10 | (t->bass << 4) | t->treble ); 398 tda7432_write(sd, TDA7432_TN, 0x10 | (t->bass << 4) | t->treble);
439 return 0; 399 return 0;
440 default: 400 default:
441 return -EINVAL; 401 return -EINVAL;
@@ -445,92 +405,102 @@ static int tda7432_set_ctrl(struct i2c_client *client,
445 if (t->muted) 405 if (t->muted)
446 { 406 {
447 /* Mute & update balance*/ 407 /* Mute & update balance*/
448 tda7432_write(client,TDA7432_LF, t->lf | TDA7432_MUTE); 408 tda7432_write(sd, TDA7432_LF, t->lf | TDA7432_MUTE);
449 tda7432_write(client,TDA7432_LR, t->lr | TDA7432_MUTE); 409 tda7432_write(sd, TDA7432_LR, t->lr | TDA7432_MUTE);
450 tda7432_write(client,TDA7432_RF, t->rf | TDA7432_MUTE); 410 tda7432_write(sd, TDA7432_RF, t->rf | TDA7432_MUTE);
451 tda7432_write(client,TDA7432_RR, t->rr | TDA7432_MUTE); 411 tda7432_write(sd, TDA7432_RR, t->rr | TDA7432_MUTE);
452 } else { 412 } else {
453 tda7432_write(client,TDA7432_LF, t->lf); 413 tda7432_write(sd, TDA7432_LF, t->lf);
454 tda7432_write(client,TDA7432_LR, t->lr); 414 tda7432_write(sd, TDA7432_LR, t->lr);
455 tda7432_write(client,TDA7432_RF, t->rf); 415 tda7432_write(sd, TDA7432_RF, t->rf);
456 tda7432_write(client,TDA7432_RR, t->rr); 416 tda7432_write(sd, TDA7432_RR, t->rr);
457 } 417 }
458 return 0; 418 return 0;
459} 419}
460 420
461static int tda7432_command(struct i2c_client *client, 421static int tda7432_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
462 unsigned int cmd, void *arg)
463{ 422{
464 v4l_dbg(2, debug,client,"In tda7432_command\n"); 423 switch (qc->id) {
465 if (debug>1) 424 case V4L2_CID_AUDIO_MUTE:
466 v4l_i2c_print_ioctl(client,cmd); 425 case V4L2_CID_AUDIO_VOLUME:
467 426 case V4L2_CID_AUDIO_BALANCE:
468 switch (cmd) { 427 case V4L2_CID_AUDIO_BASS:
469 /* --- v4l ioctls --- */ 428 case V4L2_CID_AUDIO_TREBLE:
470 /* take care: bttv does userspace copying, we'll get a
471 kernel pointer here... */
472 case VIDIOC_QUERYCTRL:
473 {
474 struct v4l2_queryctrl *qc = arg;
475
476 switch (qc->id) {
477 case V4L2_CID_AUDIO_MUTE:
478 case V4L2_CID_AUDIO_VOLUME:
479 case V4L2_CID_AUDIO_BALANCE:
480 case V4L2_CID_AUDIO_BASS:
481 case V4L2_CID_AUDIO_TREBLE:
482 default:
483 return -EINVAL;
484 }
485 return v4l2_ctrl_query_fill_std(qc); 429 return v4l2_ctrl_query_fill_std(qc);
486 } 430 }
487 case VIDIOC_S_CTRL: 431 return -EINVAL;
488 return tda7432_set_ctrl(client, arg); 432}
489
490 case VIDIOC_G_CTRL:
491 return tda7432_get_ctrl(client, arg);
492
493 } /* end of (cmd) switch */
494 433
495 return 0; 434static int tda7432_command(struct i2c_client *client, unsigned cmd, void *arg)
435{
436 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
496} 437}
497 438
498static struct i2c_driver driver = { 439/* ----------------------------------------------------------------------- */
499 .driver = { 440
500 .name = "tda7432", 441static const struct v4l2_subdev_core_ops tda7432_core_ops = {
501 }, 442 .queryctrl = tda7432_queryctrl,
502 .id = I2C_DRIVERID_TDA7432, 443 .g_ctrl = tda7432_g_ctrl,
503 .attach_adapter = tda7432_probe, 444 .s_ctrl = tda7432_s_ctrl,
504 .detach_client = tda7432_detach,
505 .command = tda7432_command,
506}; 445};
507 446
508static struct i2c_client client_template = 447static const struct v4l2_subdev_ops tda7432_ops = {
509{ 448 .core = &tda7432_core_ops,
510 .name = "tda7432",
511 .driver = &driver,
512}; 449};
513 450
514static int __init tda7432_init(void) 451/* ----------------------------------------------------------------------- */
452
453/* *********************** *
454 * i2c interface functions *
455 * *********************** */
456
457static int tda7432_probe(struct i2c_client *client,
458 const struct i2c_device_id *id)
515{ 459{
516 if ( (loudness < 0) || (loudness > 15) ) { 460 struct tda7432 *t;
517 printk(KERN_ERR "loudness parameter must be between 0 and 15\n"); 461 struct v4l2_subdev *sd;
518 return -EINVAL; 462
463 v4l_info(client, "chip found @ 0x%02x (%s)\n",
464 client->addr << 1, client->adapter->name);
465
466 t = kzalloc(sizeof(*t), GFP_KERNEL);
467 if (!t)
468 return -ENOMEM;
469 sd = &t->sd;
470 v4l2_i2c_subdev_init(sd, client, &tda7432_ops);
471 if (loudness < 0 || loudness > 15) {
472 v4l2_warn(sd, "loudness parameter must be between 0 and 15\n");
473 if (loudness < 0)
474 loudness = 0;
475 if (loudness > 15)
476 loudness = 15;
519 } 477 }
520 478
521 return i2c_add_driver(&driver); 479 do_tda7432_init(sd);
480 return 0;
522} 481}
523 482
524static void __exit tda7432_fini(void) 483static int tda7432_remove(struct i2c_client *client)
525{ 484{
526 i2c_del_driver(&driver); 485 struct v4l2_subdev *sd = i2c_get_clientdata(client);
527}
528 486
529module_init(tda7432_init); 487 do_tda7432_init(sd);
530module_exit(tda7432_fini); 488 v4l2_device_unregister_subdev(sd);
489 kfree(to_state(sd));
490 return 0;
491}
531 492
532/* 493static const struct i2c_device_id tda7432_id[] = {
533 * Local variables: 494 { "tda7432", 0 },
534 * c-basic-offset: 8 495 { }
535 * End: 496};
536 */ 497MODULE_DEVICE_TABLE(i2c, tda7432_id);
498
499static struct v4l2_i2c_driver_data v4l2_i2c_data = {
500 .name = "tda7432",
501 .driverid = I2C_DRIVERID_TDA7432,
502 .command = tda7432_command,
503 .probe = tda7432_probe,
504 .remove = tda7432_remove,
505 .id_table = tda7432_id,
506};
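
The tda7432 conversion above is the template the remaining audio subdevices in this series follow: the driver state embeds a struct v4l2_subdev, a container_of() helper recovers the state from the subdev, the old attach/detach pair becomes new-style probe/remove, and the legacy .command hook just forwards to v4l2_subdev_command(). A stripped-down sketch of that skeleton, with hypothetical foo_* names and an empty ops table standing in for the real core/audio/tuner ops:

	#include <linux/i2c.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <media/v4l2-common.h>
	#include <media/v4l2-device.h>

	struct foo_state {
		struct v4l2_subdev sd;	/* embedded, not a pointer */
		int some_setting;	/* chip-specific state */
	};

	static inline struct foo_state *to_state(struct v4l2_subdev *sd)
	{
		return container_of(sd, struct foo_state, sd);
	}

	static const struct v4l2_subdev_ops foo_ops = {
		/* fill in .core/.tuner/.audio ops here */
	};

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct foo_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (!state)
			return -ENOMEM;
		/* initialises the subdev and links it to the client via clientdata */
		v4l2_i2c_subdev_init(&state->sd, client, &foo_ops);
		return 0;
	}

	static int foo_remove(struct i2c_client *client)
	{
		struct v4l2_subdev *sd = i2c_get_clientdata(client);

		v4l2_device_unregister_subdev(sd);
		kfree(to_state(sd));
		return 0;
	}
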
diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
index 1c391f0328fd..2644e0dc9251 100644
--- a/drivers/media/video/tda9840.c
+++ b/drivers/media/video/tda9840.c
@@ -29,7 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/ioctl.h> 30#include <linux/ioctl.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <media/v4l2-common.h> 32#include <media/v4l2-device.h>
33#include <media/v4l2-i2c-drv-legacy.h> 33#include <media/v4l2-i2c-drv-legacy.h>
34#include "tda9840.h" 34#include "tda9840.h"
35 35
@@ -62,85 +62,89 @@ static unsigned short normal_i2c[] = { I2C_ADDR_TDA9840, I2C_CLIENT_END };
62/* magic definition of all other variables and things */ 62/* magic definition of all other variables and things */
63I2C_CLIENT_INSMOD; 63I2C_CLIENT_INSMOD;
64 64
65static void tda9840_write(struct i2c_client *client, u8 reg, u8 val) 65static void tda9840_write(struct v4l2_subdev *sd, u8 reg, u8 val)
66{ 66{
67 struct i2c_client *client = v4l2_get_subdevdata(sd);
68
67 if (i2c_smbus_write_byte_data(client, reg, val)) 69 if (i2c_smbus_write_byte_data(client, reg, val))
68 v4l_dbg(1, debug, client, "error writing %02x to %02x\n", 70 v4l2_dbg(1, debug, sd, "error writing %02x to %02x\n",
69 val, reg); 71 val, reg);
70} 72}
71 73
72static int tda9840_command(struct i2c_client *client, unsigned cmd, void *arg) 74static int tda9840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
73{ 75{
74 int byte = *(int *)arg; 76 int byte;
75
76 switch (cmd) {
77 case VIDIOC_S_TUNER: {
78 struct v4l2_tuner *t = arg;
79 int byte;
80 77
81 if (t->index) 78 if (t->index)
82 return -EINVAL; 79 return -EINVAL;
83 80
84 switch (t->audmode) { 81 switch (t->audmode) {
85 case V4L2_TUNER_MODE_STEREO: 82 case V4L2_TUNER_MODE_STEREO:
86 byte = TDA9840_SET_STEREO; 83 byte = TDA9840_SET_STEREO;
87 break;
88 case V4L2_TUNER_MODE_LANG1_LANG2:
89 byte = TDA9840_SET_BOTH;
90 break;
91 case V4L2_TUNER_MODE_LANG1:
92 byte = TDA9840_SET_LANG1;
93 break;
94 case V4L2_TUNER_MODE_LANG2:
95 byte = TDA9840_SET_LANG2;
96 break;
97 default:
98 byte = TDA9840_SET_MONO;
99 break;
100 }
101 v4l_dbg(1, debug, client, "TDA9840_SWITCH: 0x%02x\n", byte);
102 tda9840_write(client, SWITCH, byte);
103 break; 84 break;
85 case V4L2_TUNER_MODE_LANG1_LANG2:
86 byte = TDA9840_SET_BOTH;
87 break;
88 case V4L2_TUNER_MODE_LANG1:
89 byte = TDA9840_SET_LANG1;
90 break;
91 case V4L2_TUNER_MODE_LANG2:
92 byte = TDA9840_SET_LANG2;
93 break;
94 default:
95 byte = TDA9840_SET_MONO;
96 break;
97 }
98 v4l2_dbg(1, debug, sd, "TDA9840_SWITCH: 0x%02x\n", byte);
99 tda9840_write(sd, SWITCH, byte);
100 return 0;
101}
102
103static int tda9840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
104{
105 struct i2c_client *client = v4l2_get_subdevdata(sd);
106 u8 byte;
107
108 t->rxsubchans = V4L2_TUNER_SUB_MONO;
109 if (1 != i2c_master_recv(client, &byte, 1)) {
110 v4l2_dbg(1, debug, sd,
111 "i2c_master_recv() failed\n");
112 return -EIO;
113 }
114
115 if (byte & 0x80) {
116 v4l2_dbg(1, debug, sd,
117 "TDA9840_DETECT: register contents invalid\n");
118 return -EINVAL;
104 } 119 }
105 120
106 case VIDIOC_G_TUNER: { 121 v4l2_dbg(1, debug, sd, "TDA9840_DETECT: byte: 0x%02x\n", byte);
107 struct v4l2_tuner *t = arg;
108 u8 byte;
109 122
123 switch (byte & 0x60) {
124 case 0x00:
110 t->rxsubchans = V4L2_TUNER_SUB_MONO; 125 t->rxsubchans = V4L2_TUNER_SUB_MONO;
111 if (1 != i2c_master_recv(client, &byte, 1)) { 126 break;
112 v4l_dbg(1, debug, client, 127 case 0x20:
113 "i2c_master_recv() failed\n"); 128 t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
114 return -EIO; 129 break;
115 } 130 case 0x40:
116 131 t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
117 if (byte & 0x80) { 132 break;
118 v4l_dbg(1, debug, client, 133 default: /* Incorrect detect */
119 "TDA9840_DETECT: register contents invalid\n"); 134 t->rxsubchans = V4L2_TUNER_MODE_MONO;
120 return -EINVAL;
121 }
122
123 v4l_dbg(1, debug, client, "TDA9840_DETECT: byte: 0x%02x\n", byte);
124
125 switch (byte & 0x60) {
126 case 0x00:
127 t->rxsubchans = V4L2_TUNER_SUB_MONO;
128 break;
129 case 0x20:
130 t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
131 break;
132 case 0x40:
133 t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
134 break;
135 default: /* Incorrect detect */
136 t->rxsubchans = V4L2_TUNER_MODE_MONO;
137 break;
138 }
139 break; 135 break;
140 } 136 }
137 return 0;
138}
141 139
140static int tda9840_ioctl(struct v4l2_subdev *sd, unsigned cmd, void *arg)
141{
142 int byte;
143
144 switch (cmd) {
142 case TDA9840_LEVEL_ADJUST: 145 case TDA9840_LEVEL_ADJUST:
143 v4l_dbg(1, debug, client, "TDA9840_LEVEL_ADJUST: %d\n", byte); 146 byte = *(int *)arg;
147 v4l2_dbg(1, debug, sd, "TDA9840_LEVEL_ADJUST: %d\n", byte);
144 148
145 /* check for correct range */ 149 /* check for correct range */
146 if (byte > 25 || byte < -20) 150 if (byte > 25 || byte < -20)
@@ -152,11 +156,12 @@ static int tda9840_command(struct i2c_client *client, unsigned cmd, void *arg)
152 byte += 0x8; 156 byte += 0x8;
153 else 157 else
154 byte = -byte; 158 byte = -byte;
155 tda9840_write(client, LEVEL_ADJUST, byte); 159 tda9840_write(sd, LEVEL_ADJUST, byte);
156 break; 160 break;
157 161
158 case TDA9840_STEREO_ADJUST: 162 case TDA9840_STEREO_ADJUST:
159 v4l_dbg(1, debug, client, "TDA9840_STEREO_ADJUST: %d\n", byte); 163 byte = *(int *)arg;
164 v4l2_dbg(1, debug, sd, "TDA9840_STEREO_ADJUST: %d\n", byte);
160 165
161 /* check for correct range */ 166 /* check for correct range */
162 if (byte > 25 || byte < -24) 167 if (byte > 25 || byte < -24)
@@ -169,18 +174,41 @@ static int tda9840_command(struct i2c_client *client, unsigned cmd, void *arg)
169 else 174 else
170 byte = -byte; 175 byte = -byte;
171 176
172 tda9840_write(client, STEREO_ADJUST, byte); 177 tda9840_write(sd, STEREO_ADJUST, byte);
173 break; 178 break;
174 default: 179 default:
175 return -ENOIOCTLCMD; 180 return -ENOIOCTLCMD;
176 } 181 }
177
178 return 0; 182 return 0;
179} 183}
180 184
185static int tda9840_command(struct i2c_client *client, unsigned cmd, void *arg)
186{
187 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
188}
189
190/* ----------------------------------------------------------------------- */
191
192static const struct v4l2_subdev_core_ops tda9840_core_ops = {
193 .ioctl = tda9840_ioctl,
194};
195
196static const struct v4l2_subdev_tuner_ops tda9840_tuner_ops = {
197 .s_tuner = tda9840_s_tuner,
198 .g_tuner = tda9840_g_tuner,
199};
200
201static const struct v4l2_subdev_ops tda9840_ops = {
202 .core = &tda9840_core_ops,
203 .tuner = &tda9840_tuner_ops,
204};
205
206/* ----------------------------------------------------------------------- */
207
181static int tda9840_probe(struct i2c_client *client, 208static int tda9840_probe(struct i2c_client *client,
182 const struct i2c_device_id *id) 209 const struct i2c_device_id *id)
183{ 210{
211 struct v4l2_subdev *sd;
184 int result; 212 int result;
185 int byte; 213 int byte;
186 214
@@ -188,23 +216,38 @@ static int tda9840_probe(struct i2c_client *client,
188 if (!i2c_check_functionality(client->adapter, 216 if (!i2c_check_functionality(client->adapter,
189 I2C_FUNC_SMBUS_READ_BYTE_DATA | 217 I2C_FUNC_SMBUS_READ_BYTE_DATA |
190 I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) 218 I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
191 return 0; 219 return -EIO;
192 220
193 v4l_info(client, "chip found @ 0x%x (%s)\n", 221 v4l_info(client, "chip found @ 0x%x (%s)\n",
194 client->addr << 1, client->adapter->name); 222 client->addr << 1, client->adapter->name);
195 223
224 sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
225 if (sd == NULL)
226 return -ENOMEM;
227 v4l2_i2c_subdev_init(sd, client, &tda9840_ops);
228
196 /* set initial values for level & stereo - adjustment, mode */ 229 /* set initial values for level & stereo - adjustment, mode */
197 byte = 0; 230 byte = 0;
198 result = tda9840_command(client, TDA9840_LEVEL_ADJUST, &byte); 231 result = tda9840_ioctl(sd, TDA9840_LEVEL_ADJUST, &byte);
199 result += tda9840_command(client, TDA9840_STEREO_ADJUST, &byte); 232 result |= tda9840_ioctl(sd, TDA9840_STEREO_ADJUST, &byte);
200 tda9840_write(client, SWITCH, TDA9840_SET_STEREO); 233 tda9840_write(sd, SWITCH, TDA9840_SET_STEREO);
201 if (result) { 234 if (result) {
202 v4l_dbg(1, debug, client, "could not initialize tda9840\n"); 235 v4l2_dbg(1, debug, sd, "could not initialize tda9840\n");
236 kfree(sd);
203 return -ENODEV; 237 return -ENODEV;
204 } 238 }
205 return 0; 239 return 0;
206} 240}
207 241
242static int tda9840_remove(struct i2c_client *client)
243{
244 struct v4l2_subdev *sd = i2c_get_clientdata(client);
245
246 v4l2_device_unregister_subdev(sd);
247 kfree(sd);
248 return 0;
249}
250
208static int tda9840_legacy_probe(struct i2c_adapter *adapter) 251static int tda9840_legacy_probe(struct i2c_adapter *adapter)
209{ 252{
210 /* Let's see whether this is a known adapter we can attach to. 253 /* Let's see whether this is a known adapter we can attach to.
@@ -222,6 +265,7 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data = {
222 .driverid = I2C_DRIVERID_TDA9840, 265 .driverid = I2C_DRIVERID_TDA9840,
223 .command = tda9840_command, 266 .command = tda9840_command,
224 .probe = tda9840_probe, 267 .probe = tda9840_probe,
268 .remove = tda9840_remove,
225 .legacy_probe = tda9840_legacy_probe, 269 .legacy_probe = tda9840_legacy_probe,
226 .id_table = tda9840_id, 270 .id_table = tda9840_id,
227}; 271};
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
index 792f0b079909..56f0c0eb500f 100644
--- a/drivers/media/video/tda9875.c
+++ b/drivers/media/video/tda9875.c
@@ -25,11 +25,10 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/errno.h> 26#include <linux/errno.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/videodev2.h>
29#include <media/v4l2-common.h>
30#include <linux/i2c.h> 28#include <linux/i2c.h>
31#include <linux/init.h> 29#include <linux/videodev2.h>
32 30#include <media/v4l2-device.h>
31#include <media/v4l2-i2c-drv-legacy.h>
33#include <media/i2c-addr.h> 32#include <media/i2c-addr.h>
34 33
35static int debug; /* insmod parameter */ 34static int debug; /* insmod parameter */
@@ -46,13 +45,15 @@ I2C_CLIENT_INSMOD;
46 45
47/* This is a superset of the TDA9875 */ 46/* This is a superset of the TDA9875 */
48struct tda9875 { 47struct tda9875 {
48 struct v4l2_subdev sd;
49 int rvol, lvol; 49 int rvol, lvol;
50 int bass, treble; 50 int bass, treble;
51 struct i2c_client c;
52}; 51};
53 52
54static struct i2c_driver driver; 53static inline struct tda9875 *to_state(struct v4l2_subdev *sd)
55static struct i2c_client client_template; 54{
55 return container_of(sd, struct tda9875, sd);
56}
56 57
57#define dprintk if (debug) printk 58#define dprintk if (debug) printk
58 59
@@ -105,15 +106,16 @@ static struct i2c_client client_template;
105 106
106/* Begin code */ 107/* Begin code */
107 108
108static int tda9875_write(struct i2c_client *client, int subaddr, unsigned char val) 109static int tda9875_write(struct v4l2_subdev *sd, int subaddr, unsigned char val)
109{ 110{
111 struct i2c_client *client = v4l2_get_subdevdata(sd);
110 unsigned char buffer[2]; 112 unsigned char buffer[2];
111 dprintk("In tda9875_write\n"); 113
112 dprintk("Writing %d 0x%x\n", subaddr, val); 114 v4l2_dbg(1, debug, sd, "Writing %d 0x%x\n", subaddr, val);
113 buffer[0] = subaddr; 115 buffer[0] = subaddr;
114 buffer[1] = val; 116 buffer[1] = val;
115 if (2 != i2c_master_send(client,buffer,2)) { 117 if (2 != i2c_master_send(client, buffer, 2)) {
116 printk(KERN_WARNING "tda9875: I/O error, trying (write %d 0x%x)\n", 118 v4l2_warn(sd, "I/O error, trying (write %d 0x%x)\n",
117 subaddr, val); 119 subaddr, val);
118 return -1; 120 return -1;
119 } 121 }
@@ -121,7 +123,7 @@ static int tda9875_write(struct i2c_client *client, int subaddr, unsigned char v
121} 123}
122 124
123 125
124static int i2c_read_register(struct i2c_adapter *adap, int addr, int reg) 126static int i2c_read_register(struct i2c_client *client, int addr, int reg)
125{ 127{
126 unsigned char write[1]; 128 unsigned char write[1];
127 unsigned char read[1]; 129 unsigned char read[1];
@@ -129,150 +131,83 @@ static int i2c_read_register(struct i2c_adapter *adap, int addr, int reg)
129 { addr, 0, 1, write }, 131 { addr, 0, 1, write },
130 { addr, I2C_M_RD, 1, read } 132 { addr, I2C_M_RD, 1, read }
131 }; 133 };
134
132 write[0] = reg; 135 write[0] = reg;
133 136
134 if (2 != i2c_transfer(adap,msgs,2)) { 137 if (2 != i2c_transfer(client->adapter, msgs, 2)) {
135 printk(KERN_WARNING "tda9875: I/O error (read2)\n"); 138 v4l_warn(client, "I/O error (read2)\n");
136 return -1; 139 return -1;
137 } 140 }
138 dprintk("tda9875: chip_read2: reg%d=0x%x\n",reg,read[0]); 141 v4l_dbg(1, debug, client, "chip_read2: reg%d=0x%x\n", reg, read[0]);
139 return read[0]; 142 return read[0];
140} 143}
141 144
142static void tda9875_set(struct i2c_client *client) 145static void tda9875_set(struct v4l2_subdev *sd)
143{ 146{
144 struct tda9875 *tda = i2c_get_clientdata(client); 147 struct tda9875 *tda = to_state(sd);
145 unsigned char a; 148 unsigned char a;
146 149
147 dprintk(KERN_DEBUG "tda9875_set(%04x,%04x,%04x,%04x)\n", 150 v4l2_dbg(1, debug, sd, "tda9875_set(%04x,%04x,%04x,%04x)\n",
148 tda->lvol,tda->rvol,tda->bass,tda->treble); 151 tda->lvol, tda->rvol, tda->bass, tda->treble);
149
150 152
151 a = tda->lvol & 0xff; 153 a = tda->lvol & 0xff;
152 tda9875_write(client, TDA9875_MVL, a); 154 tda9875_write(sd, TDA9875_MVL, a);
153 a =tda->rvol & 0xff; 155 a =tda->rvol & 0xff;
154 tda9875_write(client, TDA9875_MVR, a); 156 tda9875_write(sd, TDA9875_MVR, a);
155 a =tda->bass & 0xff; 157 a =tda->bass & 0xff;
156 tda9875_write(client, TDA9875_MBA, a); 158 tda9875_write(sd, TDA9875_MBA, a);
157 a =tda->treble & 0xff; 159 a =tda->treble & 0xff;
158 tda9875_write(client, TDA9875_MTR, a); 160 tda9875_write(sd, TDA9875_MTR, a);
159} 161}
160 162
161static void do_tda9875_init(struct i2c_client *client) 163static void do_tda9875_init(struct v4l2_subdev *sd)
162{ 164{
163 struct tda9875 *t = i2c_get_clientdata(client); 165 struct tda9875 *t = to_state(sd);
164 dprintk("In tda9875_init\n"); 166
165 tda9875_write(client, TDA9875_CFG, 0xd0 ); /*reg de config 0 (reset)*/ 167 v4l2_dbg(1, debug, sd, "In tda9875_init\n");
166 tda9875_write(client, TDA9875_MSR, 0x03 ); /* Monitor 0b00000XXX*/ 168 tda9875_write(sd, TDA9875_CFG, 0xd0); /*reg de config 0 (reset)*/
167 tda9875_write(client, TDA9875_C1MSB, 0x00 ); /*Car1(FM) MSB XMHz*/ 169 tda9875_write(sd, TDA9875_MSR, 0x03); /* Monitor 0b00000XXX*/
168 tda9875_write(client, TDA9875_C1MIB, 0x00 ); /*Car1(FM) MIB XMHz*/ 170 tda9875_write(sd, TDA9875_C1MSB, 0x00); /*Car1(FM) MSB XMHz*/
169 tda9875_write(client, TDA9875_C1LSB, 0x00 ); /*Car1(FM) LSB XMHz*/ 171 tda9875_write(sd, TDA9875_C1MIB, 0x00); /*Car1(FM) MIB XMHz*/
170 tda9875_write(client, TDA9875_C2MSB, 0x00 ); /*Car2(NICAM) MSB XMHz*/ 172 tda9875_write(sd, TDA9875_C1LSB, 0x00); /*Car1(FM) LSB XMHz*/
171 tda9875_write(client, TDA9875_C2MIB, 0x00 ); /*Car2(NICAM) MIB XMHz*/ 173 tda9875_write(sd, TDA9875_C2MSB, 0x00); /*Car2(NICAM) MSB XMHz*/
172 tda9875_write(client, TDA9875_C2LSB, 0x00 ); /*Car2(NICAM) LSB XMHz*/ 174 tda9875_write(sd, TDA9875_C2MIB, 0x00); /*Car2(NICAM) MIB XMHz*/
173 tda9875_write(client, TDA9875_DCR, 0x00 ); /*Demod config 0x00*/ 175 tda9875_write(sd, TDA9875_C2LSB, 0x00); /*Car2(NICAM) LSB XMHz*/
174 tda9875_write(client, TDA9875_DEEM, 0x44 ); /*DE-Emph 0b0100 0100*/ 176 tda9875_write(sd, TDA9875_DCR, 0x00); /*Demod config 0x00*/
175 tda9875_write(client, TDA9875_FMAT, 0x00 ); /*FM Matrix reg 0x00*/ 177 tda9875_write(sd, TDA9875_DEEM, 0x44); /*DE-Emph 0b0100 0100*/
176 tda9875_write(client, TDA9875_SC1, 0x00 ); /* SCART 1 (SC1)*/ 178 tda9875_write(sd, TDA9875_FMAT, 0x00); /*FM Matrix reg 0x00*/
177 tda9875_write(client, TDA9875_SC2, 0x01 ); /* SCART 2 (sc2)*/ 179 tda9875_write(sd, TDA9875_SC1, 0x00); /* SCART 1 (SC1)*/
178 180 tda9875_write(sd, TDA9875_SC2, 0x01); /* SCART 2 (sc2)*/
179 tda9875_write(client, TDA9875_CH1V, 0x10 ); /* Channel volume 1 mute*/ 181
180 tda9875_write(client, TDA9875_CH2V, 0x10 ); /* Channel volume 2 mute */ 182 tda9875_write(sd, TDA9875_CH1V, 0x10); /* Channel volume 1 mute*/
181 tda9875_write(client, TDA9875_DACOS, 0x02 ); /* sig DAC i/o(in:nicam)*/ 183 tda9875_write(sd, TDA9875_CH2V, 0x10); /* Channel volume 2 mute */
182 tda9875_write(client, TDA9875_ADCIS, 0x6f ); /* sig ADC input(in:mono)*/ 184 tda9875_write(sd, TDA9875_DACOS, 0x02); /* sig DAC i/o(in:nicam)*/
183 tda9875_write(client, TDA9875_LOSR, 0x00 ); /* line out (in:mono)*/ 185 tda9875_write(sd, TDA9875_ADCIS, 0x6f); /* sig ADC input(in:mono)*/
184 tda9875_write(client, TDA9875_AER, 0x00 ); /*06 Effect (AVL+PSEUDO) */ 186 tda9875_write(sd, TDA9875_LOSR, 0x00); /* line out (in:mono)*/
185 tda9875_write(client, TDA9875_MCS, 0x44 ); /* Main ch select (DAC) */ 187 tda9875_write(sd, TDA9875_AER, 0x00); /*06 Effect (AVL+PSEUDO) */
186 tda9875_write(client, TDA9875_MVL, 0x03 ); /* Vol Main left 10dB */ 188 tda9875_write(sd, TDA9875_MCS, 0x44); /* Main ch select (DAC) */
187 tda9875_write(client, TDA9875_MVR, 0x03 ); /* Vol Main right 10dB*/ 189 tda9875_write(sd, TDA9875_MVL, 0x03); /* Vol Main left 10dB */
188 tda9875_write(client, TDA9875_MBA, 0x00 ); /* Main Bass Main 0dB*/ 190 tda9875_write(sd, TDA9875_MVR, 0x03); /* Vol Main right 10dB*/
189 tda9875_write(client, TDA9875_MTR, 0x00 ); /* Main Treble Main 0dB*/ 191 tda9875_write(sd, TDA9875_MBA, 0x00); /* Main Bass Main 0dB*/
190 tda9875_write(client, TDA9875_ACS, 0x44 ); /* Aux chan select (dac)*/ 192 tda9875_write(sd, TDA9875_MTR, 0x00); /* Main Treble Main 0dB*/
191 tda9875_write(client, TDA9875_AVL, 0x00 ); /* Vol Aux left 0dB*/ 193 tda9875_write(sd, TDA9875_ACS, 0x44); /* Aux chan select (dac)*/
192 tda9875_write(client, TDA9875_AVR, 0x00 ); /* Vol Aux right 0dB*/ 194 tda9875_write(sd, TDA9875_AVL, 0x00); /* Vol Aux left 0dB*/
193 tda9875_write(client, TDA9875_ABA, 0x00 ); /* Aux Bass Main 0dB*/ 195 tda9875_write(sd, TDA9875_AVR, 0x00); /* Vol Aux right 0dB*/
194 tda9875_write(client, TDA9875_ATR, 0x00 ); /* Aux Aigus Main 0dB*/ 196 tda9875_write(sd, TDA9875_ABA, 0x00); /* Aux Bass Main 0dB*/
195 197 tda9875_write(sd, TDA9875_ATR, 0x00); /* Aux Aigus Main 0dB*/
196 tda9875_write(client, TDA9875_MUT, 0xcc ); /* General mute */ 198
197 199 tda9875_write(sd, TDA9875_MUT, 0xcc); /* General mute */
198 t->lvol=t->rvol =0; /* 0dB */ 200
199 t->bass=0; /* 0dB */ 201 t->lvol = t->rvol = 0; /* 0dB */
200 t->treble=0; /* 0dB */ 202 t->bass = 0; /* 0dB */
201 tda9875_set(client); 203 t->treble = 0; /* 0dB */
202 204 tda9875_set(sd);
203} 205}
204 206
205 207
206/* *********************** * 208static int tda9875_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
207 * i2c interface functions *
208 * *********************** */
209
210static int tda9875_checkit(struct i2c_adapter *adap, int addr)
211{
212 int dic,rev;
213
214 dic=i2c_read_register(adap,addr,254);
215 rev=i2c_read_register(adap,addr,255);
216
217 if(dic==0 || dic==2) { // tda9875 and tda9875A
218 printk("tda9875: TDA9875%s Rev.%d detected at 0x%x\n",
219 dic==0?"":"A", rev,addr<<1);
220 return 1;
221 }
222 printk("tda9875: no such chip at 0x%x (dic=0x%x rev=0x%x)\n",addr<<1,dic,rev);
223 return(0);
224}
225
226static int tda9875_attach(struct i2c_adapter *adap, int addr, int kind)
227{ 209{
228 struct tda9875 *t; 210 struct tda9875 *t = to_state(sd);
229 struct i2c_client *client;
230 dprintk("In tda9875_attach\n");
231
232 t = kzalloc(sizeof *t,GFP_KERNEL);
233 if (!t)
234 return -ENOMEM;
235
236 client = &t->c;
237 memcpy(client,&client_template,sizeof(struct i2c_client));
238 client->adapter = adap;
239 client->addr = addr;
240 i2c_set_clientdata(client, t);
241
242 if(!tda9875_checkit(adap,addr)) {
243 kfree(t);
244 return 1;
245 }
246
247 do_tda9875_init(client);
248 printk(KERN_INFO "tda9875: init\n");
249
250 i2c_attach_client(client);
251 return 0;
252}
253
254static int tda9875_probe(struct i2c_adapter *adap)
255{
256 if (adap->class & I2C_CLASS_TV_ANALOG)
257 return i2c_probe(adap, &addr_data, tda9875_attach);
258 return 0;
259}
260
261static int tda9875_detach(struct i2c_client *client)
262{
263 struct tda9875 *t = i2c_get_clientdata(client);
264
265 do_tda9875_init(client);
266 i2c_detach_client(client);
267
268 kfree(t);
269 return 0;
270}
271
272static int tda9875_get_ctrl(struct i2c_client *client,
273 struct v4l2_control *ctrl)
274{
275 struct tda9875 *t = i2c_get_clientdata(client);
276 211
277 switch (ctrl->id) { 212 switch (ctrl->id) {
278 case V4L2_CID_AUDIO_VOLUME: 213 case V4L2_CID_AUDIO_VOLUME:
@@ -304,10 +239,9 @@ static int tda9875_get_ctrl(struct i2c_client *client,
304 return -EINVAL; 239 return -EINVAL;
305} 240}
306 241
307static int tda9875_set_ctrl(struct i2c_client *client, 242static int tda9875_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
308 struct v4l2_control *ctrl)
309{ 243{
310 struct tda9875 *t = i2c_get_clientdata(client); 244 struct tda9875 *t = to_state(sd);
311 int chvol=0, volume, balance, left, right; 245 int chvol=0, volume, balance, left, right;
312 246
313 switch (ctrl->id) { 247 switch (ctrl->id) {
@@ -371,85 +305,105 @@ static int tda9875_set_ctrl(struct i2c_client *client,
371 t->rvol = -84 & 0xff; 305 t->rvol = -84 & 0xff;
372 } 306 }
373 307
374//printk("tda9875 bal:%04x vol:%04x bass:%04x treble:%04x\n",va->balance,va->volume,va->bass,va->treble); 308 tda9875_set(sd);
375
376 tda9875_set(client);
377
378 return 0; 309 return 0;
379} 310}
380 311
381 312static int tda9875_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
382static int tda9875_command(struct i2c_client *client,
383 unsigned int cmd, void *arg)
384{ 313{
385 dprintk("In tda9875_command...\n"); 314 switch (qc->id) {
386 315 case V4L2_CID_AUDIO_VOLUME:
387 switch (cmd) { 316 case V4L2_CID_AUDIO_BASS:
388 /* --- v4l ioctls --- */ 317 case V4L2_CID_AUDIO_TREBLE:
389 /* take care: bttv does userspace copying, we'll get a
390 kernel pointer here... */
391 case VIDIOC_QUERYCTRL:
392 {
393 struct v4l2_queryctrl *qc = arg;
394
395 switch (qc->id) {
396 case V4L2_CID_AUDIO_VOLUME:
397 case V4L2_CID_AUDIO_BASS:
398 case V4L2_CID_AUDIO_TREBLE:
399 default:
400 return -EINVAL;
401 }
402 return v4l2_ctrl_query_fill_std(qc); 318 return v4l2_ctrl_query_fill_std(qc);
403 } 319 }
404 case VIDIOC_S_CTRL: 320 return -EINVAL;
405 return tda9875_set_ctrl(client, arg); 321}
406 322
407 case VIDIOC_G_CTRL: 323static int tda9875_command(struct i2c_client *client, unsigned cmd, void *arg)
408 return tda9875_get_ctrl(client, arg); 324{
325 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
326}
409 327
410 default: /* Not VIDEOCGAUDIO or VIDEOCSAUDIO */ 328/* ----------------------------------------------------------------------- */
411 329
412 /* nothing */ 330static const struct v4l2_subdev_core_ops tda9875_core_ops = {
413 dprintk("Default\n"); 331 .queryctrl = tda9875_queryctrl,
332 .g_ctrl = tda9875_g_ctrl,
333 .s_ctrl = tda9875_s_ctrl,
334};
414 335
415 } /* end of (cmd) switch */ 336static const struct v4l2_subdev_ops tda9875_ops = {
337 .core = &tda9875_core_ops,
338};
416 339
417 return 0; 340/* ----------------------------------------------------------------------- */
418}
419 341
420 342
421static struct i2c_driver driver = { 343/* *********************** *
422 .driver = { 344 * i2c interface functions *
423 .name = "tda9875", 345 * *********************** */
424 },
425 .id = I2C_DRIVERID_TDA9875,
426 .attach_adapter = tda9875_probe,
427 .detach_client = tda9875_detach,
428 .command = tda9875_command,
429};
430 346
431static struct i2c_client client_template = 347static int tda9875_checkit(struct i2c_client *client, int addr)
432{ 348{
433 .name = "tda9875", 349 int dic, rev;
434 .driver = &driver,
435};
436 350
437static int __init tda9875_init(void) 351 dic = i2c_read_register(client, addr, 254);
438{ 352 rev = i2c_read_register(client, addr, 255);
439 return i2c_add_driver(&driver); 353
354 if (dic == 0 || dic == 2) { /* tda9875 and tda9875A */
355 v4l_info(client, "tda9875%s rev. %d detected at 0x%02x\n",
356 dic == 0 ? "" : "A", rev, addr << 1);
357 return 1;
358 }
359 v4l_info(client, "no such chip at 0x%02x (dic=0x%x rev=0x%x)\n",
360 addr << 1, dic, rev);
361 return 0;
440} 362}
441 363
442static void __exit tda9875_fini(void) 364static int tda9875_probe(struct i2c_client *client,
365 const struct i2c_device_id *id)
443{ 366{
444 i2c_del_driver(&driver); 367 struct tda9875 *t;
368 struct v4l2_subdev *sd;
369
370 v4l_info(client, "chip found @ 0x%02x (%s)\n",
371 client->addr << 1, client->adapter->name);
372
373 if (!tda9875_checkit(client, client->addr))
374 return -ENODEV;
375
376 t = kzalloc(sizeof(*t), GFP_KERNEL);
377 if (!t)
378 return -ENOMEM;
379 sd = &t->sd;
380 v4l2_i2c_subdev_init(sd, client, &tda9875_ops);
381
382 do_tda9875_init(sd);
383 return 0;
445} 384}
446 385
447module_init(tda9875_init); 386static int tda9875_remove(struct i2c_client *client)
448module_exit(tda9875_fini); 387{
388 struct v4l2_subdev *sd = i2c_get_clientdata(client);
449 389
450/* 390 do_tda9875_init(sd);
451 * Local variables: 391 v4l2_device_unregister_subdev(sd);
452 * c-basic-offset: 8 392 kfree(to_state(sd));
453 * End: 393 return 0;
454 */ 394}
455 395
396static const struct i2c_device_id tda9875_id[] = {
397 { "tda9875", 0 },
398 { }
399};
400MODULE_DEVICE_TABLE(i2c, tda9875_id);
401
402static struct v4l2_i2c_driver_data v4l2_i2c_data = {
403 .name = "tda9875",
404 .driverid = I2C_DRIVERID_TDA9875,
405 .command = tda9875_command,
406 .probe = tda9875_probe,
407 .remove = tda9875_remove,
408 .id_table = tda9875_id,
409};
diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
index cde092adbb5a..31dde86f2df4 100644
--- a/drivers/media/video/tea6415c.c
+++ b/drivers/media/video/tea6415c.c
@@ -31,7 +31,7 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/ioctl.h> 32#include <linux/ioctl.h>
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <media/v4l2-common.h> 34#include <media/v4l2-device.h>
35#include <media/v4l2-i2c-drv-legacy.h> 35#include <media/v4l2-i2c-drv-legacy.h>
36#include "tea6415c.h" 36#include "tea6415c.h"
37 37
@@ -122,31 +122,57 @@ static int switch_matrix(struct i2c_client *client, int i, int o)
122 return ret; 122 return ret;
123} 123}
124 124
125static int tea6415c_command(struct i2c_client *client, unsigned cmd, void *arg) 125static int tea6415c_ioctl(struct v4l2_subdev *sd, unsigned cmd, void *arg)
126{ 126{
127 struct tea6415c_multiplex *v = (struct tea6415c_multiplex *)arg; 127 if (cmd == TEA6415C_SWITCH) {
128 int result = 0; 128 struct i2c_client *client = v4l2_get_subdevdata(sd);
129 struct tea6415c_multiplex *v = (struct tea6415c_multiplex *)arg;
129 130
130 switch (cmd) { 131 return switch_matrix(client, v->in, v->out);
131 case TEA6415C_SWITCH:
132 result = switch_matrix(client, v->in, v->out);
133 break;
134 default:
135 return -ENOIOCTLCMD;
136 } 132 }
137 return result; 133 return -ENOIOCTLCMD;
138} 134}
139 135
136static int tea6415c_command(struct i2c_client *client, unsigned cmd, void *arg)
137{
138 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
139}
140
141/* ----------------------------------------------------------------------- */
142
143static const struct v4l2_subdev_core_ops tea6415c_core_ops = {
144 .ioctl = tea6415c_ioctl,
145};
146
147static const struct v4l2_subdev_ops tea6415c_ops = {
148 .core = &tea6415c_core_ops,
149};
150
140/* this function is called by i2c_probe */ 151/* this function is called by i2c_probe */
141static int tea6415c_probe(struct i2c_client *client, 152static int tea6415c_probe(struct i2c_client *client,
142 const struct i2c_device_id *id) 153 const struct i2c_device_id *id)
143{ 154{
155 struct v4l2_subdev *sd;
156
144 /* let's see whether this adapter can support what we need */ 157 /* let's see whether this adapter can support what we need */
145 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE)) 158 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE))
146 return 0; 159 return 0;
147 160
148 v4l_info(client, "chip found @ 0x%x (%s)\n", 161 v4l_info(client, "chip found @ 0x%x (%s)\n",
149 client->addr << 1, client->adapter->name); 162 client->addr << 1, client->adapter->name);
163 sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
164 if (sd == NULL)
165 return -ENOMEM;
166 v4l2_i2c_subdev_init(sd, client, &tea6415c_ops);
167 return 0;
168}
169
170static int tea6415c_remove(struct i2c_client *client)
171{
172 struct v4l2_subdev *sd = i2c_get_clientdata(client);
173
174 v4l2_device_unregister_subdev(sd);
175 kfree(sd);
150 return 0; 176 return 0;
151} 177}
152 178
@@ -168,6 +194,7 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data = {
168 .driverid = I2C_DRIVERID_TEA6415C, 194 .driverid = I2C_DRIVERID_TEA6415C,
169 .command = tea6415c_command, 195 .command = tea6415c_command,
170 .probe = tea6415c_probe, 196 .probe = tea6415c_probe,
197 .remove = tea6415c_remove,
171 .legacy_probe = tea6415c_legacy_probe, 198 .legacy_probe = tea6415c_legacy_probe,
172 .id_table = tea6415c_id, 199 .id_table = tea6415c_id,
173}; 200};
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index e50820969e64..38e519f04bde 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -31,7 +31,7 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/ioctl.h> 32#include <linux/ioctl.h>
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <media/v4l2-common.h> 34#include <media/v4l2-device.h>
35#include <media/v4l2-i2c-drv-legacy.h> 35#include <media/v4l2-i2c-drv-legacy.h>
36#include "tea6420.h" 36#include "tea6420.h"
37 37
@@ -90,26 +90,37 @@ static int tea6420_switch(struct i2c_client *client, int i, int o, int g)
90 return 0; 90 return 0;
91} 91}
92 92
93static int tea6420_command(struct i2c_client *client, unsigned cmd, void *arg) 93static int tea6420_ioctl(struct v4l2_subdev *sd, unsigned cmd, void *arg)
94{ 94{
95 struct tea6420_multiplex *a = (struct tea6420_multiplex *)arg; 95 if (cmd == TEA6420_SWITCH) {
96 int result = 0; 96 struct i2c_client *client = v4l2_get_subdevdata(sd);
97 struct tea6420_multiplex *a = (struct tea6420_multiplex *)arg;
97 98
98 switch (cmd) { 99 return tea6420_switch(client, a->in, a->out, a->gain);
99 case TEA6420_SWITCH:
100 result = tea6420_switch(client, a->in, a->out, a->gain);
101 break;
102 default:
103 return -ENOIOCTLCMD;
104 } 100 }
101 return -ENOIOCTLCMD;
102}
105 103
106 return result; 104static int tea6420_command(struct i2c_client *client, unsigned cmd, void *arg)
105{
106 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
107} 107}
108 108
109/* ----------------------------------------------------------------------- */
110
111static const struct v4l2_subdev_core_ops tea6420_core_ops = {
112 .ioctl = tea6420_ioctl,
113};
114
115static const struct v4l2_subdev_ops tea6420_ops = {
116 .core = &tea6420_core_ops,
117};
118
109/* this function is called by i2c_probe */ 119/* this function is called by i2c_probe */
110static int tea6420_probe(struct i2c_client *client, 120static int tea6420_probe(struct i2c_client *client,
111 const struct i2c_device_id *id) 121 const struct i2c_device_id *id)
112{ 122{
123 struct v4l2_subdev *sd;
113 int err, i; 124 int err, i;
114 125
115 /* let's see whether this adapter can support what we need */ 126 /* let's see whether this adapter can support what we need */
@@ -126,9 +137,22 @@ static int tea6420_probe(struct i2c_client *client,
126 } 137 }
127 if (err) { 138 if (err) {
128 v4l_dbg(1, debug, client, "could not initialize tea6420\n"); 139 v4l_dbg(1, debug, client, "could not initialize tea6420\n");
129 kfree(client);
130 return -ENODEV; 140 return -ENODEV;
131 } 141 }
142
143 sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
144 if (sd == NULL)
145 return -ENOMEM;
146 v4l2_i2c_subdev_init(sd, client, &tea6420_ops);
147 return 0;
148}
149
150static int tea6420_remove(struct i2c_client *client)
151{
152 struct v4l2_subdev *sd = i2c_get_clientdata(client);
153
154 v4l2_device_unregister_subdev(sd);
155 kfree(sd);
132 return 0; 156 return 0;
133} 157}
134 158
@@ -150,6 +174,7 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data = {
150 .driverid = I2C_DRIVERID_TEA6420, 174 .driverid = I2C_DRIVERID_TEA6420,
151 .command = tea6420_command, 175 .command = tea6420_command,
152 .probe = tea6420_probe, 176 .probe = tea6420_probe,
177 .remove = tea6420_remove,
153 .legacy_probe = tea6420_legacy_probe, 178 .legacy_probe = tea6420_legacy_probe,
154 .id_table = tea6420_id, 179 .id_table = tea6420_id,
155}; 180};
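
tea6415c and tea6420 keep only their private routing ioctls, now reached through the subdev core .ioctl op, while the legacy .command entry point forwards everything to v4l2_subdev_command(). A bridge driver that holds the registered subdev could drive the audio matrix roughly like this (hypothetical call site; struct tea6420_multiplex and TEA6420_SWITCH come from the driver's tea6420.h, and v4l2_subdev_call() is assumed from the same subdev framework):

	/* Route TEA6420 input 1 to output 2 at 0 dB gain. */
	struct tea6420_multiplex mux = { .in = 1, .out = 2, .gain = 0 };
	int err = v4l2_subdev_call(sd, core, ioctl, TEA6420_SWITCH, &mux);

	/* err is 0 on success; commands the chip does not handle come back
	 * as -ENOIOCTLCMD, exactly what tea6420_ioctl() above returns. */
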
diff --git a/drivers/media/video/tlv320aic23b.c b/drivers/media/video/tlv320aic23b.c
index 281065b9dd2d..5c95ecd09dc2 100644
--- a/drivers/media/video/tlv320aic23b.c
+++ b/drivers/media/video/tlv320aic23b.c
@@ -30,7 +30,7 @@
30#include <linux/i2c.h> 30#include <linux/i2c.h>
31#include <linux/i2c-id.h> 31#include <linux/i2c-id.h>
32#include <linux/videodev2.h> 32#include <linux/videodev2.h>
33#include <media/v4l2-common.h> 33#include <media/v4l2-device.h>
34#include <media/v4l2-i2c-drv-legacy.h> 34#include <media/v4l2-i2c-drv-legacy.h>
35 35
36MODULE_DESCRIPTION("tlv320aic23b driver"); 36MODULE_DESCRIPTION("tlv320aic23b driver");
@@ -44,15 +44,22 @@ I2C_CLIENT_INSMOD;
44/* ----------------------------------------------------------------------- */ 44/* ----------------------------------------------------------------------- */
45 45
46struct tlv320aic23b_state { 46struct tlv320aic23b_state {
47 struct v4l2_subdev sd;
47 u8 muted; 48 u8 muted;
48}; 49};
49 50
50static int tlv320aic23b_write(struct i2c_client *client, int reg, u16 val) 51static inline struct tlv320aic23b_state *to_state(struct v4l2_subdev *sd)
51{ 52{
53 return container_of(sd, struct tlv320aic23b_state, sd);
54}
55
56static int tlv320aic23b_write(struct v4l2_subdev *sd, int reg, u16 val)
57{
58 struct i2c_client *client = v4l2_get_subdevdata(sd);
52 int i; 59 int i;
53 60
54 if ((reg < 0 || reg > 9) && (reg != 15)) { 61 if ((reg < 0 || reg > 9) && (reg != 15)) {
55 v4l_err(client, "Invalid register R%d\n", reg); 62 v4l2_err(sd, "Invalid register R%d\n", reg);
56 return -1; 63 return -1;
57 } 64 }
58 65
@@ -60,61 +67,82 @@ static int tlv320aic23b_write(struct i2c_client *client, int reg, u16 val)
60 if (i2c_smbus_write_byte_data(client, 67 if (i2c_smbus_write_byte_data(client,
61 (reg << 1) | (val >> 8), val & 0xff) == 0) 68 (reg << 1) | (val >> 8), val & 0xff) == 0)
62 return 0; 69 return 0;
63 v4l_err(client, "I2C: cannot write %03x to register R%d\n", val, reg); 70 v4l2_err(sd, "I2C: cannot write %03x to register R%d\n", val, reg);
64 return -1; 71 return -1;
65} 72}
66 73
67static int tlv320aic23b_command(struct i2c_client *client, 74static int tlv320aic23b_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
68 unsigned int cmd, void *arg)
69{ 75{
70 struct tlv320aic23b_state *state = i2c_get_clientdata(client); 76 switch (freq) {
71 struct v4l2_control *ctrl = arg; 77 case 32000: /* set sample rate to 32 kHz */
72 u32 *freq = arg; 78 tlv320aic23b_write(sd, 8, 0x018);
73
74 switch (cmd) {
75 case VIDIOC_INT_AUDIO_CLOCK_FREQ:
76 switch (*freq) {
77 case 32000: /* set sample rate to 32 kHz */
78 tlv320aic23b_write(client, 8, 0x018);
79 break;
80 case 44100: /* set sample rate to 44.1 kHz */
81 tlv320aic23b_write(client, 8, 0x022);
82 break;
83 case 48000: /* set sample rate to 48 kHz */
84 tlv320aic23b_write(client, 8, 0x000);
85 break;
86 default:
87 return -EINVAL;
88 }
89 break;
90
91 case VIDIOC_G_CTRL:
92 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
93 return -EINVAL;
94 ctrl->value = state->muted;
95 break; 79 break;
96 80 case 44100: /* set sample rate to 44.1 kHz */
97 case VIDIOC_S_CTRL: 81 tlv320aic23b_write(sd, 8, 0x022);
98 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
99 return -EINVAL;
100 state->muted = ctrl->value;
101 tlv320aic23b_write(client, 0, 0x180); /* mute both channels */
102 /* set gain on both channels to +3.0 dB */
103 if (!state->muted)
104 tlv320aic23b_write(client, 0, 0x119);
105 break; 82 break;
106 83 case 48000: /* set sample rate to 48 kHz */
107 case VIDIOC_LOG_STATUS: 84 tlv320aic23b_write(sd, 8, 0x000);
108 v4l_info(client, "Input: %s\n",
109 state->muted ? "muted" : "active");
110 break; 85 break;
111
112 default: 86 default:
113 return -EINVAL; 87 return -EINVAL;
114 } 88 }
115 return 0; 89 return 0;
116} 90}
117 91
92static int tlv320aic23b_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
93{
94 struct tlv320aic23b_state *state = to_state(sd);
95
96 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
97 return -EINVAL;
98 ctrl->value = state->muted;
99 return 0;
100}
101
102static int tlv320aic23b_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
103{
104 struct tlv320aic23b_state *state = to_state(sd);
105
106 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
107 return -EINVAL;
108 state->muted = ctrl->value;
109 tlv320aic23b_write(sd, 0, 0x180); /* mute both channels */
110 /* set gain on both channels to +3.0 dB */
111 if (!state->muted)
112 tlv320aic23b_write(sd, 0, 0x119);
113 return 0;
114}
115
116static int tlv320aic23b_log_status(struct v4l2_subdev *sd)
117{
118 struct tlv320aic23b_state *state = to_state(sd);
119
120 v4l2_info(sd, "Input: %s\n", state->muted ? "muted" : "active");
121 return 0;
122}
123
124static int tlv320aic23b_command(struct i2c_client *client, unsigned cmd, void *arg)
125{
126 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
127}
128
129/* ----------------------------------------------------------------------- */
130
131static const struct v4l2_subdev_core_ops tlv320aic23b_core_ops = {
132 .log_status = tlv320aic23b_log_status,
133 .g_ctrl = tlv320aic23b_g_ctrl,
134 .s_ctrl = tlv320aic23b_s_ctrl,
135};
136
137static const struct v4l2_subdev_audio_ops tlv320aic23b_audio_ops = {
138 .s_clock_freq = tlv320aic23b_s_clock_freq,
139};
140
141static const struct v4l2_subdev_ops tlv320aic23b_ops = {
142 .core = &tlv320aic23b_core_ops,
143 .audio = &tlv320aic23b_audio_ops,
144};
145
118/* ----------------------------------------------------------------------- */ 146/* ----------------------------------------------------------------------- */
119 147
120/* i2c implementation */ 148/* i2c implementation */
@@ -128,6 +156,7 @@ static int tlv320aic23b_probe(struct i2c_client *client,
128 const struct i2c_device_id *id) 156 const struct i2c_device_id *id)
129{ 157{
130 struct tlv320aic23b_state *state; 158 struct tlv320aic23b_state *state;
159 struct v4l2_subdev *sd;
131 160
132 /* Check if the adapter supports the needed features */ 161 /* Check if the adapter supports the needed features */
133 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 162 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -136,32 +165,36 @@ static int tlv320aic23b_probe(struct i2c_client *client,
136 v4l_info(client, "chip found @ 0x%x (%s)\n", 165 v4l_info(client, "chip found @ 0x%x (%s)\n",
137 client->addr << 1, client->adapter->name); 166 client->addr << 1, client->adapter->name);
138 167
139 state = kmalloc(sizeof(struct tlv320aic23b_state), GFP_KERNEL); 168 state = kzalloc(sizeof(struct tlv320aic23b_state), GFP_KERNEL);
140 if (state == NULL) 169 if (state == NULL)
141 return -ENOMEM; 170 return -ENOMEM;
171 sd = &state->sd;
172 v4l2_i2c_subdev_init(sd, client, &tlv320aic23b_ops);
142 state->muted = 0; 173 state->muted = 0;
143 i2c_set_clientdata(client, state);
144 174
145 /* Initialize tlv320aic23b */ 175 /* Initialize tlv320aic23b */
146 176
147 /* RESET */ 177 /* RESET */
148 tlv320aic23b_write(client, 15, 0x000); 178 tlv320aic23b_write(sd, 15, 0x000);
149 /* turn off DAC & mic input */ 179 /* turn off DAC & mic input */
150 tlv320aic23b_write(client, 6, 0x00A); 180 tlv320aic23b_write(sd, 6, 0x00A);
151 /* left-justified, 24-bit, master mode */ 181 /* left-justified, 24-bit, master mode */
152 tlv320aic23b_write(client, 7, 0x049); 182 tlv320aic23b_write(sd, 7, 0x049);
153 /* set gain on both channels to +3.0 dB */ 183 /* set gain on both channels to +3.0 dB */
154 tlv320aic23b_write(client, 0, 0x119); 184 tlv320aic23b_write(sd, 0, 0x119);
155 /* set sample rate to 48 kHz */ 185 /* set sample rate to 48 kHz */
156 tlv320aic23b_write(client, 8, 0x000); 186 tlv320aic23b_write(sd, 8, 0x000);
157 /* activate digital interface */ 187 /* activate digital interface */
158 tlv320aic23b_write(client, 9, 0x001); 188 tlv320aic23b_write(sd, 9, 0x001);
159 return 0; 189 return 0;
160} 190}
161 191
162static int tlv320aic23b_remove(struct i2c_client *client) 192static int tlv320aic23b_remove(struct i2c_client *client)
163{ 193{
164 kfree(i2c_get_clientdata(client)); 194 struct v4l2_subdev *sd = i2c_get_clientdata(client);
195
196 v4l2_device_unregister_subdev(sd);
197 kfree(to_state(sd));
165 return 0; 198 return 0;
166} 199}
167 200
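The hunks above convert tlv320aic23b.c from the old per-client command callback to v4l2_subdev ops: the subdev is embedded in the driver state, registered with v4l2_i2c_subdev_init(), and the state is recovered from the subdev pointer with container_of() via to_state(). Below is a minimal, user-space sketch of that embed-and-recover pattern; all names in it are invented for illustration, and the real drivers use struct v4l2_subdev and v4l2_get_subdevdata() from <media/v4l2-device.h> instead.

/*
 * Hypothetical illustration of the "embed the generic object in the
 * driver state, recover the state with container_of()" pattern used
 * throughout this series.  Compiles as plain C; not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_subdev {
	const char *name;
};

struct fake_state {
	struct fake_subdev sd;	/* must be embedded, not a pointer */
	int muted;
};

static struct fake_state *to_state(struct fake_subdev *sd)
{
	return container_of(sd, struct fake_state, sd);
}

int main(void)
{
	struct fake_state state = { .sd = { .name = "demo" }, .muted = 1 };
	struct fake_subdev *sd = &state.sd;	/* what the core hands back */

	printf("%s: muted=%d\n", sd->name, to_state(sd)->muted);
	return 0;
}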
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 4a7735c6c1a6..97d7509d212f 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -18,7 +18,7 @@
18#include <linux/videodev.h> 18#include <linux/videodev.h>
19#include <media/tuner.h> 19#include <media/tuner.h>
20#include <media/tuner-types.h> 20#include <media/tuner-types.h>
21#include <media/v4l2-common.h> 21#include <media/v4l2-device.h>
22#include <media/v4l2-ioctl.h> 22#include <media/v4l2-ioctl.h>
23#include <media/v4l2-i2c-drv-legacy.h> 23#include <media/v4l2-i2c-drv-legacy.h>
24#include "mt20xx.h" 24#include "mt20xx.h"
@@ -78,6 +78,7 @@ struct tuner {
78 /* device */ 78 /* device */
79 struct dvb_frontend fe; 79 struct dvb_frontend fe;
80 struct i2c_client *i2c; 80 struct i2c_client *i2c;
81 struct v4l2_subdev sd;
81 struct list_head list; 82 struct list_head list;
82 unsigned int using_v4l2:1; 83 unsigned int using_v4l2:1;
83 84
@@ -95,6 +96,11 @@ struct tuner {
95 const char *name; 96 const char *name;
96}; 97};
97 98
99static inline struct tuner *to_tuner(struct v4l2_subdev *sd)
100{
101 return container_of(sd, struct tuner, sd);
102}
103
98/* standard i2c insmod options */ 104/* standard i2c insmod options */
99static unsigned short normal_i2c[] = { 105static unsigned short normal_i2c[] = {
100#if defined(CONFIG_MEDIA_TUNER_TEA5761) || (defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE) && defined(MODULE)) 106#if defined(CONFIG_MEDIA_TUNER_TEA5761) || (defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE) && defined(MODULE))
@@ -213,7 +219,7 @@ static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
213 219
214static void tuner_status(struct dvb_frontend *fe); 220static void tuner_status(struct dvb_frontend *fe);
215 221
216static struct analog_demod_ops tuner_core_ops = { 222static struct analog_demod_ops tuner_analog_ops = {
217 .set_params = fe_set_params, 223 .set_params = fe_set_params,
218 .standby = fe_standby, 224 .standby = fe_standby,
219 .has_signal = fe_has_signal, 225 .has_signal = fe_has_signal,
@@ -224,7 +230,7 @@ static struct analog_demod_ops tuner_core_ops = {
224/* Set tuner frequency, freq in Units of 62.5kHz = 1/16MHz */ 230/* Set tuner frequency, freq in Units of 62.5kHz = 1/16MHz */
225static void set_tv_freq(struct i2c_client *c, unsigned int freq) 231static void set_tv_freq(struct i2c_client *c, unsigned int freq)
226{ 232{
227 struct tuner *t = i2c_get_clientdata(c); 233 struct tuner *t = to_tuner(i2c_get_clientdata(c));
228 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; 234 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
229 235
230 struct analog_parameters params = { 236 struct analog_parameters params = {
@@ -259,7 +265,7 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
259 265
260static void set_radio_freq(struct i2c_client *c, unsigned int freq) 266static void set_radio_freq(struct i2c_client *c, unsigned int freq)
261{ 267{
262 struct tuner *t = i2c_get_clientdata(c); 268 struct tuner *t = to_tuner(i2c_get_clientdata(c));
263 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; 269 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
264 270
265 struct analog_parameters params = { 271 struct analog_parameters params = {
@@ -294,7 +300,7 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
294 300
295static void set_freq(struct i2c_client *c, unsigned long freq) 301static void set_freq(struct i2c_client *c, unsigned long freq)
296{ 302{
297 struct tuner *t = i2c_get_clientdata(c); 303 struct tuner *t = to_tuner(i2c_get_clientdata(c));
298 304
299 switch (t->mode) { 305 switch (t->mode) {
300 case V4L2_TUNER_RADIO: 306 case V4L2_TUNER_RADIO:
@@ -347,7 +353,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
347 unsigned int new_mode_mask, unsigned int new_config, 353 unsigned int new_mode_mask, unsigned int new_config,
348 int (*tuner_callback) (void *dev, int component, int cmd, int arg)) 354 int (*tuner_callback) (void *dev, int component, int cmd, int arg))
349{ 355{
350 struct tuner *t = i2c_get_clientdata(c); 356 struct tuner *t = to_tuner(i2c_get_clientdata(c));
351 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; 357 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
352 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; 358 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
353 unsigned char buffer[4]; 359 unsigned char buffer[4];
@@ -470,7 +476,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
470 t->name = fe_tuner_ops->info.name; 476 t->name = fe_tuner_ops->info.name;
471 477
472 t->fe.analog_demod_priv = t; 478 t->fe.analog_demod_priv = t;
473 memcpy(analog_ops, &tuner_core_ops, 479 memcpy(analog_ops, &tuner_analog_ops,
474 sizeof(struct analog_demod_ops)); 480 sizeof(struct analog_demod_ops));
475 481
476 } else { 482 } else {
@@ -515,7 +521,7 @@ attach_failed:
515 521
516static void set_addr(struct i2c_client *c, struct tuner_setup *tun_setup) 522static void set_addr(struct i2c_client *c, struct tuner_setup *tun_setup)
517{ 523{
518 struct tuner *t = i2c_get_clientdata(c); 524 struct tuner *t = to_tuner(i2c_get_clientdata(c));
519 525
520 if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) && 526 if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
521 (t->mode_mask & tun_setup->mode_mask))) || 527 (t->mode_mask & tun_setup->mode_mask))) ||
@@ -727,6 +733,8 @@ static inline int set_mode(struct i2c_client *client, struct tuner *t, int mode,
727 t->mode = mode; 733 t->mode = mode;
728 734
729 if (check_mode(t, cmd) == -EINVAL) { 735 if (check_mode(t, cmd) == -EINVAL) {
736 tuner_dbg("Tuner doesn't support this mode. "
737 "Putting tuner to sleep\n");
730 t->mode = T_STANDBY; 738 t->mode = T_STANDBY;
731 if (analog_ops->standby) 739 if (analog_ops->standby)
732 analog_ops->standby(&t->fe); 740 analog_ops->standby(&t->fe);
@@ -748,43 +756,58 @@ static inline int check_v4l2(struct tuner *t)
748 return 0; 756 return 0;
749} 757}
750 758
751static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg) 759static int tuner_s_type_addr(struct v4l2_subdev *sd, struct tuner_setup *type)
752{ 760{
753 struct tuner *t = i2c_get_clientdata(client); 761 struct tuner *t = to_tuner(sd);
754 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; 762 struct i2c_client *client = v4l2_get_subdevdata(sd);
763
764 tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=0x%02x\n",
765 type->type,
766 type->addr,
767 type->mode_mask,
768 type->config);
769
770 set_addr(client, type);
771 return 0;
772}
773
774static int tuner_s_radio(struct v4l2_subdev *sd)
775{
776 struct tuner *t = to_tuner(sd);
777 struct i2c_client *client = v4l2_get_subdevdata(sd);
778
779 if (set_mode(client, t, V4L2_TUNER_RADIO, "AUDC_SET_RADIO")
780 == -EINVAL)
781 return 0;
782 if (t->radio_freq)
783 set_freq(client, t->radio_freq);
784 return 0;
785}
786
787static int tuner_s_standby(struct v4l2_subdev *sd, u32 standby)
788{
789 struct tuner *t = to_tuner(sd);
755 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; 790 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
756 791
757 if (tuner_debug > 1) { 792 tuner_dbg("Putting tuner to sleep\n");
758 v4l_i2c_print_ioctl(client,cmd); 793
759 printk("\n"); 794 if (check_mode(t, "TUNER_SET_STANDBY") == -EINVAL)
760 } 795 return 0;
796 t->mode = T_STANDBY;
797 if (analog_ops->standby)
798 analog_ops->standby(&t->fe);
799 return 0;
800}
761 801
762 switch (cmd) {
763 /* --- configuration --- */
764 case TUNER_SET_TYPE_ADDR:
765 tuner_dbg ("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=0x%02x\n",
766 ((struct tuner_setup *)arg)->type,
767 ((struct tuner_setup *)arg)->addr,
768 ((struct tuner_setup *)arg)->mode_mask,
769 ((struct tuner_setup *)arg)->config);
770
771 set_addr(client, (struct tuner_setup *)arg);
772 break;
773 case AUDC_SET_RADIO:
774 if (set_mode(client, t, V4L2_TUNER_RADIO, "AUDC_SET_RADIO")
775 == -EINVAL)
776 return 0;
777 if (t->radio_freq)
778 set_freq(client, t->radio_freq);
779 break;
780 case TUNER_SET_STANDBY:
781 if (check_mode(t, "TUNER_SET_STANDBY") == -EINVAL)
782 return 0;
783 t->mode = T_STANDBY;
784 if (analog_ops->standby)
785 analog_ops->standby(&t->fe);
786 break;
787#ifdef CONFIG_VIDEO_ALLOW_V4L1 802#ifdef CONFIG_VIDEO_ALLOW_V4L1
803static int tuner_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
804{
805 struct tuner *t = to_tuner(sd);
806 struct i2c_client *client = v4l2_get_subdevdata(sd);
807 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
808 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
809
810 switch (cmd) {
788 case VIDIOCSAUDIO: 811 case VIDIOCSAUDIO:
789 if (check_mode(t, "VIDIOCSAUDIO") == -EINVAL) 812 if (check_mode(t, "VIDIOCSAUDIO") == -EINVAL)
790 return 0; 813 return 0;
@@ -897,149 +920,172 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
897 } 920 }
898 return 0; 921 return 0;
899 } 922 }
923 }
924 return -ENOIOCTLCMD;
925}
900#endif 926#endif
901 case TUNER_SET_CONFIG:
902 {
903 struct v4l2_priv_tun_config *cfg = arg;
904 927
905 if (t->type != cfg->tuner) 928static int tuner_s_config(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *cfg)
906 break; 929{
930 struct tuner *t = to_tuner(sd);
931 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
907 932
908 if (analog_ops->set_config) { 933 if (t->type != cfg->tuner)
909 analog_ops->set_config(&t->fe, cfg->priv); 934 return 0;
910 break;
911 }
912 935
913 tuner_dbg("Tuner frontend module has no way to set config\n"); 936 if (analog_ops->set_config) {
914 break; 937 analog_ops->set_config(&t->fe, cfg->priv);
938 return 0;
915 } 939 }
916 /* --- v4l ioctls --- */
917 /* take care: bttv does userspace copying, we'll get a
918 kernel pointer here... */
919 case VIDIOC_S_STD:
920 {
921 v4l2_std_id *id = arg;
922 940
923 if (set_mode (client, t, V4L2_TUNER_ANALOG_TV, "VIDIOC_S_STD") 941 tuner_dbg("Tuner frontend module has no way to set config\n");
924 == -EINVAL) 942 return 0;
925 return 0; 943}
926 944
927 switch_v4l2(); 945/* --- v4l ioctls --- */
946/* take care: bttv does userspace copying, we'll get a
947 kernel pointer here... */
948static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
949{
950 struct tuner *t = to_tuner(sd);
951 struct i2c_client *client = v4l2_get_subdevdata(sd);
928 952
929 t->std = *id; 953 if (set_mode(client, t, V4L2_TUNER_ANALOG_TV, "VIDIOC_S_STD")
930 tuner_fixup_std(t); 954 == -EINVAL)
931 if (t->tv_freq) 955 return 0;
932 set_freq(client, t->tv_freq);
933 break;
934 }
935 case VIDIOC_S_FREQUENCY:
936 {
937 struct v4l2_frequency *f = arg;
938 956
939 if (set_mode (client, t, f->type, "VIDIOC_S_FREQUENCY") 957 switch_v4l2();
940 == -EINVAL)
941 return 0;
942 switch_v4l2();
943 set_freq(client,f->frequency);
944 958
945 break; 959 t->std = std;
946 } 960 tuner_fixup_std(t);
947 case VIDIOC_G_FREQUENCY: 961 if (t->tv_freq)
948 { 962 set_freq(client, t->tv_freq);
949 struct v4l2_frequency *f = arg; 963 return 0;
964}
950 965
951 if (check_mode(t, "VIDIOC_G_FREQUENCY") == -EINVAL) 966static int tuner_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
952 return 0; 967{
953 switch_v4l2(); 968 struct tuner *t = to_tuner(sd);
954 f->type = t->mode; 969 struct i2c_client *client = v4l2_get_subdevdata(sd);
955 if (fe_tuner_ops->get_frequency) {
956 u32 abs_freq;
957
958 fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
959 f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
960 (abs_freq * 2 + 125/2) / 125 :
961 (abs_freq + 62500/2) / 62500;
962 break;
963 }
964 f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
965 t->radio_freq : t->tv_freq;
966 break;
967 }
968 case VIDIOC_G_TUNER:
969 {
970 struct v4l2_tuner *tuner = arg;
971 970
972 if (check_mode(t, "VIDIOC_G_TUNER") == -EINVAL) 971 if (set_mode(client, t, f->type, "VIDIOC_S_FREQUENCY")
973 return 0; 972 == -EINVAL)
974 switch_v4l2(); 973 return 0;
975 974 switch_v4l2();
976 tuner->type = t->mode; 975 set_freq(client, f->frequency);
977 if (analog_ops->get_afc)
978 tuner->afc = analog_ops->get_afc(&t->fe);
979 if (t->mode == V4L2_TUNER_ANALOG_TV)
980 tuner->capability |= V4L2_TUNER_CAP_NORM;
981 if (t->mode != V4L2_TUNER_RADIO) {
982 tuner->rangelow = tv_range[0] * 16;
983 tuner->rangehigh = tv_range[1] * 16;
984 break;
985 }
986 976
987 /* radio mode */ 977 return 0;
988 tuner->rxsubchans = 978}
989 V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
990 if (fe_tuner_ops->get_status) {
991 u32 tuner_status;
992
993 fe_tuner_ops->get_status(&t->fe, &tuner_status);
994 tuner->rxsubchans =
995 (tuner_status & TUNER_STATUS_STEREO) ?
996 V4L2_TUNER_SUB_STEREO :
997 V4L2_TUNER_SUB_MONO;
998 } else {
999 if (analog_ops->is_stereo) {
1000 tuner->rxsubchans =
1001 analog_ops->is_stereo(&t->fe) ?
1002 V4L2_TUNER_SUB_STEREO :
1003 V4L2_TUNER_SUB_MONO;
1004 }
1005 }
1006 if (analog_ops->has_signal)
1007 tuner->signal = analog_ops->has_signal(&t->fe);
1008 tuner->capability |=
1009 V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
1010 tuner->audmode = t->audmode;
1011 tuner->rangelow = radio_range[0] * 16000;
1012 tuner->rangehigh = radio_range[1] * 16000;
1013 break;
1014 }
1015 case VIDIOC_S_TUNER:
1016 {
1017 struct v4l2_tuner *tuner = arg;
1018 979
1019 if (check_mode(t, "VIDIOC_S_TUNER") == -EINVAL) 980static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
1020 return 0; 981{
982 struct tuner *t = to_tuner(sd);
983 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
1021 984
1022 switch_v4l2(); 985 if (check_mode(t, "VIDIOC_G_FREQUENCY") == -EINVAL)
986 return 0;
987 switch_v4l2();
988 f->type = t->mode;
989 if (fe_tuner_ops->get_frequency) {
990 u32 abs_freq;
991
992 fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
993 f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
994 (abs_freq * 2 + 125/2) / 125 :
995 (abs_freq + 62500/2) / 62500;
996 return 0;
997 }
998 f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
999 t->radio_freq : t->tv_freq;
1000 return 0;
1001}
1023 1002
1024 /* do nothing unless we're a radio tuner */ 1003static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1025 if (t->mode != V4L2_TUNER_RADIO) 1004{
1026 break; 1005 struct tuner *t = to_tuner(sd);
1027 t->audmode = tuner->audmode; 1006 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
1028 set_radio_freq(client, t->radio_freq); 1007 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
1029 break; 1008
1009 if (check_mode(t, "VIDIOC_G_TUNER") == -EINVAL)
1010 return 0;
1011 switch_v4l2();
1012
1013 vt->type = t->mode;
1014 if (analog_ops->get_afc)
1015 vt->afc = analog_ops->get_afc(&t->fe);
1016 if (t->mode == V4L2_TUNER_ANALOG_TV)
1017 vt->capability |= V4L2_TUNER_CAP_NORM;
1018 if (t->mode != V4L2_TUNER_RADIO) {
1019 vt->rangelow = tv_range[0] * 16;
1020 vt->rangehigh = tv_range[1] * 16;
1021 return 0;
1022 }
1023
1024 /* radio mode */
1025 vt->rxsubchans =
1026 V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
1027 if (fe_tuner_ops->get_status) {
1028 u32 tuner_status;
1029
1030 fe_tuner_ops->get_status(&t->fe, &tuner_status);
1031 vt->rxsubchans =
1032 (tuner_status & TUNER_STATUS_STEREO) ?
1033 V4L2_TUNER_SUB_STEREO :
1034 V4L2_TUNER_SUB_MONO;
1035 } else {
1036 if (analog_ops->is_stereo) {
1037 vt->rxsubchans =
1038 analog_ops->is_stereo(&t->fe) ?
1039 V4L2_TUNER_SUB_STEREO :
1040 V4L2_TUNER_SUB_MONO;
1030 } 1041 }
1031 case VIDIOC_LOG_STATUS:
1032 if (analog_ops->tuner_status)
1033 analog_ops->tuner_status(&t->fe);
1034 break;
1035 } 1042 }
1043 if (analog_ops->has_signal)
1044 vt->signal = analog_ops->has_signal(&t->fe);
1045 vt->capability |=
1046 V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
1047 vt->audmode = t->audmode;
1048 vt->rangelow = radio_range[0] * 16000;
1049 vt->rangehigh = radio_range[1] * 16000;
1050 return 0;
1051}
1052
1053static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1054{
1055 struct tuner *t = to_tuner(sd);
1056 struct i2c_client *client = v4l2_get_subdevdata(sd);
1036 1057
1058 if (check_mode(t, "VIDIOC_S_TUNER") == -EINVAL)
1059 return 0;
1060
1061 switch_v4l2();
1062
1063 /* do nothing unless we're a radio tuner */
1064 if (t->mode != V4L2_TUNER_RADIO)
1065 return 0;
1066 t->audmode = vt->audmode;
1067 set_radio_freq(client, t->radio_freq);
1037 return 0; 1068 return 0;
1038} 1069}
1039 1070
1071static int tuner_log_status(struct v4l2_subdev *sd)
1072{
1073 struct tuner *t = to_tuner(sd);
1074 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
1075
1076 if (analog_ops->tuner_status)
1077 analog_ops->tuner_status(&t->fe);
1078 return 0;
1079}
1080
1081static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
1082{
1083 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
1084}
1085
1040static int tuner_suspend(struct i2c_client *c, pm_message_t state) 1086static int tuner_suspend(struct i2c_client *c, pm_message_t state)
1041{ 1087{
1042 struct tuner *t = i2c_get_clientdata(c); 1088 struct tuner *t = to_tuner(i2c_get_clientdata(c));
1043 1089
1044 tuner_dbg("suspend\n"); 1090 tuner_dbg("suspend\n");
1045 /* FIXME: power down ??? */ 1091 /* FIXME: power down ??? */
@@ -1048,7 +1094,7 @@ static int tuner_suspend(struct i2c_client *c, pm_message_t state)
1048 1094
1049static int tuner_resume(struct i2c_client *c) 1095static int tuner_resume(struct i2c_client *c)
1050{ 1096{
1051 struct tuner *t = i2c_get_clientdata(c); 1097 struct tuner *t = to_tuner(i2c_get_clientdata(c));
1052 1098
1053 tuner_dbg("resume\n"); 1099 tuner_dbg("resume\n");
1054 if (V4L2_TUNER_RADIO == t->mode) { 1100 if (V4L2_TUNER_RADIO == t->mode) {
@@ -1061,6 +1107,32 @@ static int tuner_resume(struct i2c_client *c)
1061 return 0; 1107 return 0;
1062} 1108}
1063 1109
1110/* ----------------------------------------------------------------------- */
1111
1112static const struct v4l2_subdev_core_ops tuner_core_ops = {
1113 .log_status = tuner_log_status,
1114 .s_standby = tuner_s_standby,
1115#ifdef CONFIG_VIDEO_ALLOW_V4L1
1116 .ioctl = tuner_ioctl,
1117#endif
1118};
1119
1120static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
1121 .s_std = tuner_s_std,
1122 .s_radio = tuner_s_radio,
1123 .g_tuner = tuner_g_tuner,
1124 .s_tuner = tuner_s_tuner,
1125 .s_frequency = tuner_s_frequency,
1126 .g_frequency = tuner_g_frequency,
1127 .s_type_addr = tuner_s_type_addr,
1128 .s_config = tuner_s_config,
1129};
1130
1131static const struct v4l2_subdev_ops tuner_ops = {
1132 .core = &tuner_core_ops,
1133 .tuner = &tuner_tuner_ops,
1134};
1135
1064/* ---------------------------------------------------------------------- */ 1136/* ---------------------------------------------------------------------- */
1065 1137
1066static LIST_HEAD(tuner_list); 1138static LIST_HEAD(tuner_list);
@@ -1109,9 +1181,9 @@ static int tuner_probe(struct i2c_client *client,
1109 t = kzalloc(sizeof(struct tuner), GFP_KERNEL); 1181 t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
1110 if (NULL == t) 1182 if (NULL == t)
1111 return -ENOMEM; 1183 return -ENOMEM;
1184 v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops);
1112 t->i2c = client; 1185 t->i2c = client;
1113 t->name = "(tuner unset)"; 1186 t->name = "(tuner unset)";
1114 i2c_set_clientdata(client, t);
1115 t->type = UNSET; 1187 t->type = UNSET;
1116 t->audmode = V4L2_TUNER_MODE_STEREO; 1188 t->audmode = V4L2_TUNER_MODE_STEREO;
1117 t->mode_mask = T_UNINITIALIZED; 1189 t->mode_mask = T_UNINITIALIZED;
@@ -1261,8 +1333,9 @@ static int tuner_legacy_probe(struct i2c_adapter *adap)
1261 1333
1262static int tuner_remove(struct i2c_client *client) 1334static int tuner_remove(struct i2c_client *client)
1263{ 1335{
1264 struct tuner *t = i2c_get_clientdata(client); 1336 struct tuner *t = to_tuner(i2c_get_clientdata(client));
1265 1337
1338 v4l2_device_unregister_subdev(&t->sd);
1266 tuner_detach(&t->fe); 1339 tuner_detach(&t->fe);
1267 t->fe.analog_demod_priv = NULL; 1340 t->fe.analog_demod_priv = NULL;
1268 1341
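The tuner-core.c hunks above break the single tuner_command() switch into one handler per operation, group the handlers in const v4l2_subdev_core_ops / v4l2_subdev_tuner_ops tables, and keep only a thin tuner_command() that forwards legacy i2c commands through v4l2_subdev_command(). The following user-space sketch mirrors that shape with invented names (demo_*), purely to show the ops-table dispatch plus legacy forwarding idea; it is not the kernel API.

/*
 * Hypothetical sketch: per-op functions, a const ops table, and a thin
 * legacy "command" entry point that forwards to the table.
 */
#include <stdio.h>

struct demo_subdev;

struct demo_tuner_ops {
	int (*s_radio)(struct demo_subdev *sd);
	int (*s_frequency)(struct demo_subdev *sd, unsigned long freq);
};

struct demo_subdev {
	const struct demo_tuner_ops *tuner;
};

static int demo_s_radio(struct demo_subdev *sd)
{
	(void)sd;
	printf("switching to radio mode\n");
	return 0;
}

static int demo_s_frequency(struct demo_subdev *sd, unsigned long freq)
{
	(void)sd;
	printf("tuning to %lu\n", freq);
	return 0;
}

static const struct demo_tuner_ops demo_tuner = {
	.s_radio = demo_s_radio,
	.s_frequency = demo_s_frequency,
};

/* legacy entry point: decode the old command and call the new op */
enum { CMD_SET_RADIO, CMD_SET_FREQ };

static int demo_command(struct demo_subdev *sd, int cmd, void *arg)
{
	switch (cmd) {
	case CMD_SET_RADIO:
		return sd->tuner->s_radio(sd);
	case CMD_SET_FREQ:
		return sd->tuner->s_frequency(sd, *(unsigned long *)arg);
	}
	return -1;
}

int main(void)
{
	struct demo_subdev sd = { .tuner = &demo_tuner };
	unsigned long freq = 101800;

	demo_command(&sd, CMD_SET_RADIO, NULL);
	demo_command(&sd, CMD_SET_FREQ, &freq);
	return 0;
}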
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index 3720f0e03a16..d0c794da735b 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -33,8 +33,7 @@
33#include <linux/freezer.h> 33#include <linux/freezer.h>
34 34
35#include <media/tvaudio.h> 35#include <media/tvaudio.h>
36#include <media/v4l2-common.h> 36#include <media/v4l2-device.h>
37#include <media/v4l2-ioctl.h>
38#include <media/v4l2-chip-ident.h> 37#include <media/v4l2-chip-ident.h>
39#include <media/v4l2-i2c-drv-legacy.h> 38#include <media/v4l2-i2c-drv-legacy.h>
40 39
@@ -110,7 +109,7 @@ struct CHIPDESC {
110 109
111/* current state of the chip */ 110/* current state of the chip */
112struct CHIPSTATE { 111struct CHIPSTATE {
113 struct i2c_client *c; 112 struct v4l2_subdev sd;
114 113
115 /* chip-specific description - should point to 114 /* chip-specific description - should point to
116 an entry at CHIPDESC table */ 115 an entry at CHIPDESC table */
@@ -132,6 +131,11 @@ struct CHIPSTATE {
132 int audmode; 131 int audmode;
133}; 132};
134 133
134static inline struct CHIPSTATE *to_state(struct v4l2_subdev *sd)
135{
136 return container_of(sd, struct CHIPSTATE, sd);
137}
138
135/* ---------------------------------------------------------------------- */ 139/* ---------------------------------------------------------------------- */
136/* i2c addresses */ 140/* i2c addresses */
137 141
@@ -152,34 +156,34 @@ I2C_CLIENT_INSMOD;
152 156
153static int chip_write(struct CHIPSTATE *chip, int subaddr, int val) 157static int chip_write(struct CHIPSTATE *chip, int subaddr, int val)
154{ 158{
159 struct v4l2_subdev *sd = &chip->sd;
160 struct i2c_client *c = v4l2_get_subdevdata(sd);
155 unsigned char buffer[2]; 161 unsigned char buffer[2];
156 162
157 if (subaddr < 0) { 163 if (subaddr < 0) {
158 v4l_dbg(1, debug, chip->c, "%s: chip_write: 0x%x\n", 164 v4l2_dbg(1, debug, sd, "chip_write: 0x%x\n", val);
159 chip->c->name, val);
160 chip->shadow.bytes[1] = val; 165 chip->shadow.bytes[1] = val;
161 buffer[0] = val; 166 buffer[0] = val;
162 if (1 != i2c_master_send(chip->c,buffer,1)) { 167 if (1 != i2c_master_send(c, buffer, 1)) {
163 v4l_warn(chip->c, "%s: I/O error (write 0x%x)\n", 168 v4l2_warn(sd, "I/O error (write 0x%x)\n", val);
164 chip->c->name, val);
165 return -1; 169 return -1;
166 } 170 }
167 } else { 171 } else {
168 if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) { 172 if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
169 v4l_info(chip->c, 173 v4l2_info(sd,
170 "Tried to access a non-existent register: %d\n", 174 "Tried to access a non-existent register: %d\n",
171 subaddr); 175 subaddr);
172 return -EINVAL; 176 return -EINVAL;
173 } 177 }
174 178
175 v4l_dbg(1, debug, chip->c, "%s: chip_write: reg%d=0x%x\n", 179 v4l2_dbg(1, debug, sd, "chip_write: reg%d=0x%x\n",
176 chip->c->name, subaddr, val); 180 subaddr, val);
177 chip->shadow.bytes[subaddr+1] = val; 181 chip->shadow.bytes[subaddr+1] = val;
178 buffer[0] = subaddr; 182 buffer[0] = subaddr;
179 buffer[1] = val; 183 buffer[1] = val;
180 if (2 != i2c_master_send(chip->c,buffer,2)) { 184 if (2 != i2c_master_send(c, buffer, 2)) {
181 v4l_warn(chip->c, "%s: I/O error (write reg%d=0x%x)\n", 185 v4l2_warn(sd, "I/O error (write reg%d=0x%x)\n",
182 chip->c->name, subaddr, val); 186 subaddr, val);
183 return -1; 187 return -1;
184 } 188 }
185 } 189 }
@@ -189,12 +193,14 @@ static int chip_write(struct CHIPSTATE *chip, int subaddr, int val)
189static int chip_write_masked(struct CHIPSTATE *chip, 193static int chip_write_masked(struct CHIPSTATE *chip,
190 int subaddr, int val, int mask) 194 int subaddr, int val, int mask)
191{ 195{
196 struct v4l2_subdev *sd = &chip->sd;
197
192 if (mask != 0) { 198 if (mask != 0) {
193 if (subaddr < 0) { 199 if (subaddr < 0) {
194 val = (chip->shadow.bytes[1] & ~mask) | (val & mask); 200 val = (chip->shadow.bytes[1] & ~mask) | (val & mask);
195 } else { 201 } else {
196 if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) { 202 if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
197 v4l_info(chip->c, 203 v4l2_info(sd,
198 "Tried to access a non-existent register: %d\n", 204 "Tried to access a non-existent register: %d\n",
199 subaddr); 205 subaddr);
200 return -EINVAL; 206 return -EINVAL;
@@ -208,45 +214,51 @@ static int chip_write_masked(struct CHIPSTATE *chip,
208 214
209static int chip_read(struct CHIPSTATE *chip) 215static int chip_read(struct CHIPSTATE *chip)
210{ 216{
217 struct v4l2_subdev *sd = &chip->sd;
218 struct i2c_client *c = v4l2_get_subdevdata(sd);
211 unsigned char buffer; 219 unsigned char buffer;
212 220
213 if (1 != i2c_master_recv(chip->c,&buffer,1)) { 221 if (1 != i2c_master_recv(c, &buffer, 1)) {
214 v4l_warn(chip->c, "%s: I/O error (read)\n", 222 v4l2_warn(sd, "I/O error (read)\n");
215 chip->c->name);
216 return -1; 223 return -1;
217 } 224 }
218 v4l_dbg(1, debug, chip->c, "%s: chip_read: 0x%x\n",chip->c->name, buffer); 225 v4l2_dbg(1, debug, sd, "chip_read: 0x%x\n", buffer);
219 return buffer; 226 return buffer;
220} 227}
221 228
222static int chip_read2(struct CHIPSTATE *chip, int subaddr) 229static int chip_read2(struct CHIPSTATE *chip, int subaddr)
223{ 230{
231 struct v4l2_subdev *sd = &chip->sd;
232 struct i2c_client *c = v4l2_get_subdevdata(sd);
224 unsigned char write[1]; 233 unsigned char write[1];
225 unsigned char read[1]; 234 unsigned char read[1];
226 struct i2c_msg msgs[2] = { 235 struct i2c_msg msgs[2] = {
227 { chip->c->addr, 0, 1, write }, 236 { c->addr, 0, 1, write },
228 { chip->c->addr, I2C_M_RD, 1, read } 237 { c->addr, I2C_M_RD, 1, read }
229 }; 238 };
239
230 write[0] = subaddr; 240 write[0] = subaddr;
231 241
232 if (2 != i2c_transfer(chip->c->adapter,msgs,2)) { 242 if (2 != i2c_transfer(c->adapter, msgs, 2)) {
233 v4l_warn(chip->c, "%s: I/O error (read2)\n", chip->c->name); 243 v4l2_warn(sd, "I/O error (read2)\n");
234 return -1; 244 return -1;
235 } 245 }
236 v4l_dbg(1, debug, chip->c, "%s: chip_read2: reg%d=0x%x\n", 246 v4l2_dbg(1, debug, sd, "chip_read2: reg%d=0x%x\n",
237 chip->c->name, subaddr,read[0]); 247 subaddr, read[0]);
238 return read[0]; 248 return read[0];
239} 249}
240 250
241static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd) 251static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
242{ 252{
253 struct v4l2_subdev *sd = &chip->sd;
254 struct i2c_client *c = v4l2_get_subdevdata(sd);
243 int i; 255 int i;
244 256
245 if (0 == cmd->count) 257 if (0 == cmd->count)
246 return 0; 258 return 0;
247 259
248 if (cmd->count + cmd->bytes[0] - 1 >= ARRAY_SIZE(chip->shadow.bytes)) { 260 if (cmd->count + cmd->bytes[0] - 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
249 v4l_info(chip->c, 261 v4l2_info(sd,
250 "Tried to access a non-existent register range: %d to %d\n", 262 "Tried to access a non-existent register range: %d to %d\n",
251 cmd->bytes[0] + 1, cmd->bytes[0] + cmd->count - 1); 263 cmd->bytes[0] + 1, cmd->bytes[0] + cmd->count - 1);
252 return -EINVAL; 264 return -EINVAL;
@@ -255,19 +267,19 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
255 /* FIXME: it seems that the shadow bytes are wrong bellow !*/ 267 /* FIXME: it seems that the shadow bytes are wrong bellow !*/
256 268
257 /* update our shadow register set; print bytes if (debug > 0) */ 269 /* update our shadow register set; print bytes if (debug > 0) */
258 v4l_dbg(1, debug, chip->c, "%s: chip_cmd(%s): reg=%d, data:", 270 v4l2_dbg(1, debug, sd, "chip_cmd(%s): reg=%d, data:",
259 chip->c->name, name,cmd->bytes[0]); 271 name, cmd->bytes[0]);
260 for (i = 1; i < cmd->count; i++) { 272 for (i = 1; i < cmd->count; i++) {
261 if (debug) 273 if (debug)
262 printk(" 0x%x",cmd->bytes[i]); 274 printk(KERN_CONT " 0x%x", cmd->bytes[i]);
263 chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i]; 275 chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i];
264 } 276 }
265 if (debug) 277 if (debug)
266 printk("\n"); 278 printk(KERN_CONT "\n");
267 279
268 /* send data to the chip */ 280 /* send data to the chip */
269 if (cmd->count != i2c_master_send(chip->c,cmd->bytes,cmd->count)) { 281 if (cmd->count != i2c_master_send(c, cmd->bytes, cmd->count)) {
270 v4l_warn(chip->c, "%s: I/O error (%s)\n", chip->c->name, name); 282 v4l2_warn(sd, "I/O error (%s)\n", name);
271 return -1; 283 return -1;
272 } 284 }
273 return 0; 285 return 0;
@@ -290,9 +302,10 @@ static int chip_thread(void *data)
290{ 302{
291 struct CHIPSTATE *chip = data; 303 struct CHIPSTATE *chip = data;
292 struct CHIPDESC *desc = chip->desc; 304 struct CHIPDESC *desc = chip->desc;
305 struct v4l2_subdev *sd = &chip->sd;
293 int mode; 306 int mode;
294 307
295 v4l_dbg(1, debug, chip->c, "%s: thread started\n", chip->c->name); 308 v4l2_dbg(1, debug, sd, "thread started\n");
296 set_freezable(); 309 set_freezable();
297 for (;;) { 310 for (;;) {
298 set_current_state(TASK_INTERRUPTIBLE); 311 set_current_state(TASK_INTERRUPTIBLE);
@@ -302,7 +315,7 @@ static int chip_thread(void *data)
302 try_to_freeze(); 315 try_to_freeze();
303 if (kthread_should_stop()) 316 if (kthread_should_stop())
304 break; 317 break;
305 v4l_dbg(1, debug, chip->c, "%s: thread wakeup\n", chip->c->name); 318 v4l2_dbg(1, debug, sd, "thread wakeup\n");
306 319
307 /* don't do anything for radio or if mode != auto */ 320 /* don't do anything for radio or if mode != auto */
308 if (chip->radio || chip->mode != 0) 321 if (chip->radio || chip->mode != 0)
@@ -314,8 +327,7 @@ static int chip_thread(void *data)
314 continue; 327 continue;
315 328
316 /* chip detected a new audio mode - set it */ 329 /* chip detected a new audio mode - set it */
317 v4l_dbg(1, debug, chip->c, "%s: thread checkmode\n", 330 v4l2_dbg(1, debug, sd, "thread checkmode\n");
318 chip->c->name);
319 331
320 chip->prevmode = mode; 332 chip->prevmode = mode;
321 333
@@ -334,7 +346,7 @@ static int chip_thread(void *data)
334 mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000)); 346 mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
335 } 347 }
336 348
337 v4l_dbg(1, debug, chip->c, "%s: thread exiting\n", chip->c->name); 349 v4l2_dbg(1, debug, sd, "thread exiting\n");
338 return 0; 350 return 0;
339} 351}
340 352
@@ -363,6 +375,7 @@ static int chip_thread(void *data)
363 375
364static int tda9840_getmode(struct CHIPSTATE *chip) 376static int tda9840_getmode(struct CHIPSTATE *chip)
365{ 377{
378 struct v4l2_subdev *sd = &chip->sd;
366 int val, mode; 379 int val, mode;
367 380
368 val = chip_read(chip); 381 val = chip_read(chip);
@@ -372,7 +385,7 @@ static int tda9840_getmode(struct CHIPSTATE *chip)
372 if (val & TDA9840_ST_STEREO) 385 if (val & TDA9840_ST_STEREO)
373 mode |= V4L2_TUNER_MODE_STEREO; 386 mode |= V4L2_TUNER_MODE_STEREO;
374 387
375 v4l_dbg(1, debug, chip->c, "tda9840_getmode(): raw chip read: %d, return: %d\n", 388 v4l2_dbg(1, debug, sd, "tda9840_getmode(): raw chip read: %d, return: %d\n",
376 val, mode); 389 val, mode);
377 return mode; 390 return mode;
378} 391}
@@ -668,6 +681,7 @@ static void tda985x_setmode(struct CHIPSTATE *chip, int mode)
668 681
669static int tda9873_getmode(struct CHIPSTATE *chip) 682static int tda9873_getmode(struct CHIPSTATE *chip)
670{ 683{
684 struct v4l2_subdev *sd = &chip->sd;
671 int val,mode; 685 int val,mode;
672 686
673 val = chip_read(chip); 687 val = chip_read(chip);
@@ -676,23 +690,24 @@ static int tda9873_getmode(struct CHIPSTATE *chip)
676 mode |= V4L2_TUNER_MODE_STEREO; 690 mode |= V4L2_TUNER_MODE_STEREO;
677 if (val & TDA9873_DUAL) 691 if (val & TDA9873_DUAL)
678 mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; 692 mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
679 v4l_dbg(1, debug, chip->c, "tda9873_getmode(): raw chip read: %d, return: %d\n", 693 v4l2_dbg(1, debug, sd, "tda9873_getmode(): raw chip read: %d, return: %d\n",
680 val, mode); 694 val, mode);
681 return mode; 695 return mode;
682} 696}
683 697
684static void tda9873_setmode(struct CHIPSTATE *chip, int mode) 698static void tda9873_setmode(struct CHIPSTATE *chip, int mode)
685{ 699{
700 struct v4l2_subdev *sd = &chip->sd;
686 int sw_data = chip->shadow.bytes[TDA9873_SW+1] & ~ TDA9873_TR_MASK; 701 int sw_data = chip->shadow.bytes[TDA9873_SW+1] & ~ TDA9873_TR_MASK;
687 /* int adj_data = chip->shadow.bytes[TDA9873_AD+1] ; */ 702 /* int adj_data = chip->shadow.bytes[TDA9873_AD+1] ; */
688 703
689 if ((sw_data & TDA9873_INP_MASK) != TDA9873_INTERNAL) { 704 if ((sw_data & TDA9873_INP_MASK) != TDA9873_INTERNAL) {
690 v4l_dbg(1, debug, chip->c, "tda9873_setmode(): external input\n"); 705 v4l2_dbg(1, debug, sd, "tda9873_setmode(): external input\n");
691 return; 706 return;
692 } 707 }
693 708
694 v4l_dbg(1, debug, chip->c, "tda9873_setmode(): chip->shadow.bytes[%d] = %d\n", TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]); 709 v4l2_dbg(1, debug, sd, "tda9873_setmode(): chip->shadow.bytes[%d] = %d\n", TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]);
695 v4l_dbg(1, debug, chip->c, "tda9873_setmode(): sw_data = %d\n", sw_data); 710 v4l2_dbg(1, debug, sd, "tda9873_setmode(): sw_data = %d\n", sw_data);
696 711
697 switch (mode) { 712 switch (mode) {
698 case V4L2_TUNER_MODE_MONO: 713 case V4L2_TUNER_MODE_MONO:
@@ -713,7 +728,7 @@ static void tda9873_setmode(struct CHIPSTATE *chip, int mode)
713 } 728 }
714 729
715 chip_write(chip, TDA9873_SW, sw_data); 730 chip_write(chip, TDA9873_SW, sw_data);
716 v4l_dbg(1, debug, chip->c, "tda9873_setmode(): req. mode %d; chip_write: %d\n", 731 v4l2_dbg(1, debug, sd, "tda9873_setmode(): req. mode %d; chip_write: %d\n",
717 mode, sw_data); 732 mode, sw_data);
718} 733}
719 734
@@ -822,6 +837,8 @@ static struct tda9874a_MODES {
822 837
823static int tda9874a_setup(struct CHIPSTATE *chip) 838static int tda9874a_setup(struct CHIPSTATE *chip)
824{ 839{
840 struct v4l2_subdev *sd = &chip->sd;
841
825 chip_write(chip, TDA9874A_AGCGR, 0x00); /* 0 dB */ 842 chip_write(chip, TDA9874A_AGCGR, 0x00); /* 0 dB */
826 chip_write(chip, TDA9874A_GCONR, tda9874a_GCONR); 843 chip_write(chip, TDA9874A_GCONR, tda9874a_GCONR);
827 chip_write(chip, TDA9874A_MSR, (tda9874a_mode) ? 0x03:0x02); 844 chip_write(chip, TDA9874A_MSR, (tda9874a_mode) ? 0x03:0x02);
@@ -852,13 +869,14 @@ static int tda9874a_setup(struct CHIPSTATE *chip)
852 chip_write(chip, TDA9874A_SDACOSR, (tda9874a_mode) ? 0x81:0x80); 869 chip_write(chip, TDA9874A_SDACOSR, (tda9874a_mode) ? 0x81:0x80);
853 chip_write(chip, TDA9874A_AOSR, 0x00); /* or 0x10 */ 870 chip_write(chip, TDA9874A_AOSR, 0x00); /* or 0x10 */
854 } 871 }
855 v4l_dbg(1, debug, chip->c, "tda9874a_setup(): %s [0x%02X].\n", 872 v4l2_dbg(1, debug, sd, "tda9874a_setup(): %s [0x%02X].\n",
856 tda9874a_modelist[tda9874a_STD].name,tda9874a_STD); 873 tda9874a_modelist[tda9874a_STD].name,tda9874a_STD);
857 return 1; 874 return 1;
858} 875}
859 876
860static int tda9874a_getmode(struct CHIPSTATE *chip) 877static int tda9874a_getmode(struct CHIPSTATE *chip)
861{ 878{
879 struct v4l2_subdev *sd = &chip->sd;
862 int dsr,nsr,mode; 880 int dsr,nsr,mode;
863 int necr; /* just for debugging */ 881 int necr; /* just for debugging */
864 882
@@ -895,16 +913,18 @@ static int tda9874a_getmode(struct CHIPSTATE *chip)
895 mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; 913 mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
896 } 914 }
897 915
898 v4l_dbg(1, debug, chip->c, "tda9874a_getmode(): DSR=0x%X, NSR=0x%X, NECR=0x%X, return: %d.\n", 916 v4l2_dbg(1, debug, sd, "tda9874a_getmode(): DSR=0x%X, NSR=0x%X, NECR=0x%X, return: %d.\n",
899 dsr, nsr, necr, mode); 917 dsr, nsr, necr, mode);
900 return mode; 918 return mode;
901} 919}
902 920
903static void tda9874a_setmode(struct CHIPSTATE *chip, int mode) 921static void tda9874a_setmode(struct CHIPSTATE *chip, int mode)
904{ 922{
923 struct v4l2_subdev *sd = &chip->sd;
924
905 /* Disable/enable NICAM auto-muting (based on DSR.RSSF status bit). */ 925 /* Disable/enable NICAM auto-muting (based on DSR.RSSF status bit). */
906 /* If auto-muting is disabled, we can hear a signal of degrading quality. */ 926 /* If auto-muting is disabled, we can hear a signal of degrading quality. */
907 if(tda9874a_mode) { 927 if (tda9874a_mode) {
908 if(chip->shadow.bytes[MAXREGS-2] & 0x20) /* DSR.RSSF=1 */ 928 if(chip->shadow.bytes[MAXREGS-2] & 0x20) /* DSR.RSSF=1 */
909 tda9874a_NCONR &= 0xfe; /* enable */ 929 tda9874a_NCONR &= 0xfe; /* enable */
910 else 930 else
@@ -941,7 +961,7 @@ static void tda9874a_setmode(struct CHIPSTATE *chip, int mode)
941 chip_write(chip, TDA9874A_AOSR, aosr); 961 chip_write(chip, TDA9874A_AOSR, aosr);
942 chip_write(chip, TDA9874A_MDACOSR, mdacosr); 962 chip_write(chip, TDA9874A_MDACOSR, mdacosr);
943 963
944 v4l_dbg(1, debug, chip->c, "tda9874a_setmode(): req. mode %d; AOSR=0x%X, MDACOSR=0x%X.\n", 964 v4l2_dbg(1, debug, sd, "tda9874a_setmode(): req. mode %d; AOSR=0x%X, MDACOSR=0x%X.\n",
945 mode, aosr, mdacosr); 965 mode, aosr, mdacosr);
946 966
947 } else { /* dic == 0x07 */ 967 } else { /* dic == 0x07 */
@@ -976,13 +996,14 @@ static void tda9874a_setmode(struct CHIPSTATE *chip, int mode)
976 chip_write(chip, TDA9874A_FMMR, fmmr); 996 chip_write(chip, TDA9874A_FMMR, fmmr);
977 chip_write(chip, TDA9874A_AOSR, aosr); 997 chip_write(chip, TDA9874A_AOSR, aosr);
978 998
979 v4l_dbg(1, debug, chip->c, "tda9874a_setmode(): req. mode %d; FMMR=0x%X, AOSR=0x%X.\n", 999 v4l2_dbg(1, debug, sd, "tda9874a_setmode(): req. mode %d; FMMR=0x%X, AOSR=0x%X.\n",
980 mode, fmmr, aosr); 1000 mode, fmmr, aosr);
981 } 1001 }
982} 1002}
983 1003
984static int tda9874a_checkit(struct CHIPSTATE *chip) 1004static int tda9874a_checkit(struct CHIPSTATE *chip)
985{ 1005{
1006 struct v4l2_subdev *sd = &chip->sd;
986 int dic,sic; /* device id. and software id. codes */ 1007 int dic,sic; /* device id. and software id. codes */
987 1008
988 if(-1 == (dic = chip_read2(chip,TDA9874A_DIC))) 1009 if(-1 == (dic = chip_read2(chip,TDA9874A_DIC)))
@@ -990,10 +1011,10 @@ static int tda9874a_checkit(struct CHIPSTATE *chip)
990 if(-1 == (sic = chip_read2(chip,TDA9874A_SIC))) 1011 if(-1 == (sic = chip_read2(chip,TDA9874A_SIC)))
991 return 0; 1012 return 0;
992 1013
993 v4l_dbg(1, debug, chip->c, "tda9874a_checkit(): DIC=0x%X, SIC=0x%X.\n", dic, sic); 1014 v4l2_dbg(1, debug, sd, "tda9874a_checkit(): DIC=0x%X, SIC=0x%X.\n", dic, sic);
994 1015
995 if((dic == 0x11)||(dic == 0x07)) { 1016 if((dic == 0x11)||(dic == 0x07)) {
996 v4l_info(chip->c, "found tda9874%s.\n", (dic == 0x11) ? "a":"h"); 1017 v4l2_info(sd, "found tda9874%s.\n", (dic == 0x11) ? "a" : "h");
997 tda9874a_dic = dic; /* remember device id. */ 1018 tda9874a_dic = dic; /* remember device id. */
998 return 1; 1019 return 1;
999 } 1020 }
@@ -1113,12 +1134,12 @@ static int tda8425_shift12(int val) { return (val >> 12) | 0xf0; }
1113static int tda8425_initialize(struct CHIPSTATE *chip) 1134static int tda8425_initialize(struct CHIPSTATE *chip)
1114{ 1135{
1115 struct CHIPDESC *desc = chip->desc; 1136 struct CHIPDESC *desc = chip->desc;
1137 struct i2c_client *c = v4l2_get_subdevdata(&chip->sd);
1116 int inputmap[4] = { /* tuner */ TDA8425_S1_CH2, /* radio */ TDA8425_S1_CH1, 1138 int inputmap[4] = { /* tuner */ TDA8425_S1_CH2, /* radio */ TDA8425_S1_CH1,
1117 /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF}; 1139 /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF};
1118 1140
1119 if (chip->c->adapter->id == I2C_HW_B_RIVA) { 1141 if (c->adapter->id == I2C_HW_B_RIVA)
1120 memcpy (desc->inputmap, inputmap, sizeof (inputmap)); 1142 memcpy(desc->inputmap, inputmap, sizeof(inputmap));
1121 }
1122 return 0; 1143 return 0;
1123} 1144}
1124 1145
@@ -1215,9 +1236,11 @@ static audiocmd ta8874z_sub = {2, { TA8874Z_MODE_SUB, TA8874Z_SEPARATION_DEFAULT
1215 1236
1216static void ta8874z_setmode(struct CHIPSTATE *chip, int mode) 1237static void ta8874z_setmode(struct CHIPSTATE *chip, int mode)
1217{ 1238{
1239 struct v4l2_subdev *sd = &chip->sd;
1218 int update = 1; 1240 int update = 1;
1219 audiocmd *t = NULL; 1241 audiocmd *t = NULL;
1220 v4l_dbg(1, debug, chip->c, "ta8874z_setmode(): mode: 0x%02x\n", mode); 1242
1243 v4l2_dbg(1, debug, sd, "ta8874z_setmode(): mode: 0x%02x\n", mode);
1221 1244
1222 switch(mode){ 1245 switch(mode){
1223 case V4L2_TUNER_MODE_MONO: 1246 case V4L2_TUNER_MODE_MONO:
@@ -1479,142 +1502,11 @@ static struct CHIPDESC chiplist[] = {
1479 1502
1480 1503
1481/* ---------------------------------------------------------------------- */ 1504/* ---------------------------------------------------------------------- */
1482/* i2c registration */
1483
1484static int chip_probe(struct i2c_client *client, const struct i2c_device_id *id)
1485{
1486 struct CHIPSTATE *chip;
1487 struct CHIPDESC *desc;
1488
1489 if (debug) {
1490 printk(KERN_INFO "tvaudio: TV audio decoder + audio/video mux driver\n");
1491 printk(KERN_INFO "tvaudio: known chips: ");
1492 for (desc = chiplist; desc->name != NULL; desc++)
1493 printk("%s%s", (desc == chiplist) ? "" : ", ", desc->name);
1494 printk("\n");
1495 }
1496
1497 chip = kzalloc(sizeof(*chip),GFP_KERNEL);
1498 if (!chip)
1499 return -ENOMEM;
1500 chip->c = client;
1501 i2c_set_clientdata(client, chip);
1502 1505
1503 /* find description for the chip */ 1506static int tvaudio_g_ctrl(struct v4l2_subdev *sd,
1504 v4l_dbg(1, debug, client, "chip found @ 0x%x\n", client->addr<<1);
1505 for (desc = chiplist; desc->name != NULL; desc++) {
1506 if (0 == *(desc->insmodopt))
1507 continue;
1508 if (client->addr < desc->addr_lo ||
1509 client->addr > desc->addr_hi)
1510 continue;
1511 if (desc->checkit && !desc->checkit(chip))
1512 continue;
1513 break;
1514 }
1515 if (desc->name == NULL) {
1516 v4l_dbg(1, debug, client, "no matching chip description found\n");
1517 kfree(chip);
1518 return -EIO;
1519 }
1520 v4l_info(client, "%s found @ 0x%x (%s)\n", desc->name, client->addr<<1, client->adapter->name);
1521 if (desc->flags) {
1522 v4l_dbg(1, debug, client, "matches:%s%s%s.\n",
1523 (desc->flags & CHIP_HAS_VOLUME) ? " volume" : "",
1524 (desc->flags & CHIP_HAS_BASSTREBLE) ? " bass/treble" : "",
1525 (desc->flags & CHIP_HAS_INPUTSEL) ? " audiomux" : "");
1526 }
1527
1528 /* fill required data structures */
1529 if (!id)
1530 strlcpy(client->name, desc->name, I2C_NAME_SIZE);
1531 chip->desc = desc;
1532 chip->shadow.count = desc->registers+1;
1533 chip->prevmode = -1;
1534 chip->audmode = V4L2_TUNER_MODE_LANG1;
1535
1536 /* initialization */
1537 if (desc->initialize != NULL)
1538 desc->initialize(chip);
1539 else
1540 chip_cmd(chip,"init",&desc->init);
1541
1542 if (desc->flags & CHIP_HAS_VOLUME) {
1543 if (!desc->volfunc) {
1544 /* This shouldn't be happen. Warn user, but keep working
1545 without volume controls
1546 */
1547 v4l_info(chip->c, "volume callback undefined!\n");
1548 desc->flags &= ~CHIP_HAS_VOLUME;
1549 } else {
1550 chip->left = desc->leftinit ? desc->leftinit : 65535;
1551 chip->right = desc->rightinit ? desc->rightinit : 65535;
1552 chip_write(chip, desc->leftreg,
1553 desc->volfunc(chip->left));
1554 chip_write(chip, desc->rightreg,
1555 desc->volfunc(chip->right));
1556 }
1557 }
1558 if (desc->flags & CHIP_HAS_BASSTREBLE) {
1559 if (!desc->bassfunc || !desc->treblefunc) {
1560 /* This shouldn't be happen. Warn user, but keep working
1561 without bass/treble controls
1562 */
1563 v4l_info(chip->c, "bass/treble callbacks undefined!\n");
1564 desc->flags &= ~CHIP_HAS_BASSTREBLE;
1565 } else {
1566 chip->treble = desc->trebleinit ?
1567 desc->trebleinit : 32768;
1568 chip->bass = desc->bassinit ?
1569 desc->bassinit : 32768;
1570 chip_write(chip, desc->bassreg,
1571 desc->bassfunc(chip->bass));
1572 chip_write(chip, desc->treblereg,
1573 desc->treblefunc(chip->treble));
1574 }
1575 }
1576
1577 chip->thread = NULL;
1578 if (desc->flags & CHIP_NEED_CHECKMODE) {
1579 if (!desc->getmode || !desc->setmode) {
1580 /* This shouldn't be happen. Warn user, but keep working
1581 without kthread
1582 */
1583 v4l_info(chip->c, "set/get mode callbacks undefined!\n");
1584 return 0;
1585 }
1586 /* start async thread */
1587 init_timer(&chip->wt);
1588 chip->wt.function = chip_thread_wake;
1589 chip->wt.data = (unsigned long)chip;
1590 chip->thread = kthread_run(chip_thread, chip, chip->c->name);
1591 if (IS_ERR(chip->thread)) {
1592 v4l_warn(chip->c, "%s: failed to create kthread\n",
1593 chip->c->name);
1594 chip->thread = NULL;
1595 }
1596 }
1597 return 0;
1598}
1599
1600static int chip_remove(struct i2c_client *client)
1601{
1602 struct CHIPSTATE *chip = i2c_get_clientdata(client);
1603
1604 del_timer_sync(&chip->wt);
1605 if (chip->thread) {
1606 /* shutdown async thread */
1607 kthread_stop(chip->thread);
1608 chip->thread = NULL;
1609 }
1610
1611 kfree(chip);
1612 return 0;
1613}
1614
1615static int tvaudio_get_ctrl(struct CHIPSTATE *chip,
1616 struct v4l2_control *ctrl) 1507 struct v4l2_control *ctrl)
1617{ 1508{
1509 struct CHIPSTATE *chip = to_state(sd);
1618 struct CHIPDESC *desc = chip->desc; 1510 struct CHIPDESC *desc = chip->desc;
1619 1511
1620 switch (ctrl->id) { 1512 switch (ctrl->id) {
@@ -1652,9 +1544,10 @@ static int tvaudio_get_ctrl(struct CHIPSTATE *chip,
1652 return -EINVAL; 1544 return -EINVAL;
1653} 1545}
1654 1546
1655static int tvaudio_set_ctrl(struct CHIPSTATE *chip, 1547static int tvaudio_s_ctrl(struct v4l2_subdev *sd,
1656 struct v4l2_control *ctrl) 1548 struct v4l2_control *ctrl)
1657{ 1549{
1550 struct CHIPSTATE *chip = to_state(sd);
1658 struct CHIPDESC *desc = chip->desc; 1551 struct CHIPDESC *desc = chip->desc;
1659 1552
1660 switch (ctrl->id) { 1553 switch (ctrl->id) {
@@ -1726,161 +1619,327 @@ static int tvaudio_set_ctrl(struct CHIPSTATE *chip,
1726/* ---------------------------------------------------------------------- */ 1619/* ---------------------------------------------------------------------- */
1727/* video4linux interface */ 1620/* video4linux interface */
1728 1621
1729static int chip_command(struct i2c_client *client, 1622static int tvaudio_s_radio(struct v4l2_subdev *sd)
1730 unsigned int cmd, void *arg)
1731{ 1623{
1732 struct CHIPSTATE *chip = i2c_get_clientdata(client); 1624 struct CHIPSTATE *chip = to_state(sd);
1733 struct CHIPDESC *desc = chip->desc;
1734 1625
1735 if (debug > 0) { 1626 chip->radio = 1;
1736 v4l_i2c_print_ioctl(chip->c, cmd); 1627 chip->watch_stereo = 0;
1737 printk("\n"); 1628 /* del_timer(&chip->wt); */
1629 return 0;
1630}
1631
1632static int tvaudio_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
1633{
1634 struct CHIPSTATE *chip = to_state(sd);
1635 struct CHIPDESC *desc = chip->desc;
1636
1637 switch (qc->id) {
1638 case V4L2_CID_AUDIO_MUTE:
1639 break;
1640 case V4L2_CID_AUDIO_VOLUME:
1641 case V4L2_CID_AUDIO_BALANCE:
1642 if (!(desc->flags & CHIP_HAS_VOLUME))
1643 return -EINVAL;
1644 break;
1645 case V4L2_CID_AUDIO_BASS:
1646 case V4L2_CID_AUDIO_TREBLE:
1647 if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1648 return -EINVAL;
1649 break;
1650 default:
1651 return -EINVAL;
1738 } 1652 }
1653 return v4l2_ctrl_query_fill_std(qc);
1654}
1655
1656static int tvaudio_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *rt)
1657{
1658 struct CHIPSTATE *chip = to_state(sd);
1659 struct CHIPDESC *desc = chip->desc;
1739 1660
1740 switch (cmd) { 1661 if (!(desc->flags & CHIP_HAS_INPUTSEL) || rt->input >= 4)
1741 case AUDC_SET_RADIO: 1662 return -EINVAL;
1742 chip->radio = 1; 1663 /* There are four inputs: tuner, radio, extern and intern. */
1664 chip->input = rt->input;
1665 if (chip->muted)
1666 return 0;
1667 chip_write_masked(chip, desc->inputreg,
1668 desc->inputmap[chip->input], desc->inputmask);
1669 return 0;
1670}
1671
1672static int tvaudio_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1673{
1674 struct CHIPSTATE *chip = to_state(sd);
1675 struct CHIPDESC *desc = chip->desc;
1676 int mode = 0;
1677
1678 if (chip->radio)
1679 return 0;
1680 switch (vt->audmode) {
1681 case V4L2_TUNER_MODE_MONO:
1682 case V4L2_TUNER_MODE_STEREO:
1683 case V4L2_TUNER_MODE_LANG1:
1684 case V4L2_TUNER_MODE_LANG2:
1685 mode = vt->audmode;
1686 break;
1687 case V4L2_TUNER_MODE_LANG1_LANG2:
1688 mode = V4L2_TUNER_MODE_STEREO;
1689 break;
1690 default:
1691 return -EINVAL;
1692 }
1693 chip->audmode = vt->audmode;
1694
1695 if (desc->setmode && mode) {
1743 chip->watch_stereo = 0; 1696 chip->watch_stereo = 0;
1744 /* del_timer(&chip->wt); */ 1697 /* del_timer(&chip->wt); */
1745 break; 1698 chip->mode = mode;
1746 /* --- v4l ioctls --- */ 1699 desc->setmode(chip, mode);
1747 /* take care: bttv does userspace copying, we'll get a
1748 kernel pointer here... */
1749 case VIDIOC_QUERYCTRL:
1750 {
1751 struct v4l2_queryctrl *qc = arg;
1752
1753 switch (qc->id) {
1754 case V4L2_CID_AUDIO_MUTE:
1755 break;
1756 case V4L2_CID_AUDIO_VOLUME:
1757 case V4L2_CID_AUDIO_BALANCE:
1758 if (!(desc->flags & CHIP_HAS_VOLUME))
1759 return -EINVAL;
1760 break;
1761 case V4L2_CID_AUDIO_BASS:
1762 case V4L2_CID_AUDIO_TREBLE:
1763 if (!(desc->flags & CHIP_HAS_BASSTREBLE))
1764 return -EINVAL;
1765 break;
1766 default:
1767 return -EINVAL;
1768 }
1769 return v4l2_ctrl_query_fill_std(qc);
1770 } 1700 }
1771 case VIDIOC_S_CTRL: 1701 return 0;
1772 return tvaudio_set_ctrl(chip, arg); 1702}
1773 1703
1774 case VIDIOC_G_CTRL: 1704static int tvaudio_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1775 return tvaudio_get_ctrl(chip, arg); 1705{
1776 case VIDIOC_INT_G_AUDIO_ROUTING: 1706 struct CHIPSTATE *chip = to_state(sd);
1777 { 1707 struct CHIPDESC *desc = chip->desc;
1778 struct v4l2_routing *rt = arg; 1708 int mode = V4L2_TUNER_MODE_MONO;
1779 1709
1780 rt->input = chip->input; 1710 if (chip->radio)
1781 rt->output = 0; 1711 return 0;
1782 break; 1712 vt->audmode = chip->audmode;
1713 vt->rxsubchans = 0;
1714 vt->capability = V4L2_TUNER_CAP_STEREO |
1715 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
1716
1717 if (desc->getmode)
1718 mode = desc->getmode(chip);
1719
1720 if (mode & V4L2_TUNER_MODE_MONO)
1721 vt->rxsubchans |= V4L2_TUNER_SUB_MONO;
1722 if (mode & V4L2_TUNER_MODE_STEREO)
1723 vt->rxsubchans |= V4L2_TUNER_SUB_STEREO;
1724 /* Note: for SAP it should be mono/lang2 or stereo/lang2.
1725 When this module is converted fully to v4l2, then this
1726 should change for those chips that can detect SAP. */
1727 if (mode & V4L2_TUNER_MODE_LANG1)
1728 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 |
1729 V4L2_TUNER_SUB_LANG2;
1730 return 0;
1731}
1732
1733static int tvaudio_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
1734{
1735 struct CHIPSTATE *chip = to_state(sd);
1736
1737 chip->radio = 0;
1738 return 0;
1739}
1740
1741static int tvaudio_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
1742{
1743 struct CHIPSTATE *chip = to_state(sd);
1744 struct CHIPDESC *desc = chip->desc;
1745
1746 chip->mode = 0; /* automatic */
1747
 1748	/* For chips that provide getmode and setmode and don't
 1749	   automatically follow the stereo carrier, a kthread is
 1750	   created to set the audio standard. In this case, when
 1751	   the video channel is changed, tvaudio starts in MONO mode.
 1752	   After waiting for 2 seconds, the kernel thread is called
 1753	   to follow whatever audio standard the audio carrier
 1754	   points to.
 1755	*/
1756 if (chip->thread) {
1757 desc->setmode(chip, V4L2_TUNER_MODE_MONO);
1758 if (chip->prevmode != V4L2_TUNER_MODE_MONO)
1759 chip->prevmode = -1; /* reset previous mode */
1760 mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
1783 } 1761 }
1784 case VIDIOC_INT_S_AUDIO_ROUTING: 1762 return 0;
1785 { 1763}
1786 struct v4l2_routing *rt = arg;
1787 1764
1788 if (!(desc->flags & CHIP_HAS_INPUTSEL) || rt->input >= 4) 1765static int tvaudio_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
1789 return -EINVAL; 1766{
1790 /* There are four inputs: tuner, radio, extern and intern. */ 1767 struct i2c_client *client = v4l2_get_subdevdata(sd);
1791 chip->input = rt->input; 1768
1792 if (chip->muted) 1769 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TVAUDIO, 0);
1793 break; 1770}
1794 chip_write_masked(chip, desc->inputreg, 1771
1795 desc->inputmap[chip->input], desc->inputmask); 1772static int tvaudio_command(struct i2c_client *client, unsigned cmd, void *arg)
1796 break; 1773{
1774 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
1775}
1776
1777/* ----------------------------------------------------------------------- */
1778
1779static const struct v4l2_subdev_core_ops tvaudio_core_ops = {
1780 .g_chip_ident = tvaudio_g_chip_ident,
1781 .queryctrl = tvaudio_queryctrl,
1782 .g_ctrl = tvaudio_g_ctrl,
1783 .s_ctrl = tvaudio_s_ctrl,
1784};
1785
1786static const struct v4l2_subdev_tuner_ops tvaudio_tuner_ops = {
1787 .s_radio = tvaudio_s_radio,
1788 .s_frequency = tvaudio_s_frequency,
1789 .s_std = tvaudio_s_std,
1790 .s_tuner = tvaudio_s_tuner,
 1791	.g_tuner = tvaudio_g_tuner,
1792};
1793
1794static const struct v4l2_subdev_audio_ops tvaudio_audio_ops = {
1795 .s_routing = tvaudio_s_routing,
1796};
1797
1798static const struct v4l2_subdev_ops tvaudio_ops = {
1799 .core = &tvaudio_core_ops,
1800 .tuner = &tvaudio_tuner_ops,
1801 .audio = &tvaudio_audio_ops,
1802};
1803
1804/* ----------------------------------------------------------------------- */
1805
1806
1807/* i2c registration */
1808
1809static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *id)
1810{
1811 struct CHIPSTATE *chip;
1812 struct CHIPDESC *desc;
1813 struct v4l2_subdev *sd;
1814
1815 if (debug) {
1816 printk(KERN_INFO "tvaudio: TV audio decoder + audio/video mux driver\n");
1817 printk(KERN_INFO "tvaudio: known chips: ");
1818 for (desc = chiplist; desc->name != NULL; desc++)
1819 printk("%s%s", (desc == chiplist) ? "" : ", ", desc->name);
1820 printk("\n");
1797 } 1821 }
1798 case VIDIOC_S_TUNER:
1799 {
1800 struct v4l2_tuner *vt = arg;
1801 int mode = 0;
1802 1822
1803 if (chip->radio) 1823 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
1804 break; 1824 if (!chip)
1805 switch (vt->audmode) { 1825 return -ENOMEM;
1806 case V4L2_TUNER_MODE_MONO: 1826 sd = &chip->sd;
1807 case V4L2_TUNER_MODE_STEREO: 1827 v4l2_i2c_subdev_init(sd, client, &tvaudio_ops);
1808 case V4L2_TUNER_MODE_LANG1:
1809 case V4L2_TUNER_MODE_LANG2:
1810 mode = vt->audmode;
1811 break;
1812 case V4L2_TUNER_MODE_LANG1_LANG2:
1813 mode = V4L2_TUNER_MODE_STEREO;
1814 break;
1815 default:
1816 return -EINVAL;
1817 }
1818 chip->audmode = vt->audmode;
1819 1828
1820 if (desc->setmode && mode) { 1829 /* find description for the chip */
1821 chip->watch_stereo = 0; 1830 v4l2_dbg(1, debug, sd, "chip found @ 0x%x\n", client->addr<<1);
1822 /* del_timer(&chip->wt); */ 1831 for (desc = chiplist; desc->name != NULL; desc++) {
1823 chip->mode = mode; 1832 if (0 == *(desc->insmodopt))
1824 desc->setmode(chip, mode); 1833 continue;
1825 } 1834 if (client->addr < desc->addr_lo ||
1835 client->addr > desc->addr_hi)
1836 continue;
1837 if (desc->checkit && !desc->checkit(chip))
1838 continue;
1826 break; 1839 break;
1827 } 1840 }
1828 case VIDIOC_G_TUNER: 1841 if (desc->name == NULL) {
1829 { 1842 v4l2_dbg(1, debug, sd, "no matching chip description found\n");
1830 struct v4l2_tuner *vt = arg; 1843 kfree(chip);
1831 int mode = V4L2_TUNER_MODE_MONO; 1844 return -EIO;
1845 }
1846 v4l2_info(sd, "%s found @ 0x%x (%s)\n", desc->name, client->addr<<1, client->adapter->name);
1847 if (desc->flags) {
1848 v4l2_dbg(1, debug, sd, "matches:%s%s%s.\n",
1849 (desc->flags & CHIP_HAS_VOLUME) ? " volume" : "",
1850 (desc->flags & CHIP_HAS_BASSTREBLE) ? " bass/treble" : "",
1851 (desc->flags & CHIP_HAS_INPUTSEL) ? " audiomux" : "");
1852 }
1832 1853
1833 if (chip->radio) 1854 /* fill required data structures */
1834 break; 1855 if (!id)
1835 vt->audmode = chip->audmode; 1856 strlcpy(client->name, desc->name, I2C_NAME_SIZE);
1836 vt->rxsubchans = 0; 1857 chip->desc = desc;
1837 vt->capability = V4L2_TUNER_CAP_STEREO | 1858 chip->shadow.count = desc->registers+1;
1838 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; 1859 chip->prevmode = -1;
1860 chip->audmode = V4L2_TUNER_MODE_LANG1;
1839 1861
1840 if (desc->getmode) 1862 /* initialization */
1841 mode = desc->getmode(chip); 1863 if (desc->initialize != NULL)
1864 desc->initialize(chip);
1865 else
1866 chip_cmd(chip, "init", &desc->init);
1842 1867
1843 if (mode & V4L2_TUNER_MODE_MONO) 1868 if (desc->flags & CHIP_HAS_VOLUME) {
1844 vt->rxsubchans |= V4L2_TUNER_SUB_MONO; 1869 if (!desc->volfunc) {
1845 if (mode & V4L2_TUNER_MODE_STEREO) 1870 /* This shouldn't be happen. Warn user, but keep working
1846 vt->rxsubchans |= V4L2_TUNER_SUB_STEREO; 1871 without volume controls
1847 /* Note: for SAP it should be mono/lang2 or stereo/lang2. 1872 */
1848 When this module is converted fully to v4l2, then this 1873 v4l2_info(sd, "volume callback undefined!\n");
1849 should change for those chips that can detect SAP. */ 1874 desc->flags &= ~CHIP_HAS_VOLUME;
1850 if (mode & V4L2_TUNER_MODE_LANG1) 1875 } else {
1851 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | 1876 chip->left = desc->leftinit ? desc->leftinit : 65535;
1852 V4L2_TUNER_SUB_LANG2; 1877 chip->right = desc->rightinit ? desc->rightinit : 65535;
1853 break; 1878 chip_write(chip, desc->leftreg,
1879 desc->volfunc(chip->left));
1880 chip_write(chip, desc->rightreg,
1881 desc->volfunc(chip->right));
1882 }
1854 } 1883 }
1855 case VIDIOC_S_STD: 1884 if (desc->flags & CHIP_HAS_BASSTREBLE) {
1856 chip->radio = 0; 1885 if (!desc->bassfunc || !desc->treblefunc) {
1857 break; 1886 /* This shouldn't be happen. Warn user, but keep working
1858 case VIDIOC_S_FREQUENCY: 1887 without bass/treble controls
1859 chip->mode = 0; /* automatic */ 1888 */
1860 1889 v4l2_info(sd, "bass/treble callbacks undefined!\n");
1861 /* For chips that provide getmode and setmode, and doesn't 1890 desc->flags &= ~CHIP_HAS_BASSTREBLE;
1862 automatically follows the stereo carrier, a kthread is 1891 } else {
1863 created to set the audio standard. In this case, when then 1892 chip->treble = desc->trebleinit ?
1864 the video channel is changed, tvaudio starts on MONO mode. 1893 desc->trebleinit : 32768;
1865 After waiting for 2 seconds, the kernel thread is called, 1894 chip->bass = desc->bassinit ?
1866 to follow whatever audio standard is pointed by the 1895 desc->bassinit : 32768;
1867 audio carrier. 1896 chip_write(chip, desc->bassreg,
1868 */ 1897 desc->bassfunc(chip->bass));
1869 if (chip->thread) { 1898 chip_write(chip, desc->treblereg,
1870 desc->setmode(chip,V4L2_TUNER_MODE_MONO); 1899 desc->treblefunc(chip->treble));
1871 if (chip->prevmode != V4L2_TUNER_MODE_MONO)
1872 chip->prevmode = -1; /* reset previous mode */
1873 mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
1874 } 1900 }
1875 break; 1901 }
1876 1902
1877 case VIDIOC_G_CHIP_IDENT: 1903 chip->thread = NULL;
1878 return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_TVAUDIO, 0); 1904 if (desc->flags & CHIP_NEED_CHECKMODE) {
1905 if (!desc->getmode || !desc->setmode) {
1906 /* This shouldn't be happen. Warn user, but keep working
1907 without kthread
1908 */
1909 v4l2_info(sd, "set/get mode callbacks undefined!\n");
1910 return 0;
1911 }
1912 /* start async thread */
1913 init_timer(&chip->wt);
1914 chip->wt.function = chip_thread_wake;
1915 chip->wt.data = (unsigned long)chip;
1916 chip->thread = kthread_run(chip_thread, chip, client->name);
1917 if (IS_ERR(chip->thread)) {
1918 v4l2_warn(sd, "failed to create kthread\n");
1919 chip->thread = NULL;
1920 }
1879 } 1921 }
1880 return 0; 1922 return 0;
1881} 1923}
1882 1924
1883static int chip_legacy_probe(struct i2c_adapter *adap) 1925static int tvaudio_remove(struct i2c_client *client)
1926{
1927 struct v4l2_subdev *sd = i2c_get_clientdata(client);
1928 struct CHIPSTATE *chip = to_state(sd);
1929
1930 del_timer_sync(&chip->wt);
1931 if (chip->thread) {
1932 /* shutdown async thread */
1933 kthread_stop(chip->thread);
1934 chip->thread = NULL;
1935 }
1936
1937 v4l2_device_unregister_subdev(sd);
1938 kfree(chip);
1939 return 0;
1940}
1941
1942static int tvaudio_legacy_probe(struct i2c_adapter *adap)
1884{ 1943{
1885 /* don't attach on saa7146 based cards, 1944 /* don't attach on saa7146 based cards,
1886 because dedicated drivers are used */ 1945 because dedicated drivers are used */
@@ -1894,18 +1953,18 @@ static int chip_legacy_probe(struct i2c_adapter *adap)
 /* This driver supports many devices and the idea is to let the driver
    detect which device is present. So rather than listing all supported
    devices here, we pretend to support a single, fake device type. */
-static const struct i2c_device_id chip_id[] = {
+static const struct i2c_device_id tvaudio_id[] = {
 	{ "tvaudio", 0 },
 	{ }
 };
-MODULE_DEVICE_TABLE(i2c, chip_id);
+MODULE_DEVICE_TABLE(i2c, tvaudio_id);
 
 static struct v4l2_i2c_driver_data v4l2_i2c_data = {
 	.name = "tvaudio",
 	.driverid = I2C_DRIVERID_TVAUDIO,
-	.command = chip_command,
-	.probe = chip_probe,
-	.remove = chip_remove,
-	.legacy_probe = chip_legacy_probe,
-	.id_table = chip_id,
+	.command = tvaudio_command,
+	.probe = tvaudio_probe,
+	.remove = tvaudio_remove,
+	.legacy_probe = tvaudio_legacy_probe,
+	.id_table = tvaudio_id,
 };
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
new file mode 100644
index 000000000000..ac9aa40d09f6
--- /dev/null
+++ b/drivers/media/video/tvp514x.c
@@ -0,0 +1,1569 @@
1/*
2 * drivers/media/video/tvp514x.c
3 *
4 * TI TVP5146/47 decoder driver
5 *
6 * Copyright (C) 2008 Texas Instruments Inc
7 * Author: Vaibhav Hiremath <hvaibhav@ti.com>
8 *
9 * Contributors:
10 * Sivaraj R <sivaraj@ti.com>
11 * Brijesh R Jadav <brijesh.j@ti.com>
12 * Hardik Shah <hardik.shah@ti.com>
13 * Manjunath Hadli <mrh@ti.com>
14 * Karicheri Muralidharan <m-karicheri2@ti.com>
15 *
16 * This package is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 *
29 */
30
31#include <linux/i2c.h>
32#include <linux/delay.h>
33#include <linux/videodev2.h>
34#include <media/v4l2-int-device.h>
35#include <media/tvp514x.h>
36
37#include "tvp514x_regs.h"
38
39/* Module Name */
40#define TVP514X_MODULE_NAME "tvp514x"
41
42/* Private macros for TVP */
43#define I2C_RETRY_COUNT (5)
44#define LOCK_RETRY_COUNT (5)
45#define LOCK_RETRY_DELAY (200)
46
47/* Debug functions */
48static int debug;
49module_param(debug, bool, 0644);
50MODULE_PARM_DESC(debug, "Debug level (0-1)");
51
52#define dump_reg(client, reg, val) \
53 do { \
54 val = tvp514x_read_reg(client, reg); \
55 v4l_info(client, "Reg(0x%.2X): 0x%.2X\n", reg, val); \
56 } while (0)
57
58/**
59 * enum tvp514x_std - enum for supported standards
60 */
61enum tvp514x_std {
62 STD_NTSC_MJ = 0,
63 STD_PAL_BDGHIN,
64 STD_INVALID
65};
66
67/**
68 * enum tvp514x_state - enum for different decoder states
69 */
70enum tvp514x_state {
71 STATE_NOT_DETECTED,
72 STATE_DETECTED
73};
74
75/**
 76 * struct tvp514x_std_info - Structure to store standard information
 77 * @width: Line width in pixels
 78 * @height: Number of active lines
79 * @video_std: Value to write in REG_VIDEO_STD register
80 * @standard: v4l2 standard structure information
81 */
82struct tvp514x_std_info {
83 unsigned long width;
84 unsigned long height;
85 u8 video_std;
86 struct v4l2_standard standard;
87};
88
89/**
 90 * struct tvp514x_decoder - TVP5146/47 decoder object
91 * @v4l2_int_device: Slave handle
92 * @pdata: Board specific
93 * @client: I2C client data
94 * @id: Entry from I2C table
95 * @ver: Chip version
96 * @state: TVP5146/47 decoder state - detected or not-detected
97 * @pix: Current pixel format
98 * @num_fmts: Number of formats
99 * @fmt_list: Format list
100 * @current_std: Current standard
101 * @num_stds: Number of standards
102 * @std_list: Standards list
103 * @route: input and output routing at chip level
104 */
105struct tvp514x_decoder {
106 struct v4l2_int_device *v4l2_int_device;
107 const struct tvp514x_platform_data *pdata;
108 struct i2c_client *client;
109
110 struct i2c_device_id *id;
111
112 int ver;
113 enum tvp514x_state state;
114
115 struct v4l2_pix_format pix;
116 int num_fmts;
117 const struct v4l2_fmtdesc *fmt_list;
118
119 enum tvp514x_std current_std;
120 int num_stds;
121 struct tvp514x_std_info *std_list;
122
123 struct v4l2_routing route;
124};
125
126/* TVP514x default register values */
127static struct tvp514x_reg tvp514x_reg_list[] = {
128 {TOK_WRITE, REG_INPUT_SEL, 0x05}, /* Composite selected */
129 {TOK_WRITE, REG_AFE_GAIN_CTRL, 0x0F},
130 {TOK_WRITE, REG_VIDEO_STD, 0x00}, /* Auto mode */
131 {TOK_WRITE, REG_OPERATION_MODE, 0x00},
132 {TOK_SKIP, REG_AUTOSWITCH_MASK, 0x3F},
133 {TOK_WRITE, REG_COLOR_KILLER, 0x10},
134 {TOK_WRITE, REG_LUMA_CONTROL1, 0x00},
135 {TOK_WRITE, REG_LUMA_CONTROL2, 0x00},
136 {TOK_WRITE, REG_LUMA_CONTROL3, 0x02},
137 {TOK_WRITE, REG_BRIGHTNESS, 0x80},
138 {TOK_WRITE, REG_CONTRAST, 0x80},
139 {TOK_WRITE, REG_SATURATION, 0x80},
140 {TOK_WRITE, REG_HUE, 0x00},
141 {TOK_WRITE, REG_CHROMA_CONTROL1, 0x00},
142 {TOK_WRITE, REG_CHROMA_CONTROL2, 0x0E},
143 {TOK_SKIP, 0x0F, 0x00}, /* Reserved */
144 {TOK_WRITE, REG_COMP_PR_SATURATION, 0x80},
145 {TOK_WRITE, REG_COMP_Y_CONTRAST, 0x80},
146 {TOK_WRITE, REG_COMP_PB_SATURATION, 0x80},
147 {TOK_SKIP, 0x13, 0x00}, /* Reserved */
148 {TOK_WRITE, REG_COMP_Y_BRIGHTNESS, 0x80},
149 {TOK_SKIP, 0x15, 0x00}, /* Reserved */
150 {TOK_SKIP, REG_AVID_START_PIXEL_LSB, 0x55}, /* NTSC timing */
151 {TOK_SKIP, REG_AVID_START_PIXEL_MSB, 0x00},
152 {TOK_SKIP, REG_AVID_STOP_PIXEL_LSB, 0x25},
153 {TOK_SKIP, REG_AVID_STOP_PIXEL_MSB, 0x03},
154 {TOK_SKIP, REG_HSYNC_START_PIXEL_LSB, 0x00}, /* NTSC timing */
155 {TOK_SKIP, REG_HSYNC_START_PIXEL_MSB, 0x00},
156 {TOK_SKIP, REG_HSYNC_STOP_PIXEL_LSB, 0x40},
157 {TOK_SKIP, REG_HSYNC_STOP_PIXEL_MSB, 0x00},
158 {TOK_SKIP, REG_VSYNC_START_LINE_LSB, 0x04}, /* NTSC timing */
159 {TOK_SKIP, REG_VSYNC_START_LINE_MSB, 0x00},
160 {TOK_SKIP, REG_VSYNC_STOP_LINE_LSB, 0x07},
161 {TOK_SKIP, REG_VSYNC_STOP_LINE_MSB, 0x00},
162 {TOK_SKIP, REG_VBLK_START_LINE_LSB, 0x01}, /* NTSC timing */
163 {TOK_SKIP, REG_VBLK_START_LINE_MSB, 0x00},
164 {TOK_SKIP, REG_VBLK_STOP_LINE_LSB, 0x15},
165 {TOK_SKIP, REG_VBLK_STOP_LINE_MSB, 0x00},
166 {TOK_SKIP, 0x26, 0x00}, /* Reserved */
167 {TOK_SKIP, 0x27, 0x00}, /* Reserved */
168 {TOK_SKIP, REG_FAST_SWTICH_CONTROL, 0xCC},
169 {TOK_SKIP, 0x29, 0x00}, /* Reserved */
170 {TOK_SKIP, REG_FAST_SWTICH_SCART_DELAY, 0x00},
171 {TOK_SKIP, 0x2B, 0x00}, /* Reserved */
172 {TOK_SKIP, REG_SCART_DELAY, 0x00},
173 {TOK_SKIP, REG_CTI_DELAY, 0x00},
174 {TOK_SKIP, REG_CTI_CONTROL, 0x00},
175 {TOK_SKIP, 0x2F, 0x00}, /* Reserved */
176 {TOK_SKIP, 0x30, 0x00}, /* Reserved */
177 {TOK_SKIP, 0x31, 0x00}, /* Reserved */
178 {TOK_WRITE, REG_SYNC_CONTROL, 0x00}, /* HS, VS active high */
179 {TOK_WRITE, REG_OUTPUT_FORMATTER1, 0x00}, /* 10-bit BT.656 */
180 {TOK_WRITE, REG_OUTPUT_FORMATTER2, 0x11}, /* Enable clk & data */
181 {TOK_WRITE, REG_OUTPUT_FORMATTER3, 0xEE}, /* Enable AVID & FLD */
182 {TOK_WRITE, REG_OUTPUT_FORMATTER4, 0xAF}, /* Enable VS & HS */
183 {TOK_WRITE, REG_OUTPUT_FORMATTER5, 0xFF},
184 {TOK_WRITE, REG_OUTPUT_FORMATTER6, 0xFF},
185 {TOK_WRITE, REG_CLEAR_LOST_LOCK, 0x01}, /* Clear status */
186 {TOK_TERM, 0, 0},
187};
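/* Note: entries tagged TOK_SKIP above are never written by
 * tvp514x_write_regs(); the NTSC timing values on those lines are carried
 * for reference only, so the corresponding registers keep whatever the
 * hardware already holds unless they are changed elsewhere. */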
188
189/* List of image formats supported by TVP5146/47 decoder
190 * Currently we are using 8 bit mode only, but can be
191 * extended to 10/20 bit mode.
192 */
193static const struct v4l2_fmtdesc tvp514x_fmt_list[] = {
194 {
195 .index = 0,
196 .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
197 .flags = 0,
198 .description = "8-bit UYVY 4:2:2 Format",
199 .pixelformat = V4L2_PIX_FMT_UYVY,
200 },
201};
202
203/*
204 * Supported standards -
205 *
206 * Currently supports two standards only, need to add support for rest of the
207 * modes, like SECAM, etc...
208 */
209static struct tvp514x_std_info tvp514x_std_list[] = {
210 /* Standard: STD_NTSC_MJ */
211 [STD_NTSC_MJ] = {
212 .width = NTSC_NUM_ACTIVE_PIXELS,
213 .height = NTSC_NUM_ACTIVE_LINES,
214 .video_std = VIDEO_STD_NTSC_MJ_BIT,
215 .standard = {
216 .index = 0,
217 .id = V4L2_STD_NTSC,
218 .name = "NTSC",
219 .frameperiod = {1001, 30000},
220 .framelines = 525
221 },
222	},
223	/* Standard: STD_PAL_BDGHIN */
224 [STD_PAL_BDGHIN] = {
225 .width = PAL_NUM_ACTIVE_PIXELS,
226 .height = PAL_NUM_ACTIVE_LINES,
227 .video_std = VIDEO_STD_PAL_BDGHIN_BIT,
228 .standard = {
229 .index = 1,
230 .id = V4L2_STD_PAL,
231 .name = "PAL",
232 .frameperiod = {1, 25},
233 .framelines = 625
234 },
235 },
236 /* Standard: need to add for additional standard */
237};
238/*
239 * Control structure for Auto Gain
240 * This is temporary data, will get replaced once
241 * v4l2_ctrl_query_fill supports it.
242 */
243static const struct v4l2_queryctrl tvp514x_autogain_ctrl = {
244 .id = V4L2_CID_AUTOGAIN,
245 .name = "Gain, Automatic",
246 .type = V4L2_CTRL_TYPE_BOOLEAN,
247 .minimum = 0,
248 .maximum = 1,
249 .step = 1,
250 .default_value = 1,
251};
252
253/*
254 * Read a value from a register in a TVP5146/47 decoder device.
255 * Returns the value read if successful, or a negative value otherwise.
256 */
257static int tvp514x_read_reg(struct i2c_client *client, u8 reg)
258{
259 int err;
260 int retry = 0;
261read_again:
262
263 err = i2c_smbus_read_byte_data(client, reg);
264	if (err < 0) {
265 if (retry <= I2C_RETRY_COUNT) {
266 v4l_warn(client, "Read: retry ... %d\n", retry);
267 retry++;
268 msleep_interruptible(10);
269 goto read_again;
270 }
271 }
272
273 return err;
274}
275
276/*
277 * Write a value to a register in a TVP5146/47 decoder device.
278 * Returns zero if successful, or non-zero otherwise.
279 */
280static int tvp514x_write_reg(struct i2c_client *client, u8 reg, u8 val)
281{
282 int err;
283 int retry = 0;
284write_again:
285
286 err = i2c_smbus_write_byte_data(client, reg, val);
287 if (err) {
288 if (retry <= I2C_RETRY_COUNT) {
289 v4l_warn(client, "Write: retry ... %d\n", retry);
290 retry++;
291 msleep_interruptible(10);
292 goto write_again;
293 }
294 }
295
296 return err;
297}
298
299/*
300 * tvp514x_write_regs : Initializes a list of TVP5146/47 registers
301 * if token is TOK_TERM, then entire write operation terminates
302 * if token is TOK_DELAY, then a delay of 'val' msec is introduced
303 * if token is TOK_SKIP, then the register write is skipped
304 * if token is TOK_WRITE, then the register write is performed
305 *
306 * reglist - list of registers to be written
307 * Returns zero if successful, or non-zero otherwise.
308 */
309static int tvp514x_write_regs(struct i2c_client *client,
310 const struct tvp514x_reg reglist[])
311{
312 int err;
313 const struct tvp514x_reg *next = reglist;
314
315 for (; next->token != TOK_TERM; next++) {
316 if (next->token == TOK_DELAY) {
317 msleep(next->val);
318 continue;
319 }
320
321 if (next->token == TOK_SKIP)
322 continue;
323
324 err = tvp514x_write_reg(client, next->reg, (u8) next->val);
325 if (err) {
326 v4l_err(client, "Write failed. Err[%d]\n", err);
327 return err;
328 }
329 }
330 return 0;
331}
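/*
 * A minimal illustration of the token semantics above (hypothetical list,
 * not part of this driver):
 *
 *	static const struct tvp514x_reg example_list[] = {
 *		{TOK_WRITE, REG_BRIGHTNESS, 0x80},	written to the chip
 *		{TOK_DELAY, 0, 10},			10 ms pause, nothing written
 *		{TOK_SKIP, REG_HUE, 0x00},		skipped entirely
 *		{TOK_TERM, 0, 0},			terminates the loop
 *	};
 *	err = tvp514x_write_regs(client, example_list);
 */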
332
333/*
334 * tvp514x_get_current_std:
335 * Returns the current standard detected by TVP5146/47
336 */
337static enum tvp514x_std tvp514x_get_current_std(struct tvp514x_decoder
338 *decoder)
339{
340 u8 std, std_status;
341
342 std = tvp514x_read_reg(decoder->client, REG_VIDEO_STD);
343 if ((std & VIDEO_STD_MASK) == VIDEO_STD_AUTO_SWITCH_BIT) {
344 /* use the standard status register */
345 std_status = tvp514x_read_reg(decoder->client,
346 REG_VIDEO_STD_STATUS);
347 } else
348 std_status = std; /* use the standard register itself */
349
350 switch (std_status & VIDEO_STD_MASK) {
351 case VIDEO_STD_NTSC_MJ_BIT:
352 return STD_NTSC_MJ;
353
354 case VIDEO_STD_PAL_BDGHIN_BIT:
355 return STD_PAL_BDGHIN;
356
357 default:
358 return STD_INVALID;
359 }
360
361 return STD_INVALID;
362}
363
364/*
365 * TVP5146/47 register dump function
366 */
367static void tvp514x_reg_dump(struct tvp514x_decoder *decoder)
368{
369 u8 value;
370
371 dump_reg(decoder->client, REG_INPUT_SEL, value);
372 dump_reg(decoder->client, REG_AFE_GAIN_CTRL, value);
373 dump_reg(decoder->client, REG_VIDEO_STD, value);
374 dump_reg(decoder->client, REG_OPERATION_MODE, value);
375 dump_reg(decoder->client, REG_COLOR_KILLER, value);
376 dump_reg(decoder->client, REG_LUMA_CONTROL1, value);
377 dump_reg(decoder->client, REG_LUMA_CONTROL2, value);
378 dump_reg(decoder->client, REG_LUMA_CONTROL3, value);
379 dump_reg(decoder->client, REG_BRIGHTNESS, value);
380 dump_reg(decoder->client, REG_CONTRAST, value);
381 dump_reg(decoder->client, REG_SATURATION, value);
382 dump_reg(decoder->client, REG_HUE, value);
383 dump_reg(decoder->client, REG_CHROMA_CONTROL1, value);
384 dump_reg(decoder->client, REG_CHROMA_CONTROL2, value);
385 dump_reg(decoder->client, REG_COMP_PR_SATURATION, value);
386 dump_reg(decoder->client, REG_COMP_Y_CONTRAST, value);
387 dump_reg(decoder->client, REG_COMP_PB_SATURATION, value);
388 dump_reg(decoder->client, REG_COMP_Y_BRIGHTNESS, value);
389 dump_reg(decoder->client, REG_AVID_START_PIXEL_LSB, value);
390 dump_reg(decoder->client, REG_AVID_START_PIXEL_MSB, value);
391 dump_reg(decoder->client, REG_AVID_STOP_PIXEL_LSB, value);
392 dump_reg(decoder->client, REG_AVID_STOP_PIXEL_MSB, value);
393 dump_reg(decoder->client, REG_HSYNC_START_PIXEL_LSB, value);
394 dump_reg(decoder->client, REG_HSYNC_START_PIXEL_MSB, value);
395 dump_reg(decoder->client, REG_HSYNC_STOP_PIXEL_LSB, value);
396 dump_reg(decoder->client, REG_HSYNC_STOP_PIXEL_MSB, value);
397 dump_reg(decoder->client, REG_VSYNC_START_LINE_LSB, value);
398 dump_reg(decoder->client, REG_VSYNC_START_LINE_MSB, value);
399 dump_reg(decoder->client, REG_VSYNC_STOP_LINE_LSB, value);
400 dump_reg(decoder->client, REG_VSYNC_STOP_LINE_MSB, value);
401 dump_reg(decoder->client, REG_VBLK_START_LINE_LSB, value);
402 dump_reg(decoder->client, REG_VBLK_START_LINE_MSB, value);
403 dump_reg(decoder->client, REG_VBLK_STOP_LINE_LSB, value);
404 dump_reg(decoder->client, REG_VBLK_STOP_LINE_MSB, value);
405 dump_reg(decoder->client, REG_SYNC_CONTROL, value);
406 dump_reg(decoder->client, REG_OUTPUT_FORMATTER1, value);
407 dump_reg(decoder->client, REG_OUTPUT_FORMATTER2, value);
408 dump_reg(decoder->client, REG_OUTPUT_FORMATTER3, value);
409 dump_reg(decoder->client, REG_OUTPUT_FORMATTER4, value);
410 dump_reg(decoder->client, REG_OUTPUT_FORMATTER5, value);
411 dump_reg(decoder->client, REG_OUTPUT_FORMATTER6, value);
412 dump_reg(decoder->client, REG_CLEAR_LOST_LOCK, value);
413}
414
415/*
416 * Configure the TVP5146/47 with the current register settings
417 * Returns zero if successful, or non-zero otherwise.
418 */
419static int tvp514x_configure(struct tvp514x_decoder *decoder)
420{
421 int err;
422
423 /* common register initialization */
424 err =
425 tvp514x_write_regs(decoder->client, tvp514x_reg_list);
426 if (err)
427 return err;
428
429 if (debug)
430 tvp514x_reg_dump(decoder);
431
432 return 0;
433}
434
435/*
436 * Detect if a tvp514x is present, and if so which revision.
437 * A device is considered to be detected if the chip ID (LSB and MSB)
438 * registers match the expected values.
439 * Any value of the rom version register is accepted.
440 * Returns -ENODEV if no device is detected, or zero
441 * if a device is detected.
442 */
443static int tvp514x_detect(struct tvp514x_decoder *decoder)
444{
445 u8 chip_id_msb, chip_id_lsb, rom_ver;
446
447 chip_id_msb = tvp514x_read_reg(decoder->client, REG_CHIP_ID_MSB);
448 chip_id_lsb = tvp514x_read_reg(decoder->client, REG_CHIP_ID_LSB);
449 rom_ver = tvp514x_read_reg(decoder->client, REG_ROM_VERSION);
450
451 v4l_dbg(1, debug, decoder->client,
452 "chip id detected msb:0x%x lsb:0x%x rom version:0x%x\n",
453 chip_id_msb, chip_id_lsb, rom_ver);
454 if ((chip_id_msb != TVP514X_CHIP_ID_MSB)
455 || ((chip_id_lsb != TVP5146_CHIP_ID_LSB)
456 && (chip_id_lsb != TVP5147_CHIP_ID_LSB))) {
457 /* We didn't read the values we expected, so this must not be
458		 * a TVP5146/47.
459 */
460 v4l_err(decoder->client,
461 "chip id mismatch msb:0x%x lsb:0x%x\n",
462 chip_id_msb, chip_id_lsb);
463 return -ENODEV;
464 }
465
466 decoder->ver = rom_ver;
467 decoder->state = STATE_DETECTED;
468
469 v4l_info(decoder->client,
470 "%s found at 0x%x (%s)\n", decoder->client->name,
471 decoder->client->addr << 1,
472 decoder->client->adapter->name);
473 return 0;
474}
475
476/*
477 * Following are decoder interface functions implemented by
478 * TVP5146/47 decoder driver.
479 */
480
481/**
482 * ioctl_querystd - V4L2 decoder interface handler for VIDIOC_QUERYSTD ioctl
483 * @s: pointer to standard V4L2 device structure
484 * @std_id: standard V4L2 std_id ioctl enum
485 *
486 * Returns the current standard detected by TVP5146/47. If no active input is
487 * detected, returns -EINVAL
488 */
489static int ioctl_querystd(struct v4l2_int_device *s, v4l2_std_id *std_id)
490{
491 struct tvp514x_decoder *decoder = s->priv;
492 enum tvp514x_std current_std;
493 enum tvp514x_input input_sel;
494 u8 sync_lock_status, lock_mask;
495
496 if (std_id == NULL)
497 return -EINVAL;
498
499 /* get the current standard */
500 current_std = tvp514x_get_current_std(decoder);
501 if (current_std == STD_INVALID)
502 return -EINVAL;
503
504 input_sel = decoder->route.input;
505
506 switch (input_sel) {
507 case INPUT_CVBS_VI1A:
508 case INPUT_CVBS_VI1B:
509 case INPUT_CVBS_VI1C:
510 case INPUT_CVBS_VI2A:
511 case INPUT_CVBS_VI2B:
512 case INPUT_CVBS_VI2C:
513 case INPUT_CVBS_VI3A:
514 case INPUT_CVBS_VI3B:
515 case INPUT_CVBS_VI3C:
516 case INPUT_CVBS_VI4A:
517 lock_mask = STATUS_CLR_SUBCAR_LOCK_BIT |
518 STATUS_HORZ_SYNC_LOCK_BIT |
519 STATUS_VIRT_SYNC_LOCK_BIT;
520 break;
521
522 case INPUT_SVIDEO_VI2A_VI1A:
523 case INPUT_SVIDEO_VI2B_VI1B:
524 case INPUT_SVIDEO_VI2C_VI1C:
525 case INPUT_SVIDEO_VI2A_VI3A:
526 case INPUT_SVIDEO_VI2B_VI3B:
527 case INPUT_SVIDEO_VI2C_VI3C:
528 case INPUT_SVIDEO_VI4A_VI1A:
529 case INPUT_SVIDEO_VI4A_VI1B:
530 case INPUT_SVIDEO_VI4A_VI1C:
531 case INPUT_SVIDEO_VI4A_VI3A:
532 case INPUT_SVIDEO_VI4A_VI3B:
533 case INPUT_SVIDEO_VI4A_VI3C:
534 lock_mask = STATUS_HORZ_SYNC_LOCK_BIT |
535 STATUS_VIRT_SYNC_LOCK_BIT;
536 break;
537 /*Need to add other interfaces*/
538 default:
539 return -EINVAL;
540 }
541 /* check whether signal is locked */
542 sync_lock_status = tvp514x_read_reg(decoder->client, REG_STATUS1);
543 if (lock_mask != (sync_lock_status & lock_mask))
544 return -EINVAL; /* No input detected */
545
546 decoder->current_std = current_std;
547 *std_id = decoder->std_list[current_std].standard.id;
548
549 v4l_dbg(1, debug, decoder->client, "Current STD: %s",
550 decoder->std_list[current_std].standard.name);
551 return 0;
552}
553
554/**
555 * ioctl_s_std - V4L2 decoder interface handler for VIDIOC_S_STD ioctl
556 * @s: pointer to standard V4L2 device structure
557 * @std_id: standard V4L2 v4l2_std_id ioctl enum
558 *
559 * If std_id is supported, sets the requested standard. Otherwise, returns
560 * -EINVAL
561 */
562static int ioctl_s_std(struct v4l2_int_device *s, v4l2_std_id *std_id)
563{
564 struct tvp514x_decoder *decoder = s->priv;
565 int err, i;
566
567 if (std_id == NULL)
568 return -EINVAL;
569
570 for (i = 0; i < decoder->num_stds; i++)
571 if (*std_id & decoder->std_list[i].standard.id)
572 break;
573
574 if ((i == decoder->num_stds) || (i == STD_INVALID))
575 return -EINVAL;
576
577 err = tvp514x_write_reg(decoder->client, REG_VIDEO_STD,
578 decoder->std_list[i].video_std);
579 if (err)
580 return err;
581
582 decoder->current_std = i;
583 tvp514x_reg_list[REG_VIDEO_STD].val = decoder->std_list[i].video_std;
584
585 v4l_dbg(1, debug, decoder->client, "Standard set to: %s",
586 decoder->std_list[i].standard.name);
587 return 0;
588}
589
590/**
591 * ioctl_s_routing - V4L2 decoder interface handler for VIDIOC_S_INPUT ioctl
592 * @s: pointer to standard V4L2 device structure
593 * @index: number of the input
594 *
595 * If index is valid, selects the requested input. Otherwise, returns -EINVAL if
596 * the input is not supported or there is no active signal present in the
597 * selected input.
598 */
599static int ioctl_s_routing(struct v4l2_int_device *s,
600 struct v4l2_routing *route)
601{
602 struct tvp514x_decoder *decoder = s->priv;
603 int err;
604 enum tvp514x_input input_sel;
605 enum tvp514x_output output_sel;
606 enum tvp514x_std current_std = STD_INVALID;
607 u8 sync_lock_status, lock_mask;
608 int try_count = LOCK_RETRY_COUNT;
609
610 if ((!route) || (route->input >= INPUT_INVALID) ||
611 (route->output >= OUTPUT_INVALID))
612 return -EINVAL; /* Index out of bound */
613
614 input_sel = route->input;
615 output_sel = route->output;
616
617 err = tvp514x_write_reg(decoder->client, REG_INPUT_SEL, input_sel);
618 if (err)
619 return err;
620
621 output_sel |= tvp514x_read_reg(decoder->client,
622 REG_OUTPUT_FORMATTER1) & 0x7;
623 err = tvp514x_write_reg(decoder->client, REG_OUTPUT_FORMATTER1,
624 output_sel);
625 if (err)
626 return err;
627
628 tvp514x_reg_list[REG_INPUT_SEL].val = input_sel;
629 tvp514x_reg_list[REG_OUTPUT_FORMATTER1].val = output_sel;
630
631 /* Clear status */
632 msleep(LOCK_RETRY_DELAY);
633 err =
634 tvp514x_write_reg(decoder->client, REG_CLEAR_LOST_LOCK, 0x01);
635 if (err)
636 return err;
637
638 switch (input_sel) {
639 case INPUT_CVBS_VI1A:
640 case INPUT_CVBS_VI1B:
641 case INPUT_CVBS_VI1C:
642 case INPUT_CVBS_VI2A:
643 case INPUT_CVBS_VI2B:
644 case INPUT_CVBS_VI2C:
645 case INPUT_CVBS_VI3A:
646 case INPUT_CVBS_VI3B:
647 case INPUT_CVBS_VI3C:
648 case INPUT_CVBS_VI4A:
649 lock_mask = STATUS_CLR_SUBCAR_LOCK_BIT |
650 STATUS_HORZ_SYNC_LOCK_BIT |
651 STATUS_VIRT_SYNC_LOCK_BIT;
652 break;
653
654 case INPUT_SVIDEO_VI2A_VI1A:
655 case INPUT_SVIDEO_VI2B_VI1B:
656 case INPUT_SVIDEO_VI2C_VI1C:
657 case INPUT_SVIDEO_VI2A_VI3A:
658 case INPUT_SVIDEO_VI2B_VI3B:
659 case INPUT_SVIDEO_VI2C_VI3C:
660 case INPUT_SVIDEO_VI4A_VI1A:
661 case INPUT_SVIDEO_VI4A_VI1B:
662 case INPUT_SVIDEO_VI4A_VI1C:
663 case INPUT_SVIDEO_VI4A_VI3A:
664 case INPUT_SVIDEO_VI4A_VI3B:
665 case INPUT_SVIDEO_VI4A_VI3C:
666 lock_mask = STATUS_HORZ_SYNC_LOCK_BIT |
667 STATUS_VIRT_SYNC_LOCK_BIT;
668 break;
669 /*Need to add other interfaces*/
670 default:
671 return -EINVAL;
672 }
673
674 while (try_count-- > 0) {
675 /* Allow decoder to sync up with new input */
676 msleep(LOCK_RETRY_DELAY);
677
678 /* get the current standard for future reference */
679 current_std = tvp514x_get_current_std(decoder);
680 if (current_std == STD_INVALID)
681 continue;
682
683 sync_lock_status = tvp514x_read_reg(decoder->client,
684 REG_STATUS1);
685 if (lock_mask == (sync_lock_status & lock_mask))
686 break; /* Input detected */
687 }
688
689 if ((current_std == STD_INVALID) || (try_count < 0))
690 return -EINVAL;
691
692 decoder->current_std = current_std;
693 decoder->route.input = route->input;
694 decoder->route.output = route->output;
695
696 v4l_dbg(1, debug, decoder->client,
697 "Input set to: %d, std : %d",
698 input_sel, current_std);
699
700 return 0;
701}
702
703/**
704 * ioctl_queryctrl - V4L2 decoder interface handler for VIDIOC_QUERYCTRL ioctl
705 * @s: pointer to standard V4L2 device structure
706 * @qctrl: standard V4L2 v4l2_queryctrl structure
707 *
708 * If the requested control is supported, returns the control information.
709 * Otherwise, returns -EINVAL if the control is not supported.
710 */
711static int
712ioctl_queryctrl(struct v4l2_int_device *s, struct v4l2_queryctrl *qctrl)
713{
714 struct tvp514x_decoder *decoder = s->priv;
715 int err = -EINVAL;
716
717 if (qctrl == NULL)
718 return err;
719
720 switch (qctrl->id) {
721 case V4L2_CID_BRIGHTNESS:
722 /* Brightness supported is same as standard one (0-255),
723 * so make use of standard API provided.
724 */
725 err = v4l2_ctrl_query_fill_std(qctrl);
726 break;
727 case V4L2_CID_CONTRAST:
728 case V4L2_CID_SATURATION:
729		/* Contrast and Saturation ranges supported -
730 * Contrast: 0 - 255 (Default - 128)
731 * Saturation: 0 - 255 (Default - 128)
732 */
733 err = v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 128);
734 break;
735 case V4L2_CID_HUE:
736		/* Hue range supported -
737		 * Hue: -180 to +180 (Default 0, Step 180)
738 */
739 err = v4l2_ctrl_query_fill(qctrl, -180, 180, 180, 0);
740 break;
741 case V4L2_CID_AUTOGAIN:
742 /* Autogain is either 0 or 1*/
743 memcpy(qctrl, &tvp514x_autogain_ctrl,
744 sizeof(struct v4l2_queryctrl));
745 err = 0;
746 break;
747 default:
748 v4l_err(decoder->client,
749 "invalid control id %d\n", qctrl->id);
750 return err;
751 }
752
753 v4l_dbg(1, debug, decoder->client,
754 "Query Control: %s : Min - %d, Max - %d, Def - %d",
755 qctrl->name,
756 qctrl->minimum,
757 qctrl->maximum,
758 qctrl->default_value);
759
760 return err;
761}
762
763/**
764 * ioctl_g_ctrl - V4L2 decoder interface handler for VIDIOC_G_CTRL ioctl
765 * @s: pointer to standard V4L2 device structure
766 * @ctrl: pointer to v4l2_control structure
767 *
768 * If the requested control is supported, returns the control's current
769 * value from the decoder. Otherwise, returns -EINVAL if the control is not
770 * supported.
771 */
772static int
773ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *ctrl)
774{
775 struct tvp514x_decoder *decoder = s->priv;
776
777 if (ctrl == NULL)
778 return -EINVAL;
779
780 switch (ctrl->id) {
781 case V4L2_CID_BRIGHTNESS:
782 ctrl->value = tvp514x_reg_list[REG_BRIGHTNESS].val;
783 break;
784 case V4L2_CID_CONTRAST:
785 ctrl->value = tvp514x_reg_list[REG_CONTRAST].val;
786 break;
787 case V4L2_CID_SATURATION:
788 ctrl->value = tvp514x_reg_list[REG_SATURATION].val;
789 break;
790 case V4L2_CID_HUE:
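		/* The HUE register supports only three coarse settings:
		 * 0x7F is reported as +180 degrees, 0x80 as -180 degrees,
		 * and anything else as 0. */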
791 ctrl->value = tvp514x_reg_list[REG_HUE].val;
792 if (ctrl->value == 0x7F)
793 ctrl->value = 180;
794 else if (ctrl->value == 0x80)
795 ctrl->value = -180;
796 else
797 ctrl->value = 0;
798
799 break;
800 case V4L2_CID_AUTOGAIN:
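		/* The two low bits of the AFE gain control register are used
		 * as the autogain flag here: only when both are set (0x3) is
		 * automatic gain reported as enabled. */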
801 ctrl->value = tvp514x_reg_list[REG_AFE_GAIN_CTRL].val;
802 if ((ctrl->value & 0x3) == 3)
803 ctrl->value = 1;
804 else
805 ctrl->value = 0;
806
807 break;
808 default:
809 v4l_err(decoder->client,
810 "invalid control id %d\n", ctrl->id);
811 return -EINVAL;
812 }
813
814 v4l_dbg(1, debug, decoder->client,
815 "Get Control: ID - %d - %d",
816 ctrl->id, ctrl->value);
817 return 0;
818}
819
820/**
821 * ioctl_s_ctrl - V4L2 decoder interface handler for VIDIOC_S_CTRL ioctl
822 * @s: pointer to standard V4L2 device structure
823 * @ctrl: pointer to v4l2_control structure
824 *
825 * If the requested control is supported, sets the control's current
826 * value in HW. Otherwise, returns -EINVAL if the control is not supported.
827 */
828static int
829ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *ctrl)
830{
831 struct tvp514x_decoder *decoder = s->priv;
832 int err = -EINVAL, value;
833
834 if (ctrl == NULL)
835 return err;
836
837 value = (__s32) ctrl->value;
838
839 switch (ctrl->id) {
840 case V4L2_CID_BRIGHTNESS:
841 if (ctrl->value < 0 || ctrl->value > 255) {
842 v4l_err(decoder->client,
843 "invalid brightness setting %d\n",
844 ctrl->value);
845 return -ERANGE;
846 }
847 err = tvp514x_write_reg(decoder->client, REG_BRIGHTNESS,
848 value);
849 if (err)
850 return err;
851 tvp514x_reg_list[REG_BRIGHTNESS].val = value;
852 break;
853 case V4L2_CID_CONTRAST:
854 if (ctrl->value < 0 || ctrl->value > 255) {
855 v4l_err(decoder->client,
856 "invalid contrast setting %d\n",
857 ctrl->value);
858 return -ERANGE;
859 }
860 err = tvp514x_write_reg(decoder->client, REG_CONTRAST,
861 value);
862 if (err)
863 return err;
864 tvp514x_reg_list[REG_CONTRAST].val = value;
865 break;
866 case V4L2_CID_SATURATION:
867 if (ctrl->value < 0 || ctrl->value > 255) {
868 v4l_err(decoder->client,
869 "invalid saturation setting %d\n",
870 ctrl->value);
871 return -ERANGE;
872 }
873 err = tvp514x_write_reg(decoder->client, REG_SATURATION,
874 value);
875 if (err)
876 return err;
877 tvp514x_reg_list[REG_SATURATION].val = value;
878 break;
879 case V4L2_CID_HUE:
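		/* Only the three coarse hue values are accepted; they map to
		 * the register encodings 0x7F (+180), 0x80 (-180) and 0x00 (0). */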
880 if (value == 180)
881 value = 0x7F;
882 else if (value == -180)
883 value = 0x80;
884 else if (value == 0)
885 value = 0;
886 else {
887 v4l_err(decoder->client,
888 "invalid hue setting %d\n",
889 ctrl->value);
890 return -ERANGE;
891 }
892 err = tvp514x_write_reg(decoder->client, REG_HUE,
893 value);
894 if (err)
895 return err;
896 tvp514x_reg_list[REG_HUE].val = value;
897 break;
898 case V4L2_CID_AUTOGAIN:
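		/* 0x0F written to REG_AFE_GAIN_CTRL enables automatic gain,
		 * 0x0C disables it; any other requested value is rejected
		 * with -ERANGE. */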
899 if (value == 1)
900 value = 0x0F;
901 else if (value == 0)
902 value = 0x0C;
903 else {
904 v4l_err(decoder->client,
905 "invalid auto gain setting %d\n",
906 ctrl->value);
907 return -ERANGE;
908 }
909 err = tvp514x_write_reg(decoder->client, REG_AFE_GAIN_CTRL,
910 value);
911 if (err)
912 return err;
913 tvp514x_reg_list[REG_AFE_GAIN_CTRL].val = value;
914 break;
915 default:
916 v4l_err(decoder->client,
917 "invalid control id %d\n", ctrl->id);
918 return err;
919 }
920
921 v4l_dbg(1, debug, decoder->client,
922 "Set Control: ID - %d - %d",
923 ctrl->id, ctrl->value);
924
925 return err;
926}
927
928/**
929 * ioctl_enum_fmt_cap - Implement the CAPTURE buffer VIDIOC_ENUM_FMT ioctl
930 * @s: pointer to standard V4L2 device structure
931 * @fmt: standard V4L2 VIDIOC_ENUM_FMT ioctl structure
932 *
933 * Implement the VIDIOC_ENUM_FMT ioctl to enumerate supported formats
934 */
935static int
936ioctl_enum_fmt_cap(struct v4l2_int_device *s, struct v4l2_fmtdesc *fmt)
937{
938 struct tvp514x_decoder *decoder = s->priv;
939 int index;
940
941 if (fmt == NULL)
942 return -EINVAL;
943
944 index = fmt->index;
945 if ((index >= decoder->num_fmts) || (index < 0))
946 return -EINVAL; /* Index out of bound */
947
948 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
949 return -EINVAL; /* only capture is supported */
950
951 memcpy(fmt, &decoder->fmt_list[index],
952 sizeof(struct v4l2_fmtdesc));
953
954 v4l_dbg(1, debug, decoder->client,
955 "Current FMT: index - %d (%s)",
956 decoder->fmt_list[index].index,
957 decoder->fmt_list[index].description);
958 return 0;
959}
960
961/**
962 * ioctl_try_fmt_cap - Implement the CAPTURE buffer VIDIOC_TRY_FMT ioctl
963 * @s: pointer to standard V4L2 device structure
964 * @f: pointer to standard V4L2 VIDIOC_TRY_FMT ioctl structure
965 *
966 * Implement the VIDIOC_TRY_FMT ioctl for the CAPTURE buffer type. This
967 * ioctl is used to negotiate the image capture size and pixel format
968 * without actually making it take effect.
969 */
970static int
971ioctl_try_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
972{
973 struct tvp514x_decoder *decoder = s->priv;
974 int ifmt;
975 struct v4l2_pix_format *pix;
976 enum tvp514x_std current_std;
977
978 if (f == NULL)
979 return -EINVAL;
980
981 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
982 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
983
984 pix = &f->fmt.pix;
985
986 /* Calculate height and width based on current standard */
987 current_std = tvp514x_get_current_std(decoder);
988 if (current_std == STD_INVALID)
989 return -EINVAL;
990
991 decoder->current_std = current_std;
992 pix->width = decoder->std_list[current_std].width;
993 pix->height = decoder->std_list[current_std].height;
994
995 for (ifmt = 0; ifmt < decoder->num_fmts; ifmt++) {
996 if (pix->pixelformat ==
997 decoder->fmt_list[ifmt].pixelformat)
998 break;
999 }
1000 if (ifmt == decoder->num_fmts)
1001 ifmt = 0; /* None of the format matched, select default */
1002 pix->pixelformat = decoder->fmt_list[ifmt].pixelformat;
1003
1004 pix->field = V4L2_FIELD_INTERLACED;
1005 pix->bytesperline = pix->width * 2;
1006 pix->sizeimage = pix->bytesperline * pix->height;
1007 pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
1008 pix->priv = 0;
1009
1010 v4l_dbg(1, debug, decoder->client,
1011 "Try FMT: pixelformat - %s, bytesperline - %d"
1012 "Width - %d, Height - %d",
1013 decoder->fmt_list[ifmt].description, pix->bytesperline,
1014 pix->width, pix->height);
1015 return 0;
1016}
1017
1018/**
1019 * ioctl_s_fmt_cap - V4L2 decoder interface handler for VIDIOC_S_FMT ioctl
1020 * @s: pointer to standard V4L2 device structure
1021 * @f: pointer to standard V4L2 VIDIOC_S_FMT ioctl structure
1022 *
1023 * If the requested format is supported, configures the HW to use that
1024 * format, returns error code if format not supported or HW can't be
1025 * correctly configured.
1026 */
1027static int
1028ioctl_s_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
1029{
1030 struct tvp514x_decoder *decoder = s->priv;
1031 struct v4l2_pix_format *pix;
1032 int rval;
1033
1034 if (f == NULL)
1035 return -EINVAL;
1036
1037 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1038 return -EINVAL; /* only capture is supported */
1039
1040 pix = &f->fmt.pix;
1041 rval = ioctl_try_fmt_cap(s, f);
1042 if (rval)
1043 return rval;
1044
1045 decoder->pix = *pix;
1046
1047 return rval;
1048}
1049
1050/**
1051 * ioctl_g_fmt_cap - V4L2 decoder interface handler for ioctl_g_fmt_cap
1052 * @s: pointer to standard V4L2 device structure
1053 * @f: pointer to standard V4L2 v4l2_format structure
1054 *
1055 * Returns the decoder's current pixel format in the v4l2_format
1056 * parameter.
1057 */
1058static int
1059ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
1060{
1061 struct tvp514x_decoder *decoder = s->priv;
1062
1063 if (f == NULL)
1064 return -EINVAL;
1065
1066 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1067 return -EINVAL; /* only capture is supported */
1068
1069 f->fmt.pix = decoder->pix;
1070
1071 v4l_dbg(1, debug, decoder->client,
1072 "Current FMT: bytesperline - %d"
1073 "Width - %d, Height - %d",
1074 decoder->pix.bytesperline,
1075 decoder->pix.width, decoder->pix.height);
1076 return 0;
1077}
1078
1079/**
1080 * ioctl_g_parm - V4L2 decoder interface handler for VIDIOC_G_PARM ioctl
1081 * @s: pointer to standard V4L2 device structure
1082 * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
1083 *
1084 * Returns the decoder's video CAPTURE parameters.
1085 */
1086static int
1087ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
1088{
1089 struct tvp514x_decoder *decoder = s->priv;
1090 struct v4l2_captureparm *cparm;
1091 enum tvp514x_std current_std;
1092
1093 if (a == NULL)
1094 return -EINVAL;
1095
1096 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1097 return -EINVAL; /* only capture is supported */
1098
1099 memset(a, 0, sizeof(*a));
1100 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1101
1102 /* get the current standard */
1103 current_std = tvp514x_get_current_std(decoder);
1104 if (current_std == STD_INVALID)
1105 return -EINVAL;
1106
1107 decoder->current_std = current_std;
1108
1109 cparm = &a->parm.capture;
1110 cparm->capability = V4L2_CAP_TIMEPERFRAME;
1111 cparm->timeperframe =
1112 decoder->std_list[current_std].standard.frameperiod;
1113
1114 return 0;
1115}
1116
1117/**
1118 * ioctl_s_parm - V4L2 decoder interface handler for VIDIOC_S_PARM ioctl
1119 * @s: pointer to standard V4L2 device structure
1120 * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
1121 *
1122 * Configures the decoder to use the input parameters, if possible. If
1123 * not possible, returns the appropriate error code.
1124 */
1125static int
1126ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
1127{
1128 struct tvp514x_decoder *decoder = s->priv;
1129 struct v4l2_fract *timeperframe;
1130 enum tvp514x_std current_std;
1131
1132 if (a == NULL)
1133 return -EINVAL;
1134
1135 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1136 return -EINVAL; /* only capture is supported */
1137
1138 timeperframe = &a->parm.capture.timeperframe;
1139
1140 /* get the current standard */
1141 current_std = tvp514x_get_current_std(decoder);
1142 if (current_std == STD_INVALID)
1143 return -EINVAL;
1144
1145 decoder->current_std = current_std;
1146
1147 *timeperframe =
1148 decoder->std_list[current_std].standard.frameperiod;
1149
1150 return 0;
1151}
1152
1153/**
1154 * ioctl_g_ifparm - V4L2 decoder interface handler for vidioc_int_g_ifparm_num
1155 * @s: pointer to standard V4L2 device structure
1156 * @p: pointer to standard V4L2 vidioc_int_g_ifparm_num ioctl structure
1157 *
1158 * Gets slave interface parameters.
1159 * Calculates the required xclk value to support the requested
1160 * clock parameters in p. This value is returned in the p
1161 * parameter.
1162 */
1163static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
1164{
1165 struct tvp514x_decoder *decoder = s->priv;
1166 int rval;
1167
1168 if (p == NULL)
1169 return -EINVAL;
1170
1171 if (NULL == decoder->pdata->ifparm)
1172 return -EINVAL;
1173
1174 rval = decoder->pdata->ifparm(p);
1175 if (rval) {
1176 v4l_err(decoder->client, "g_ifparm.Err[%d]\n", rval);
1177 return rval;
1178 }
1179
1180 p->u.bt656.clock_curr = TVP514X_XCLK_BT656;
1181
1182 return 0;
1183}
1184
1185/**
1186 * ioctl_g_priv - V4L2 decoder interface handler for vidioc_int_g_priv_num
1187 * @s: pointer to standard V4L2 device structure
1188 * @p: void pointer to hold decoder's private data address
1189 *
1190 * Returns device's (decoder's) private data area address in p parameter
1191 */
1192static int ioctl_g_priv(struct v4l2_int_device *s, void *p)
1193{
1194 struct tvp514x_decoder *decoder = s->priv;
1195
1196 if (NULL == decoder->pdata->priv_data_set)
1197 return -EINVAL;
1198
1199 return decoder->pdata->priv_data_set(p);
1200}
1201
1202/**
1203 * ioctl_s_power - V4L2 decoder interface handler for vidioc_int_s_power_num
1204 * @s: pointer to standard V4L2 device structure
1205 * @on: power state to which device is to be set
1206 *
1207 * Sets the device's power state to the requested state, if possible.
1208 */
1209static int ioctl_s_power(struct v4l2_int_device *s, enum v4l2_power on)
1210{
1211 struct tvp514x_decoder *decoder = s->priv;
1212 int err = 0;
1213
1214 switch (on) {
1215 case V4L2_POWER_OFF:
1216 /* Power Down Sequence */
1217 err =
1218 tvp514x_write_reg(decoder->client, REG_OPERATION_MODE,
1219 0x01);
1220 /* Disable mux for TVP5146/47 decoder data path */
1221 if (decoder->pdata->power_set)
1222 err |= decoder->pdata->power_set(on);
1223 decoder->state = STATE_NOT_DETECTED;
1224 break;
1225
1226 case V4L2_POWER_STANDBY:
1227 if (decoder->pdata->power_set)
1228 err = decoder->pdata->power_set(on);
1229 break;
1230
1231 case V4L2_POWER_ON:
1232 /* Enable mux for TVP5146/47 decoder data path */
1233 if ((decoder->pdata->power_set) &&
1234 (decoder->state == STATE_NOT_DETECTED)) {
1235 int i;
1236 struct tvp514x_init_seq *int_seq =
1237 (struct tvp514x_init_seq *)
1238 decoder->id->driver_data;
1239
1240 err = decoder->pdata->power_set(on);
1241
1242 /* Power Up Sequence */
1243 for (i = 0; i < int_seq->no_regs; i++) {
1244 err |= tvp514x_write_reg(decoder->client,
1245 int_seq->init_reg_seq[i].reg,
1246 int_seq->init_reg_seq[i].val);
1247 }
1248			/* Detect the decoder if it is not already detected */
1249 err |= tvp514x_detect(decoder);
1250 if (err) {
1251 v4l_err(decoder->client,
1252 "Unable to detect decoder\n");
1253 return err;
1254 }
1255 }
1256 err |= tvp514x_configure(decoder);
1257 break;
1258
1259 default:
1260 err = -ENODEV;
1261 break;
1262 }
1263
1264 return err;
1265}
1266
1267/**
1268 * ioctl_init - V4L2 decoder interface handler for VIDIOC_INT_INIT
1269 * @s: pointer to standard V4L2 device structure
1270 *
1271 * Initialize the decoder device (calls tvp514x_configure())
1272 */
1273static int ioctl_init(struct v4l2_int_device *s)
1274{
1275 struct tvp514x_decoder *decoder = s->priv;
1276
1277 /* Set default standard to auto */
1278 tvp514x_reg_list[REG_VIDEO_STD].val =
1279 VIDEO_STD_AUTO_SWITCH_BIT;
1280
1281 return tvp514x_configure(decoder);
1282}
1283
1284/**
1285 * ioctl_dev_exit - V4L2 decoder interface handler for vidioc_int_dev_exit_num
1286 * @s: pointer to standard V4L2 device structure
1287 *
1288 * De-initialise the device at slave detach. The complement of ioctl_dev_init.
1289 */
1290static int ioctl_dev_exit(struct v4l2_int_device *s)
1291{
1292 return 0;
1293}
1294
1295/**
1296 * ioctl_dev_init - V4L2 decoder interface handler for vidioc_int_dev_init_num
1297 * @s: pointer to standard V4L2 device structure
1298 *
1299 * Initialise the device when slave attaches to the master. Returns 0 if
1300 * TVP5146/47 device could be found, otherwise returns appropriate error.
1301 */
1302static int ioctl_dev_init(struct v4l2_int_device *s)
1303{
1304 struct tvp514x_decoder *decoder = s->priv;
1305 int err;
1306
1307 err = tvp514x_detect(decoder);
1308 if (err < 0) {
1309 v4l_err(decoder->client,
1310 "Unable to detect decoder\n");
1311 return err;
1312 }
1313
1314 v4l_info(decoder->client,
1315 "chip version 0x%.2x detected\n", decoder->ver);
1316
1317 return 0;
1318}
1319
1320static struct v4l2_int_ioctl_desc tvp514x_ioctl_desc[] = {
1321 {vidioc_int_dev_init_num, (v4l2_int_ioctl_func*) ioctl_dev_init},
1322 {vidioc_int_dev_exit_num, (v4l2_int_ioctl_func*) ioctl_dev_exit},
1323 {vidioc_int_s_power_num, (v4l2_int_ioctl_func*) ioctl_s_power},
1324 {vidioc_int_g_priv_num, (v4l2_int_ioctl_func*) ioctl_g_priv},
1325 {vidioc_int_g_ifparm_num, (v4l2_int_ioctl_func*) ioctl_g_ifparm},
1326 {vidioc_int_init_num, (v4l2_int_ioctl_func*) ioctl_init},
1327 {vidioc_int_enum_fmt_cap_num,
1328 (v4l2_int_ioctl_func *) ioctl_enum_fmt_cap},
1329 {vidioc_int_try_fmt_cap_num,
1330 (v4l2_int_ioctl_func *) ioctl_try_fmt_cap},
1331 {vidioc_int_g_fmt_cap_num,
1332 (v4l2_int_ioctl_func *) ioctl_g_fmt_cap},
1333 {vidioc_int_s_fmt_cap_num,
1334 (v4l2_int_ioctl_func *) ioctl_s_fmt_cap},
1335 {vidioc_int_g_parm_num, (v4l2_int_ioctl_func *) ioctl_g_parm},
1336 {vidioc_int_s_parm_num, (v4l2_int_ioctl_func *) ioctl_s_parm},
1337 {vidioc_int_queryctrl_num,
1338 (v4l2_int_ioctl_func *) ioctl_queryctrl},
1339 {vidioc_int_g_ctrl_num, (v4l2_int_ioctl_func *) ioctl_g_ctrl},
1340 {vidioc_int_s_ctrl_num, (v4l2_int_ioctl_func *) ioctl_s_ctrl},
1341 {vidioc_int_querystd_num, (v4l2_int_ioctl_func *) ioctl_querystd},
1342 {vidioc_int_s_std_num, (v4l2_int_ioctl_func *) ioctl_s_std},
1343 {vidioc_int_s_video_routing_num,
1344 (v4l2_int_ioctl_func *) ioctl_s_routing},
1345};
1346
1347static struct v4l2_int_slave tvp514x_slave = {
1348 .ioctls = tvp514x_ioctl_desc,
1349 .num_ioctls = ARRAY_SIZE(tvp514x_ioctl_desc),
1350};
1351
1352static struct tvp514x_decoder tvp514x_dev = {
1353 .state = STATE_NOT_DETECTED,
1354
1355 .fmt_list = tvp514x_fmt_list,
1356 .num_fmts = ARRAY_SIZE(tvp514x_fmt_list),
1357
1358 .pix = { /* Default to NTSC 8-bit YUV 422 */
1359 .width = NTSC_NUM_ACTIVE_PIXELS,
1360 .height = NTSC_NUM_ACTIVE_LINES,
1361 .pixelformat = V4L2_PIX_FMT_UYVY,
1362 .field = V4L2_FIELD_INTERLACED,
1363 .bytesperline = NTSC_NUM_ACTIVE_PIXELS * 2,
1364 .sizeimage =
1365 NTSC_NUM_ACTIVE_PIXELS * 2 * NTSC_NUM_ACTIVE_LINES,
1366 .colorspace = V4L2_COLORSPACE_SMPTE170M,
1367 },
1368
1369 .current_std = STD_NTSC_MJ,
1370 .std_list = tvp514x_std_list,
1371 .num_stds = ARRAY_SIZE(tvp514x_std_list),
1372
1373};
1374
1375static struct v4l2_int_device tvp514x_int_device = {
1376 .module = THIS_MODULE,
1377 .name = TVP514X_MODULE_NAME,
1378 .priv = &tvp514x_dev,
1379 .type = v4l2_int_type_slave,
1380 .u = {
1381 .slave = &tvp514x_slave,
1382 },
1383};
1384
1385/**
1386 * tvp514x_probe - decoder driver i2c probe handler
1387 * @client: i2c driver client device structure
1388 *
1389 * Register decoder as an i2c client device and V4L2
1390 * device.
1391 */
1392static int
1393tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
1394{
1395 struct tvp514x_decoder *decoder = &tvp514x_dev;
1396 int err;
1397
1398 /* Check if the adapter supports the needed features */
1399 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
1400 return -EIO;
1401
1402 decoder->pdata = client->dev.platform_data;
1403 if (!decoder->pdata) {
1404		v4l_err(client, "No platform data!!\n");
1405 return -ENODEV;
1406 }
1407 /*
1408 * Fetch platform specific data, and configure the
1409 * tvp514x_reg_list[] accordingly. Since this is one
1410 * time configuration, no need to preserve.
1411 */
1412 tvp514x_reg_list[REG_OUTPUT_FORMATTER2].val |=
1413 (decoder->pdata->clk_polarity << 1);
1414 tvp514x_reg_list[REG_SYNC_CONTROL].val |=
1415 ((decoder->pdata->hs_polarity << 2) |
1416 (decoder->pdata->vs_polarity << 3));
1417 /*
1418 * Save the id data, required for power up sequence
1419 */
1420 decoder->id = (struct i2c_device_id *)id;
1421 /* Attach to Master */
1422 strcpy(tvp514x_int_device.u.slave->attach_to, decoder->pdata->master);
1423 decoder->v4l2_int_device = &tvp514x_int_device;
1424 decoder->client = client;
1425 i2c_set_clientdata(client, decoder);
1426
1427 /* Register with V4L2 layer as slave device */
1428 err = v4l2_int_device_register(decoder->v4l2_int_device);
1429 if (err) {
1430 i2c_set_clientdata(client, NULL);
1431 v4l_err(client,
1432 "Unable to register to v4l2. Err[%d]\n", err);
1433
1434 } else
1435 v4l_info(client, "Registered to v4l2 master %s!!\n",
1436 decoder->pdata->master);
1437
1438	return err;
1439}
1440
1441/**
1442 * tvp514x_remove - decoder driver i2c remove handler
1443 * @client: i2c driver client device structure
1444 *
1445 * Unregister decoder as an i2c client device and V4L2
1446 * device. Complement of tvp514x_probe().
1447 */
1448static int __exit tvp514x_remove(struct i2c_client *client)
1449{
1450 struct tvp514x_decoder *decoder = i2c_get_clientdata(client);
1451
1452 if (!client->adapter)
1453 return -ENODEV; /* our client isn't attached */
1454
1455 v4l2_int_device_unregister(decoder->v4l2_int_device);
1456 i2c_set_clientdata(client, NULL);
1457
1458 return 0;
1459}
1460/*
1461 * TVP5146 Init/Power on Sequence
1462 */
1463static const struct tvp514x_reg tvp5146_init_reg_seq[] = {
1464 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x02},
1465 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
1466 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0x80},
1467 {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x01},
1468 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x60},
1469 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
1470 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0xB0},
1471 {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x01},
1472 {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x00},
1473 {TOK_WRITE, REG_OPERATION_MODE, 0x01},
1474 {TOK_WRITE, REG_OPERATION_MODE, 0x00},
1475};
1476static const struct tvp514x_init_seq tvp5146_init = {
1477 .no_regs = ARRAY_SIZE(tvp5146_init_reg_seq),
1478 .init_reg_seq = tvp5146_init_reg_seq,
1479};
1480/*
1481 * TVP5147 Init/Power on Sequence
1482 */
1483static const struct tvp514x_reg tvp5147_init_reg_seq[] = {
1484 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x02},
1485 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
1486 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0x80},
1487 {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x01},
1488 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x60},
1489 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
1490 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0xB0},
1491 {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x01},
1492 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x16},
1493 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
1494 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0xA0},
1495 {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x16},
1496 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS1, 0x60},
1497 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS2, 0x00},
1498 {TOK_WRITE, REG_VBUS_ADDRESS_ACCESS3, 0xB0},
1499 {TOK_WRITE, REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR, 0x00},
1500 {TOK_WRITE, REG_OPERATION_MODE, 0x01},
1501 {TOK_WRITE, REG_OPERATION_MODE, 0x00},
1502};
1503static const struct tvp514x_init_seq tvp5147_init = {
1504 .no_regs = ARRAY_SIZE(tvp5147_init_reg_seq),
1505 .init_reg_seq = tvp5147_init_reg_seq,
1506};
1507/*
1508 * TVP5146M2/TVP5147M1 Init/Power on Sequence
1509 */
1510static const struct tvp514x_reg tvp514xm_init_reg_seq[] = {
1511 {TOK_WRITE, REG_OPERATION_MODE, 0x01},
1512 {TOK_WRITE, REG_OPERATION_MODE, 0x00},
1513};
1514static const struct tvp514x_init_seq tvp514xm_init = {
1515 .no_regs = ARRAY_SIZE(tvp514xm_init_reg_seq),
1516 .init_reg_seq = tvp514xm_init_reg_seq,
1517};
1518/*
1519 * I2C Device Table -
1520 *
1521 * name - Name of the actual device/chip.
1522 * driver_data - Driver data
1523 */
1524static const struct i2c_device_id tvp514x_id[] = {
1525 {"tvp5146", (unsigned long)&tvp5146_init},
1526 {"tvp5146m2", (unsigned long)&tvp514xm_init},
1527 {"tvp5147", (unsigned long)&tvp5147_init},
1528 {"tvp5147m1", (unsigned long)&tvp514xm_init},
1529 {},
1530};
1531
1532MODULE_DEVICE_TABLE(i2c, tvp514x_id);
1533
1534static struct i2c_driver tvp514x_i2c_driver = {
1535 .driver = {
1536 .name = TVP514X_MODULE_NAME,
1537 .owner = THIS_MODULE,
1538 },
1539 .probe = tvp514x_probe,
1540 .remove = __exit_p(tvp514x_remove),
1541 .id_table = tvp514x_id,
1542};
1543
1544/**
1545 * tvp514x_init
1546 *
1547 * Module init function
1548 */
1549static int __init tvp514x_init(void)
1550{
1551 return i2c_add_driver(&tvp514x_i2c_driver);
1552}
1553
1554/**
1555 * tvp514x_cleanup
1556 *
1557 * Module exit function
1558 */
1559static void __exit tvp514x_cleanup(void)
1560{
1561 i2c_del_driver(&tvp514x_i2c_driver);
1562}
1563
1564module_init(tvp514x_init);
1565module_exit(tvp514x_cleanup);
1566
1567MODULE_AUTHOR("Texas Instruments");
1568MODULE_DESCRIPTION("TVP514X linux decoder driver");
1569MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/tvp514x_regs.h b/drivers/media/video/tvp514x_regs.h
new file mode 100644
index 000000000000..351620aeecc2
--- /dev/null
+++ b/drivers/media/video/tvp514x_regs.h
@@ -0,0 +1,297 @@
1/*
2 * drivers/media/video/tvp514x_regs.h
3 *
4 * Copyright (C) 2008 Texas Instruments Inc
5 * Author: Vaibhav Hiremath <hvaibhav@ti.com>
6 *
7 * Contributors:
8 * Sivaraj R <sivaraj@ti.com>
9 * Brijesh R Jadav <brijesh.j@ti.com>
10 * Hardik Shah <hardik.shah@ti.com>
11 * Manjunath Hadli <mrh@ti.com>
12 * Karicheri Muralidharan <m-karicheri2@ti.com>
13 *
14 * This package is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 */
28
29#ifndef _TVP514X_REGS_H
30#define _TVP514X_REGS_H
31
32/*
33 * TVP5146/47 registers
34 */
35#define REG_INPUT_SEL (0x00)
36#define REG_AFE_GAIN_CTRL (0x01)
37#define REG_VIDEO_STD (0x02)
38#define REG_OPERATION_MODE (0x03)
39#define REG_AUTOSWITCH_MASK (0x04)
40
41#define REG_COLOR_KILLER (0x05)
42#define REG_LUMA_CONTROL1 (0x06)
43#define REG_LUMA_CONTROL2 (0x07)
44#define REG_LUMA_CONTROL3 (0x08)
45
46#define REG_BRIGHTNESS (0x09)
47#define REG_CONTRAST (0x0A)
48#define REG_SATURATION (0x0B)
49#define REG_HUE (0x0C)
50
51#define REG_CHROMA_CONTROL1 (0x0D)
52#define REG_CHROMA_CONTROL2 (0x0E)
53
54/* 0x0F Reserved */
55
56#define REG_COMP_PR_SATURATION (0x10)
57#define REG_COMP_Y_CONTRAST (0x11)
58#define REG_COMP_PB_SATURATION (0x12)
59
60/* 0x13 Reserved */
61
62#define REG_COMP_Y_BRIGHTNESS (0x14)
63
64/* 0x15 Reserved */
65
66#define REG_AVID_START_PIXEL_LSB (0x16)
67#define REG_AVID_START_PIXEL_MSB (0x17)
68#define REG_AVID_STOP_PIXEL_LSB (0x18)
69#define REG_AVID_STOP_PIXEL_MSB (0x19)
70
71#define REG_HSYNC_START_PIXEL_LSB (0x1A)
72#define REG_HSYNC_START_PIXEL_MSB (0x1B)
73#define REG_HSYNC_STOP_PIXEL_LSB (0x1C)
74#define REG_HSYNC_STOP_PIXEL_MSB (0x1D)
75
76#define REG_VSYNC_START_LINE_LSB (0x1E)
77#define REG_VSYNC_START_LINE_MSB (0x1F)
78#define REG_VSYNC_STOP_LINE_LSB (0x20)
79#define REG_VSYNC_STOP_LINE_MSB (0x21)
80
81#define REG_VBLK_START_LINE_LSB (0x22)
82#define REG_VBLK_START_LINE_MSB (0x23)
83#define REG_VBLK_STOP_LINE_LSB (0x24)
84#define REG_VBLK_STOP_LINE_MSB (0x25)
85
86/* 0x26 - 0x27 Reserved */
87
88#define REG_FAST_SWTICH_CONTROL (0x28)
89
90/* 0x29 Reserved */
91
92#define REG_FAST_SWTICH_SCART_DELAY (0x2A)
93
94/* 0x2B Reserved */
95
96#define REG_SCART_DELAY (0x2C)
97#define REG_CTI_DELAY (0x2D)
98#define REG_CTI_CONTROL (0x2E)
99
100/* 0x2F - 0x31 Reserved */
101
102#define REG_SYNC_CONTROL (0x32)
103#define REG_OUTPUT_FORMATTER1 (0x33)
104#define REG_OUTPUT_FORMATTER2 (0x34)
105#define REG_OUTPUT_FORMATTER3 (0x35)
106#define REG_OUTPUT_FORMATTER4 (0x36)
107#define REG_OUTPUT_FORMATTER5 (0x37)
108#define REG_OUTPUT_FORMATTER6 (0x38)
109#define REG_CLEAR_LOST_LOCK (0x39)
110
111#define REG_STATUS1 (0x3A)
112#define REG_STATUS2 (0x3B)
113
114#define REG_AGC_GAIN_STATUS_LSB (0x3C)
115#define REG_AGC_GAIN_STATUS_MSB (0x3D)
116
117/* 0x3E Reserved */
118
119#define REG_VIDEO_STD_STATUS (0x3F)
120#define REG_GPIO_INPUT1 (0x40)
121#define REG_GPIO_INPUT2 (0x41)
122
123/* 0x42 - 0x45 Reserved */
124
125#define REG_AFE_COARSE_GAIN_CH1 (0x46)
126#define REG_AFE_COARSE_GAIN_CH2 (0x47)
127#define REG_AFE_COARSE_GAIN_CH3 (0x48)
128#define REG_AFE_COARSE_GAIN_CH4 (0x49)
129
130#define REG_AFE_FINE_GAIN_PB_B_LSB (0x4A)
131#define REG_AFE_FINE_GAIN_PB_B_MSB (0x4B)
132#define REG_AFE_FINE_GAIN_Y_G_CHROMA_LSB (0x4C)
133#define REG_AFE_FINE_GAIN_Y_G_CHROMA_MSB (0x4D)
134#define REG_AFE_FINE_GAIN_PR_R_LSB (0x4E)
135#define REG_AFE_FINE_GAIN_PR_R_MSB (0x4F)
136#define REG_AFE_FINE_GAIN_CVBS_LUMA_LSB (0x50)
137#define REG_AFE_FINE_GAIN_CVBS_LUMA_MSB (0x51)
138
139/* 0x52 - 0x68 Reserved */
140
141#define REG_FBIT_VBIT_CONTROL1 (0x69)
142
143/* 0x6A - 0x6B Reserved */
144
145#define REG_BACKEND_AGC_CONTROL (0x6C)
146
147/* 0x6D - 0x6E Reserved */
148
149#define REG_AGC_DECREMENT_SPEED_CONTROL (0x6F)
150#define REG_ROM_VERSION (0x70)
151
152/* 0x71 - 0x73 Reserved */
153
154#define REG_AGC_WHITE_PEAK_PROCESSING (0x74)
155#define REG_FBIT_VBIT_CONTROL2 (0x75)
156#define REG_VCR_TRICK_MODE_CONTROL (0x76)
157#define REG_HORIZONTAL_SHAKE_INCREMENT (0x77)
158#define REG_AGC_INCREMENT_SPEED (0x78)
159#define REG_AGC_INCREMENT_DELAY (0x79)
160
161/* 0x7A - 0x7F Reserved */
162
163#define REG_CHIP_ID_MSB (0x80)
164#define REG_CHIP_ID_LSB (0x81)
165
166/* 0x82 Reserved */
167
168#define REG_CPLL_SPEED_CONTROL (0x83)
169
170/* 0x84 - 0x96 Reserved */
171
172#define REG_STATUS_REQUEST (0x97)
173
174/* 0x98 - 0x99 Reserved */
175
176#define REG_VERTICAL_LINE_COUNT_LSB (0x9A)
177#define REG_VERTICAL_LINE_COUNT_MSB (0x9B)
178
179/* 0x9C - 0x9D Reserved */
180
181#define REG_AGC_DECREMENT_DELAY (0x9E)
182
183/* 0x9F - 0xB0 Reserved */
184
185#define REG_VDP_TTX_FILTER_1_MASK1 (0xB1)
186#define REG_VDP_TTX_FILTER_1_MASK2 (0xB2)
187#define REG_VDP_TTX_FILTER_1_MASK3 (0xB3)
188#define REG_VDP_TTX_FILTER_1_MASK4 (0xB4)
189#define REG_VDP_TTX_FILTER_1_MASK5 (0xB5)
190#define REG_VDP_TTX_FILTER_2_MASK1 (0xB6)
191#define REG_VDP_TTX_FILTER_2_MASK2 (0xB7)
192#define REG_VDP_TTX_FILTER_2_MASK3 (0xB8)
193#define REG_VDP_TTX_FILTER_2_MASK4 (0xB9)
194#define REG_VDP_TTX_FILTER_2_MASK5 (0xBA)
195#define REG_VDP_TTX_FILTER_CONTROL (0xBB)
196#define REG_VDP_FIFO_WORD_COUNT (0xBC)
197#define REG_VDP_FIFO_INTERRUPT_THRLD (0xBD)
198
199/* 0xBE Reserved */
200
201#define REG_VDP_FIFO_RESET (0xBF)
202#define REG_VDP_FIFO_OUTPUT_CONTROL (0xC0)
203#define REG_VDP_LINE_NUMBER_INTERRUPT (0xC1)
204#define REG_VDP_PIXEL_ALIGNMENT_LSB (0xC2)
205#define REG_VDP_PIXEL_ALIGNMENT_MSB (0xC3)
206
207/* 0xC4 - 0xD5 Reserved */
208
209#define REG_VDP_LINE_START (0xD6)
210#define REG_VDP_LINE_STOP (0xD7)
211#define REG_VDP_GLOBAL_LINE_MODE (0xD8)
212#define REG_VDP_FULL_FIELD_ENABLE (0xD9)
213#define REG_VDP_FULL_FIELD_MODE (0xDA)
214
215/* 0xDB - 0xDF Reserved */
216
217#define REG_VBUS_DATA_ACCESS_NO_VBUS_ADDR_INCR (0xE0)
218#define REG_VBUS_DATA_ACCESS_VBUS_ADDR_INCR (0xE1)
219#define REG_FIFO_READ_DATA (0xE2)
220
221/* 0xE3 - 0xE7 Reserved */
222
223#define REG_VBUS_ADDRESS_ACCESS1 (0xE8)
224#define REG_VBUS_ADDRESS_ACCESS2 (0xE9)
225#define REG_VBUS_ADDRESS_ACCESS3 (0xEA)
226
227/* 0xEB - 0xEF Reserved */
228
229#define REG_INTERRUPT_RAW_STATUS0 (0xF0)
230#define REG_INTERRUPT_RAW_STATUS1 (0xF1)
231#define REG_INTERRUPT_STATUS0 (0xF2)
232#define REG_INTERRUPT_STATUS1 (0xF3)
233#define REG_INTERRUPT_MASK0 (0xF4)
234#define REG_INTERRUPT_MASK1 (0xF5)
235#define REG_INTERRUPT_CLEAR0 (0xF6)
236#define REG_INTERRUPT_CLEAR1 (0xF7)
237
238/* 0xF8 - 0xFF Reserved */
239
240/*
241 * Mask and bit definitions of TVP5146/47 registers
242 */
243/* The ID values we are looking for */
244#define TVP514X_CHIP_ID_MSB (0x51)
245#define TVP5146_CHIP_ID_LSB (0x46)
246#define TVP5147_CHIP_ID_LSB (0x47)
247
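
These expected-ID values pair with REG_CHIP_ID_MSB/REG_CHIP_ID_LSB (0x80/0x81) defined above: the MSB must read 0x51 for both parts, and the LSB distinguishes TVP5146 from TVP5147. A hedged sketch of such a check, assuming a hypothetical tvp514x_read_reg() I2C helper:

/* Illustration only; tvp514x_read_reg() is an assumed read helper. */
static int tvp514x_check_id(struct i2c_client *client)
{
	u8 msb = tvp514x_read_reg(client, REG_CHIP_ID_MSB);
	u8 lsb = tvp514x_read_reg(client, REG_CHIP_ID_LSB);

	if (msb != TVP514X_CHIP_ID_MSB)
		return -ENODEV;
	if (lsb != TVP5146_CHIP_ID_LSB && lsb != TVP5147_CHIP_ID_LSB)
		return -ENODEV;
	return 0;
}
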
248#define VIDEO_STD_MASK (0x07)
249#define VIDEO_STD_AUTO_SWITCH_BIT (0x00)
250#define VIDEO_STD_NTSC_MJ_BIT (0x01)
251#define VIDEO_STD_PAL_BDGHIN_BIT (0x02)
252#define VIDEO_STD_PAL_M_BIT (0x03)
253#define VIDEO_STD_PAL_COMBINATION_N_BIT (0x04)
254#define VIDEO_STD_NTSC_4_43_BIT (0x05)
255#define VIDEO_STD_SECAM_BIT (0x06)
256#define VIDEO_STD_PAL_60_BIT (0x07)
257
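
The low three bits of the video standard (status) register carry one of the codes listed above; VIDEO_STD_MASK strips any higher bits before comparing. A small illustrative helper, again assuming the same hypothetical tvp514x_read_reg():

/* Illustrative decode of REG_VIDEO_STD_STATUS; helper is assumed. */
static int tvp514x_is_pal_bdghin(struct i2c_client *client)
{
	u8 std = tvp514x_read_reg(client, REG_VIDEO_STD_STATUS) &
		 VIDEO_STD_MASK;

	return std == VIDEO_STD_PAL_BDGHIN_BIT;
}
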
258/*
259 * Status bit
260 */
261#define STATUS_TV_VCR_BIT (1<<0)
262#define STATUS_HORZ_SYNC_LOCK_BIT (1<<1)
263#define STATUS_VIRT_SYNC_LOCK_BIT (1<<2)
264#define STATUS_CLR_SUBCAR_LOCK_BIT (1<<3)
265#define STATUS_LOST_LOCK_DETECT_BIT (1<<4)
266#define STATUS_FEILD_RATE_BIT (1<<5)
267#define STATUS_LINE_ALTERNATING_BIT (1<<6)
268#define STATUS_PEAK_WHITE_DETECT_BIT (1<<7)
269
270/* Tokens for register write */
271#define TOK_WRITE (0) /* token for write operation */
272#define TOK_TERM (1) /* terminating token */
273#define TOK_DELAY (2) /* delay token for reg list */
274#define TOK_SKIP (3) /* token to skip a register */
275/**
276 * struct tvp514x_reg - Structure for TVP5146/47 register initialization values
277 * @token - Token: TOK_WRITE, TOK_TERM, etc.
278 * @reg - Register offset
279 * @val - Register Value for TOK_WRITE or delay in ms for TOK_DELAY
280 */
281struct tvp514x_reg {
282 u8 token;
283 u8 reg;
284 u32 val;
285};
286
287/**
288 * struct tvp514x_init_seq - Structure for TVP5146/47/46M2/47M1 power up
289 * sequence.
290 * @no_regs - Number of registers to write for power up sequence.
291 * @init_reg_seq - Array of registers and respective values to write.
292 */
293struct tvp514x_init_seq {
294 unsigned int no_regs;
295 const struct tvp514x_reg *init_reg_seq;
296};
297#endif /* ifndef _TVP514X_REGS_H */
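
The token and struct definitions above imply a simple interpreter: TOK_WRITE programs a register, TOK_DELAY pauses for val milliseconds, TOK_SKIP leaves a register untouched, and TOK_TERM ends the list. A non-authoritative sketch of such a loop follows; tvp514x_write_reg() is an assumed helper and the driver's real loop in the .c file may be structured differently.

/*
 * Sketch of a token-list interpreter implied by the definitions above.
 * tvp514x_write_reg() is assumed; msleep() needs <linux/delay.h>.
 */
static int tvp514x_write_reglist(struct i2c_client *client,
				 const struct tvp514x_reg *list)
{
	int err;

	for (; list->token != TOK_TERM; list++) {
		switch (list->token) {
		case TOK_WRITE:
			err = tvp514x_write_reg(client, list->reg,
						(u8)list->val);
			if (err < 0)
				return err;
			break;
		case TOK_DELAY:
			msleep(list->val);	/* val is delay in ms */
			break;
		case TOK_SKIP:
		default:
			break;			/* leave register as-is */
		}
	}
	return 0;
}
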
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 28af5ce5560d..a388a9f0cb18 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -9,8 +9,10 @@
9#include <linux/videodev2.h> 9#include <linux/videodev2.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/video_decoder.h> 11#include <linux/video_decoder.h>
12#include <media/v4l2-common.h> 12#include <media/v4l2-device.h>
13#include <media/tvp5150.h> 13#include <media/tvp5150.h>
14#include <media/v4l2-i2c-drv-legacy.h>
15#include <media/v4l2-chip-ident.h>
14 16
15#include "tvp5150_reg.h" 17#include "tvp5150_reg.h"
16 18
@@ -29,21 +31,7 @@ I2C_CLIENT_INSMOD;
29 31
30static int debug; 32static int debug;
31module_param(debug, int, 0); 33module_param(debug, int, 0);
32MODULE_PARM_DESC(debug, "Debug level (0-1)"); 34MODULE_PARM_DESC(debug, "Debug level (0-2)");
33
34#define tvp5150_err(fmt, arg...) do { \
35 printk(KERN_ERR "%s %d-%04x: " fmt, c->driver->driver.name, \
36 i2c_adapter_id(c->adapter), c->addr , ## arg); } while (0)
37#define tvp5150_info(fmt, arg...) do { \
38 printk(KERN_INFO "%s %d-%04x: " fmt, c->driver->driver.name, \
39 i2c_adapter_id(c->adapter), c->addr , ## arg); } while (0)
40#define tvp5150_dbg(num, fmt, arg...) \
41 do { \
42 if (debug >= num) \
43 printk(KERN_DEBUG "%s debug %d-%04x: " fmt,\
44 c->driver->driver.name, \
45 i2c_adapter_id(c->adapter), \
46 c->addr , ## arg); } while (0)
47 35
48/* supported controls */ 36/* supported controls */
49static struct v4l2_queryctrl tvp5150_qctrl[] = { 37static struct v4l2_queryctrl tvp5150_qctrl[] = {
@@ -87,7 +75,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
87}; 75};
88 76
89struct tvp5150 { 77struct tvp5150 {
90 struct i2c_client *client; 78 struct v4l2_subdev sd;
91 79
92 v4l2_std_id norm; /* Current set standard */ 80 v4l2_std_id norm; /* Current set standard */
93 struct v4l2_routing route; 81 struct v4l2_routing route;
@@ -98,49 +86,57 @@ struct tvp5150 {
98 int sat; 86 int sat;
99}; 87};
100 88
101static int tvp5150_read(struct i2c_client *c, unsigned char addr) 89static inline struct tvp5150 *to_tvp5150(struct v4l2_subdev *sd)
102{ 90{
91 return container_of(sd, struct tvp5150, sd);
92}
93
94static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
95{
96 struct i2c_client *c = v4l2_get_subdevdata(sd);
103 unsigned char buffer[1]; 97 unsigned char buffer[1];
104 int rc; 98 int rc;
105 99
106 buffer[0] = addr; 100 buffer[0] = addr;
107 if (1 != (rc = i2c_master_send(c, buffer, 1))) 101 if (1 != (rc = i2c_master_send(c, buffer, 1)))
108 tvp5150_dbg(0, "i2c i/o error: rc == %d (should be 1)\n", rc); 102 v4l2_dbg(0, debug, sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
109 103
110 msleep(10); 104 msleep(10);
111 105
112 if (1 != (rc = i2c_master_recv(c, buffer, 1))) 106 if (1 != (rc = i2c_master_recv(c, buffer, 1)))
113 tvp5150_dbg(0, "i2c i/o error: rc == %d (should be 1)\n", rc); 107 v4l2_dbg(0, debug, sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
114 108
115 tvp5150_dbg(2, "tvp5150: read 0x%02x = 0x%02x\n", addr, buffer[0]); 109 v4l2_dbg(2, debug, sd, "tvp5150: read 0x%02x = 0x%02x\n", addr, buffer[0]);
116 110
117 return (buffer[0]); 111 return (buffer[0]);
118} 112}
119 113
120static inline void tvp5150_write(struct i2c_client *c, unsigned char addr, 114static inline void tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
121 unsigned char value) 115 unsigned char value)
122{ 116{
117 struct i2c_client *c = v4l2_get_subdevdata(sd);
123 unsigned char buffer[2]; 118 unsigned char buffer[2];
124 int rc; 119 int rc;
125 120
126 buffer[0] = addr; 121 buffer[0] = addr;
127 buffer[1] = value; 122 buffer[1] = value;
128 tvp5150_dbg(2, "tvp5150: writing 0x%02x 0x%02x\n", buffer[0], buffer[1]); 123 v4l2_dbg(2, debug, sd, "tvp5150: writing 0x%02x 0x%02x\n", buffer[0], buffer[1]);
129 if (2 != (rc = i2c_master_send(c, buffer, 2))) 124 if (2 != (rc = i2c_master_send(c, buffer, 2)))
130 tvp5150_dbg(0, "i2c i/o error: rc == %d (should be 2)\n", rc); 125 v4l2_dbg(0, debug, sd, "i2c i/o error: rc == %d (should be 2)\n", rc);
131} 126}
132 127
133static void dump_reg_range(struct i2c_client *c, char *s, u8 init, const u8 end,int max_line) 128static void dump_reg_range(struct v4l2_subdev *sd, char *s, u8 init,
129 const u8 end, int max_line)
134{ 130{
135 int i=0; 131 int i = 0;
136 132
137 while (init!=(u8)(end+1)) { 133 while (init != (u8)(end + 1)) {
138 if ((i%max_line) == 0) { 134 if ((i % max_line) == 0) {
139 if (i>0) 135 if (i > 0)
140 printk("\n"); 136 printk("\n");
141 printk("tvp5150: %s reg 0x%02x = ",s,init); 137 printk("tvp5150: %s reg 0x%02x = ", s, init);
142 } 138 }
143 printk("%02x ",tvp5150_read(c, init)); 139 printk("%02x ", tvp5150_read(sd, init));
144 140
145 init++; 141 init++;
146 i++; 142 i++;
@@ -148,147 +144,148 @@ static void dump_reg_range(struct i2c_client *c, char *s, u8 init, const u8 end,
148 printk("\n"); 144 printk("\n");
149} 145}
150 146
151static void dump_reg(struct i2c_client *c) 147static int tvp5150_log_status(struct v4l2_subdev *sd)
152{ 148{
153 printk("tvp5150: Video input source selection #1 = 0x%02x\n", 149 printk("tvp5150: Video input source selection #1 = 0x%02x\n",
154 tvp5150_read(c, TVP5150_VD_IN_SRC_SEL_1)); 150 tvp5150_read(sd, TVP5150_VD_IN_SRC_SEL_1));
155 printk("tvp5150: Analog channel controls = 0x%02x\n", 151 printk("tvp5150: Analog channel controls = 0x%02x\n",
156 tvp5150_read(c, TVP5150_ANAL_CHL_CTL)); 152 tvp5150_read(sd, TVP5150_ANAL_CHL_CTL));
157 printk("tvp5150: Operation mode controls = 0x%02x\n", 153 printk("tvp5150: Operation mode controls = 0x%02x\n",
158 tvp5150_read(c, TVP5150_OP_MODE_CTL)); 154 tvp5150_read(sd, TVP5150_OP_MODE_CTL));
159 printk("tvp5150: Miscellaneous controls = 0x%02x\n", 155 printk("tvp5150: Miscellaneous controls = 0x%02x\n",
160 tvp5150_read(c, TVP5150_MISC_CTL)); 156 tvp5150_read(sd, TVP5150_MISC_CTL));
161 printk("tvp5150: Autoswitch mask= 0x%02x\n", 157 printk("tvp5150: Autoswitch mask= 0x%02x\n",
162 tvp5150_read(c, TVP5150_AUTOSW_MSK)); 158 tvp5150_read(sd, TVP5150_AUTOSW_MSK));
163 printk("tvp5150: Color killer threshold control = 0x%02x\n", 159 printk("tvp5150: Color killer threshold control = 0x%02x\n",
164 tvp5150_read(c, TVP5150_COLOR_KIL_THSH_CTL)); 160 tvp5150_read(sd, TVP5150_COLOR_KIL_THSH_CTL));
165 printk("tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n", 161 printk("tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n",
166 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_1), 162 tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_1),
167 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_2), 163 tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_2),
168 tvp5150_read(c, TVP5150_LUMA_PROC_CTL_3)); 164 tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_3));
169 printk("tvp5150: Brightness control = 0x%02x\n", 165 printk("tvp5150: Brightness control = 0x%02x\n",
170 tvp5150_read(c, TVP5150_BRIGHT_CTL)); 166 tvp5150_read(sd, TVP5150_BRIGHT_CTL));
171 printk("tvp5150: Color saturation control = 0x%02x\n", 167 printk("tvp5150: Color saturation control = 0x%02x\n",
172 tvp5150_read(c, TVP5150_SATURATION_CTL)); 168 tvp5150_read(sd, TVP5150_SATURATION_CTL));
173 printk("tvp5150: Hue control = 0x%02x\n", 169 printk("tvp5150: Hue control = 0x%02x\n",
174 tvp5150_read(c, TVP5150_HUE_CTL)); 170 tvp5150_read(sd, TVP5150_HUE_CTL));
175 printk("tvp5150: Contrast control = 0x%02x\n", 171 printk("tvp5150: Contrast control = 0x%02x\n",
176 tvp5150_read(c, TVP5150_CONTRAST_CTL)); 172 tvp5150_read(sd, TVP5150_CONTRAST_CTL));
177 printk("tvp5150: Outputs and data rates select = 0x%02x\n", 173 printk("tvp5150: Outputs and data rates select = 0x%02x\n",
178 tvp5150_read(c, TVP5150_DATA_RATE_SEL)); 174 tvp5150_read(sd, TVP5150_DATA_RATE_SEL));
179 printk("tvp5150: Configuration shared pins = 0x%02x\n", 175 printk("tvp5150: Configuration shared pins = 0x%02x\n",
180 tvp5150_read(c, TVP5150_CONF_SHARED_PIN)); 176 tvp5150_read(sd, TVP5150_CONF_SHARED_PIN));
181 printk("tvp5150: Active video cropping start = 0x%02x%02x\n", 177 printk("tvp5150: Active video cropping start = 0x%02x%02x\n",
182 tvp5150_read(c, TVP5150_ACT_VD_CROP_ST_MSB), 178 tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_MSB),
183 tvp5150_read(c, TVP5150_ACT_VD_CROP_ST_LSB)); 179 tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_LSB));
184 printk("tvp5150: Active video cropping stop = 0x%02x%02x\n", 180 printk("tvp5150: Active video cropping stop = 0x%02x%02x\n",
185 tvp5150_read(c, TVP5150_ACT_VD_CROP_STP_MSB), 181 tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_MSB),
186 tvp5150_read(c, TVP5150_ACT_VD_CROP_STP_LSB)); 182 tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_LSB));
187 printk("tvp5150: Genlock/RTC = 0x%02x\n", 183 printk("tvp5150: Genlock/RTC = 0x%02x\n",
188 tvp5150_read(c, TVP5150_GENLOCK)); 184 tvp5150_read(sd, TVP5150_GENLOCK));
189 printk("tvp5150: Horizontal sync start = 0x%02x\n", 185 printk("tvp5150: Horizontal sync start = 0x%02x\n",
190 tvp5150_read(c, TVP5150_HORIZ_SYNC_START)); 186 tvp5150_read(sd, TVP5150_HORIZ_SYNC_START));
191 printk("tvp5150: Vertical blanking start = 0x%02x\n", 187 printk("tvp5150: Vertical blanking start = 0x%02x\n",
192 tvp5150_read(c, TVP5150_VERT_BLANKING_START)); 188 tvp5150_read(sd, TVP5150_VERT_BLANKING_START));
193 printk("tvp5150: Vertical blanking stop = 0x%02x\n", 189 printk("tvp5150: Vertical blanking stop = 0x%02x\n",
194 tvp5150_read(c, TVP5150_VERT_BLANKING_STOP)); 190 tvp5150_read(sd, TVP5150_VERT_BLANKING_STOP));
195 printk("tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n", 191 printk("tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n",
196 tvp5150_read(c, TVP5150_CHROMA_PROC_CTL_1), 192 tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_1),
197 tvp5150_read(c, TVP5150_CHROMA_PROC_CTL_2)); 193 tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_2));
198 printk("tvp5150: Interrupt reset register B = 0x%02x\n", 194 printk("tvp5150: Interrupt reset register B = 0x%02x\n",
199 tvp5150_read(c, TVP5150_INT_RESET_REG_B)); 195 tvp5150_read(sd, TVP5150_INT_RESET_REG_B));
200 printk("tvp5150: Interrupt enable register B = 0x%02x\n", 196 printk("tvp5150: Interrupt enable register B = 0x%02x\n",
201 tvp5150_read(c, TVP5150_INT_ENABLE_REG_B)); 197 tvp5150_read(sd, TVP5150_INT_ENABLE_REG_B));
202 printk("tvp5150: Interrupt configuration register B = 0x%02x\n", 198 printk("tvp5150: Interrupt configuration register B = 0x%02x\n",
203 tvp5150_read(c, TVP5150_INTT_CONFIG_REG_B)); 199 tvp5150_read(sd, TVP5150_INTT_CONFIG_REG_B));
204 printk("tvp5150: Video standard = 0x%02x\n", 200 printk("tvp5150: Video standard = 0x%02x\n",
205 tvp5150_read(c, TVP5150_VIDEO_STD)); 201 tvp5150_read(sd, TVP5150_VIDEO_STD));
206 printk("tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n", 202 printk("tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n",
207 tvp5150_read(c, TVP5150_CB_GAIN_FACT), 203 tvp5150_read(sd, TVP5150_CB_GAIN_FACT),
208 tvp5150_read(c, TVP5150_CR_GAIN_FACTOR)); 204 tvp5150_read(sd, TVP5150_CR_GAIN_FACTOR));
209 printk("tvp5150: Macrovision on counter = 0x%02x\n", 205 printk("tvp5150: Macrovision on counter = 0x%02x\n",
210 tvp5150_read(c, TVP5150_MACROVISION_ON_CTR)); 206 tvp5150_read(sd, TVP5150_MACROVISION_ON_CTR));
211 printk("tvp5150: Macrovision off counter = 0x%02x\n", 207 printk("tvp5150: Macrovision off counter = 0x%02x\n",
212 tvp5150_read(c, TVP5150_MACROVISION_OFF_CTR)); 208 tvp5150_read(sd, TVP5150_MACROVISION_OFF_CTR));
213 printk("tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n", 209 printk("tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n",
214 (tvp5150_read(c, TVP5150_REV_SELECT)&1)?3:4); 210 (tvp5150_read(sd, TVP5150_REV_SELECT) & 1) ? 3 : 4);
215 printk("tvp5150: Device ID = %02x%02x\n", 211 printk("tvp5150: Device ID = %02x%02x\n",
216 tvp5150_read(c, TVP5150_MSB_DEV_ID), 212 tvp5150_read(sd, TVP5150_MSB_DEV_ID),
217 tvp5150_read(c, TVP5150_LSB_DEV_ID)); 213 tvp5150_read(sd, TVP5150_LSB_DEV_ID));
218 printk("tvp5150: ROM version = (hex) %02x.%02x\n", 214 printk("tvp5150: ROM version = (hex) %02x.%02x\n",
219 tvp5150_read(c, TVP5150_ROM_MAJOR_VER), 215 tvp5150_read(sd, TVP5150_ROM_MAJOR_VER),
220 tvp5150_read(c, TVP5150_ROM_MINOR_VER)); 216 tvp5150_read(sd, TVP5150_ROM_MINOR_VER));
221 printk("tvp5150: Vertical line count = 0x%02x%02x\n", 217 printk("tvp5150: Vertical line count = 0x%02x%02x\n",
222 tvp5150_read(c, TVP5150_VERT_LN_COUNT_MSB), 218 tvp5150_read(sd, TVP5150_VERT_LN_COUNT_MSB),
223 tvp5150_read(c, TVP5150_VERT_LN_COUNT_LSB)); 219 tvp5150_read(sd, TVP5150_VERT_LN_COUNT_LSB));
224 printk("tvp5150: Interrupt status register B = 0x%02x\n", 220 printk("tvp5150: Interrupt status register B = 0x%02x\n",
225 tvp5150_read(c, TVP5150_INT_STATUS_REG_B)); 221 tvp5150_read(sd, TVP5150_INT_STATUS_REG_B));
226 printk("tvp5150: Interrupt active register B = 0x%02x\n", 222 printk("tvp5150: Interrupt active register B = 0x%02x\n",
227 tvp5150_read(c, TVP5150_INT_ACTIVE_REG_B)); 223 tvp5150_read(sd, TVP5150_INT_ACTIVE_REG_B));
228 printk("tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n", 224 printk("tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n",
229 tvp5150_read(c, TVP5150_STATUS_REG_1), 225 tvp5150_read(sd, TVP5150_STATUS_REG_1),
230 tvp5150_read(c, TVP5150_STATUS_REG_2), 226 tvp5150_read(sd, TVP5150_STATUS_REG_2),
231 tvp5150_read(c, TVP5150_STATUS_REG_3), 227 tvp5150_read(sd, TVP5150_STATUS_REG_3),
232 tvp5150_read(c, TVP5150_STATUS_REG_4), 228 tvp5150_read(sd, TVP5150_STATUS_REG_4),
233 tvp5150_read(c, TVP5150_STATUS_REG_5)); 229 tvp5150_read(sd, TVP5150_STATUS_REG_5));
234 230
235 dump_reg_range(c,"Teletext filter 1", TVP5150_TELETEXT_FIL1_INI, 231 dump_reg_range(sd, "Teletext filter 1", TVP5150_TELETEXT_FIL1_INI,
236 TVP5150_TELETEXT_FIL1_END,8); 232 TVP5150_TELETEXT_FIL1_END, 8);
237 dump_reg_range(c,"Teletext filter 2", TVP5150_TELETEXT_FIL2_INI, 233 dump_reg_range(sd, "Teletext filter 2", TVP5150_TELETEXT_FIL2_INI,
238 TVP5150_TELETEXT_FIL2_END,8); 234 TVP5150_TELETEXT_FIL2_END, 8);
239 235
240 printk("tvp5150: Teletext filter enable = 0x%02x\n", 236 printk("tvp5150: Teletext filter enable = 0x%02x\n",
241 tvp5150_read(c, TVP5150_TELETEXT_FIL_ENA)); 237 tvp5150_read(sd, TVP5150_TELETEXT_FIL_ENA));
242 printk("tvp5150: Interrupt status register A = 0x%02x\n", 238 printk("tvp5150: Interrupt status register A = 0x%02x\n",
243 tvp5150_read(c, TVP5150_INT_STATUS_REG_A)); 239 tvp5150_read(sd, TVP5150_INT_STATUS_REG_A));
244 printk("tvp5150: Interrupt enable register A = 0x%02x\n", 240 printk("tvp5150: Interrupt enable register A = 0x%02x\n",
245 tvp5150_read(c, TVP5150_INT_ENABLE_REG_A)); 241 tvp5150_read(sd, TVP5150_INT_ENABLE_REG_A));
246 printk("tvp5150: Interrupt configuration = 0x%02x\n", 242 printk("tvp5150: Interrupt configuration = 0x%02x\n",
247 tvp5150_read(c, TVP5150_INT_CONF)); 243 tvp5150_read(sd, TVP5150_INT_CONF));
248 printk("tvp5150: VDP status register = 0x%02x\n", 244 printk("tvp5150: VDP status register = 0x%02x\n",
249 tvp5150_read(c, TVP5150_VDP_STATUS_REG)); 245 tvp5150_read(sd, TVP5150_VDP_STATUS_REG));
250 printk("tvp5150: FIFO word count = 0x%02x\n", 246 printk("tvp5150: FIFO word count = 0x%02x\n",
251 tvp5150_read(c, TVP5150_FIFO_WORD_COUNT)); 247 tvp5150_read(sd, TVP5150_FIFO_WORD_COUNT));
252 printk("tvp5150: FIFO interrupt threshold = 0x%02x\n", 248 printk("tvp5150: FIFO interrupt threshold = 0x%02x\n",
253 tvp5150_read(c, TVP5150_FIFO_INT_THRESHOLD)); 249 tvp5150_read(sd, TVP5150_FIFO_INT_THRESHOLD));
254 printk("tvp5150: FIFO reset = 0x%02x\n", 250 printk("tvp5150: FIFO reset = 0x%02x\n",
255 tvp5150_read(c, TVP5150_FIFO_RESET)); 251 tvp5150_read(sd, TVP5150_FIFO_RESET));
256 printk("tvp5150: Line number interrupt = 0x%02x\n", 252 printk("tvp5150: Line number interrupt = 0x%02x\n",
257 tvp5150_read(c, TVP5150_LINE_NUMBER_INT)); 253 tvp5150_read(sd, TVP5150_LINE_NUMBER_INT));
258 printk("tvp5150: Pixel alignment register = 0x%02x%02x\n", 254 printk("tvp5150: Pixel alignment register = 0x%02x%02x\n",
259 tvp5150_read(c, TVP5150_PIX_ALIGN_REG_HIGH), 255 tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_HIGH),
260 tvp5150_read(c, TVP5150_PIX_ALIGN_REG_LOW)); 256 tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_LOW));
261 printk("tvp5150: FIFO output control = 0x%02x\n", 257 printk("tvp5150: FIFO output control = 0x%02x\n",
262 tvp5150_read(c, TVP5150_FIFO_OUT_CTRL)); 258 tvp5150_read(sd, TVP5150_FIFO_OUT_CTRL));
263 printk("tvp5150: Full field enable = 0x%02x\n", 259 printk("tvp5150: Full field enable = 0x%02x\n",
264 tvp5150_read(c, TVP5150_FULL_FIELD_ENA)); 260 tvp5150_read(sd, TVP5150_FULL_FIELD_ENA));
265 printk("tvp5150: Full field mode register = 0x%02x\n", 261 printk("tvp5150: Full field mode register = 0x%02x\n",
266 tvp5150_read(c, TVP5150_FULL_FIELD_MODE_REG)); 262 tvp5150_read(sd, TVP5150_FULL_FIELD_MODE_REG));
267 263
268 dump_reg_range(c,"CC data", TVP5150_CC_DATA_INI, 264 dump_reg_range(sd, "CC data", TVP5150_CC_DATA_INI,
269 TVP5150_CC_DATA_END,8); 265 TVP5150_CC_DATA_END, 8);
270 266
271 dump_reg_range(c,"WSS data", TVP5150_WSS_DATA_INI, 267 dump_reg_range(sd, "WSS data", TVP5150_WSS_DATA_INI,
272 TVP5150_WSS_DATA_END,8); 268 TVP5150_WSS_DATA_END, 8);
273 269
274 dump_reg_range(c,"VPS data", TVP5150_VPS_DATA_INI, 270 dump_reg_range(sd, "VPS data", TVP5150_VPS_DATA_INI,
275 TVP5150_VPS_DATA_END,8); 271 TVP5150_VPS_DATA_END, 8);
276 272
277 dump_reg_range(c,"VITC data", TVP5150_VITC_DATA_INI, 273 dump_reg_range(sd, "VITC data", TVP5150_VITC_DATA_INI,
278 TVP5150_VITC_DATA_END,10); 274 TVP5150_VITC_DATA_END, 10);
279 275
280 dump_reg_range(c,"Line mode", TVP5150_LINE_MODE_INI, 276 dump_reg_range(sd, "Line mode", TVP5150_LINE_MODE_INI,
281 TVP5150_LINE_MODE_END,8); 277 TVP5150_LINE_MODE_END, 8);
278 return 0;
282} 279}
283 280
284/**************************************************************************** 281/****************************************************************************
285 Basic functions 282 Basic functions
286 ****************************************************************************/ 283 ****************************************************************************/
287 284
288static inline void tvp5150_selmux(struct i2c_client *c) 285static inline void tvp5150_selmux(struct v4l2_subdev *sd)
289{ 286{
290 int opmode=0; 287 int opmode=0;
291 struct tvp5150 *decoder = i2c_get_clientdata(c); 288 struct tvp5150 *decoder = to_tvp5150(sd);
292 int input = 0; 289 int input = 0;
293 unsigned char val; 290 unsigned char val;
294 291
@@ -309,23 +306,23 @@ static inline void tvp5150_selmux(struct i2c_client *c)
309 break; 306 break;
310 } 307 }
311 308
312 tvp5150_dbg( 1, "Selecting video route: route input=%i, output=%i " 309 v4l2_dbg(1, debug, sd, "Selecting video route: route input=%i, output=%i "
313 "=> tvp5150 input=%i, opmode=%i\n", 310 "=> tvp5150 input=%i, opmode=%i\n",
314 decoder->route.input,decoder->route.output, 311 decoder->route.input,decoder->route.output,
315 input, opmode ); 312 input, opmode );
316 313
317 tvp5150_write(c, TVP5150_OP_MODE_CTL, opmode); 314 tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
318 tvp5150_write(c, TVP5150_VD_IN_SRC_SEL_1, input); 315 tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
319 316
320 /* Svideo should enable YCrCb output and disable GPCL output 317 /* Svideo should enable YCrCb output and disable GPCL output
321 * For Composite and TV, it should be the reverse 318 * For Composite and TV, it should be the reverse
322 */ 319 */
323 val = tvp5150_read(c, TVP5150_MISC_CTL); 320 val = tvp5150_read(sd, TVP5150_MISC_CTL);
324 if (decoder->route.input == TVP5150_SVIDEO) 321 if (decoder->route.input == TVP5150_SVIDEO)
325 val = (val & ~0x40) | 0x10; 322 val = (val & ~0x40) | 0x10;
326 else 323 else
327 val = (val & ~0x10) | 0x40; 324 val = (val & ~0x10) | 0x40;
328 tvp5150_write(c, TVP5150_MISC_CTL, val); 325 tvp5150_write(sd, TVP5150_MISC_CTL, val);
329}; 326};
330 327
331struct i2c_reg_value { 328struct i2c_reg_value {
@@ -593,35 +590,35 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
593 { (u16)-1 } 590 { (u16)-1 }
594}; 591};
595 592
596static int tvp5150_write_inittab(struct i2c_client *c, 593static int tvp5150_write_inittab(struct v4l2_subdev *sd,
597 const struct i2c_reg_value *regs) 594 const struct i2c_reg_value *regs)
598{ 595{
599 while (regs->reg != 0xff) { 596 while (regs->reg != 0xff) {
600 tvp5150_write(c, regs->reg, regs->value); 597 tvp5150_write(sd, regs->reg, regs->value);
601 regs++; 598 regs++;
602 } 599 }
603 return 0; 600 return 0;
604} 601}
605 602
606static int tvp5150_vdp_init(struct i2c_client *c, 603static int tvp5150_vdp_init(struct v4l2_subdev *sd,
607 const struct i2c_vbi_ram_value *regs) 604 const struct i2c_vbi_ram_value *regs)
608{ 605{
609 unsigned int i; 606 unsigned int i;
610 607
611 /* Disable Full Field */ 608 /* Disable Full Field */
612 tvp5150_write(c, TVP5150_FULL_FIELD_ENA, 0); 609 tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0);
613 610
614 /* Before programming, Line mode should be at 0xff */ 611 /* Before programming, Line mode should be at 0xff */
615 for (i=TVP5150_LINE_MODE_INI; i<=TVP5150_LINE_MODE_END; i++) 612 for (i = TVP5150_LINE_MODE_INI; i <= TVP5150_LINE_MODE_END; i++)
616 tvp5150_write(c, i, 0xff); 613 tvp5150_write(sd, i, 0xff);
617 614
618 /* Load Ram Table */ 615 /* Load Ram Table */
619 while (regs->reg != (u16)-1 ) { 616 while (regs->reg != (u16)-1) {
620 tvp5150_write(c, TVP5150_CONF_RAM_ADDR_HIGH,regs->reg>>8); 617 tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8);
621 tvp5150_write(c, TVP5150_CONF_RAM_ADDR_LOW,regs->reg); 618 tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg);
622 619
623 for (i=0;i<16;i++) 620 for (i = 0; i < 16; i++)
624 tvp5150_write(c, TVP5150_VDP_CONF_RAM_DATA,regs->values[i]); 621 tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]);
625 622
626 regs++; 623 regs++;
627 } 624 }
@@ -629,11 +626,13 @@ static int tvp5150_vdp_init(struct i2c_client *c,
629} 626}
630 627
631/* Fills VBI capabilities based on i2c_vbi_ram_value struct */ 628/* Fills VBI capabilities based on i2c_vbi_ram_value struct */
632static void tvp5150_vbi_get_cap(const struct i2c_vbi_ram_value *regs, 629static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
633 struct v4l2_sliced_vbi_cap *cap) 630 struct v4l2_sliced_vbi_cap *cap)
634{ 631{
632 const struct i2c_vbi_ram_value *regs = vbi_ram_default;
635 int line; 633 int line;
636 634
635 v4l2_dbg(1, debug, sd, "VIDIOC_G_SLICED_VBI_CAP\n");
637 memset(cap, 0, sizeof *cap); 636 memset(cap, 0, sizeof *cap);
638 637
639 while (regs->reg != (u16)-1 ) { 638 while (regs->reg != (u16)-1 ) {
@@ -644,6 +643,7 @@ static void tvp5150_vbi_get_cap(const struct i2c_vbi_ram_value *regs,
644 643
645 regs++; 644 regs++;
646 } 645 }
646 return 0;
647} 647}
648 648
649/* Set vbi processing 649/* Set vbi processing
@@ -659,18 +659,18 @@ static void tvp5150_vbi_get_cap(const struct i2c_vbi_ram_value *regs,
659 * LSB = field1 659 * LSB = field1
660 * MSB = field2 660 * MSB = field2
661 */ 661 */
662static int tvp5150_set_vbi(struct i2c_client *c, 662static int tvp5150_set_vbi(struct v4l2_subdev *sd,
663 const struct i2c_vbi_ram_value *regs, 663 const struct i2c_vbi_ram_value *regs,
664 unsigned int type,u8 flags, int line, 664 unsigned int type,u8 flags, int line,
665 const int fields) 665 const int fields)
666{ 666{
667 struct tvp5150 *decoder = i2c_get_clientdata(c); 667 struct tvp5150 *decoder = to_tvp5150(sd);
668 v4l2_std_id std=decoder->norm; 668 v4l2_std_id std = decoder->norm;
669 u8 reg; 669 u8 reg;
670 int pos=0; 670 int pos=0;
671 671
672 if (std == V4L2_STD_ALL) { 672 if (std == V4L2_STD_ALL) {
673 tvp5150_err("VBI can't be configured without knowing number of lines\n"); 673 v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
674 return 0; 674 return 0;
675 } else if (std & V4L2_STD_625_50) { 675 } else if (std & V4L2_STD_625_50) {
676 /* Don't follow NTSC Line number convension */ 676 /* Don't follow NTSC Line number convension */
@@ -698,163 +698,186 @@ static int tvp5150_set_vbi(struct i2c_client *c,
698 reg=((line-6)<<1)+TVP5150_LINE_MODE_INI; 698 reg=((line-6)<<1)+TVP5150_LINE_MODE_INI;
699 699
700 if (fields&1) { 700 if (fields&1) {
701 tvp5150_write(c, reg, type); 701 tvp5150_write(sd, reg, type);
702 } 702 }
703 703
704 if (fields&2) { 704 if (fields&2) {
705 tvp5150_write(c, reg+1, type); 705 tvp5150_write(sd, reg+1, type);
706 } 706 }
707 707
708 return type; 708 return type;
709} 709}
710 710
711static int tvp5150_get_vbi(struct i2c_client *c, 711static int tvp5150_get_vbi(struct v4l2_subdev *sd,
712 const struct i2c_vbi_ram_value *regs, int line) 712 const struct i2c_vbi_ram_value *regs, int line)
713{ 713{
714 struct tvp5150 *decoder = i2c_get_clientdata(c); 714 struct tvp5150 *decoder = to_tvp5150(sd);
715 v4l2_std_id std=decoder->norm; 715 v4l2_std_id std = decoder->norm;
716 u8 reg; 716 u8 reg;
717 int pos, type=0; 717 int pos, type = 0;
718 718
719 if (std == V4L2_STD_ALL) { 719 if (std == V4L2_STD_ALL) {
720 tvp5150_err("VBI can't be configured without knowing number of lines\n"); 720 v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
721 return 0; 721 return 0;
722 } else if (std & V4L2_STD_625_50) { 722 } else if (std & V4L2_STD_625_50) {
723 /* Don't follow NTSC Line number convension */ 723 /* Don't follow NTSC Line number convension */
724 line += 3; 724 line += 3;
725 } 725 }
726 726
727 if (line<6||line>27) 727 if (line < 6 || line > 27)
728 return 0; 728 return 0;
729 729
730 reg=((line-6)<<1)+TVP5150_LINE_MODE_INI; 730 reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI;
731 731
732 pos=tvp5150_read(c, reg)&0x0f; 732 pos = tvp5150_read(sd, reg) & 0x0f;
733 if (pos<0x0f) 733 if (pos < 0x0f)
734 type=regs[pos].type.vbi_type; 734 type = regs[pos].type.vbi_type;
735 735
736 pos=tvp5150_read(c, reg+1)&0x0f; 736 pos = tvp5150_read(sd, reg + 1) & 0x0f;
737 if (pos<0x0f) 737 if (pos < 0x0f)
738 type|=regs[pos].type.vbi_type; 738 type |= regs[pos].type.vbi_type;
739 739
740 return type; 740 return type;
741} 741}
742static int tvp5150_set_std(struct i2c_client *c, v4l2_std_id std) 742
743static int tvp5150_set_std(struct v4l2_subdev *sd, v4l2_std_id std)
743{ 744{
744 struct tvp5150 *decoder = i2c_get_clientdata(c); 745 struct tvp5150 *decoder = to_tvp5150(sd);
745 int fmt=0; 746 int fmt = 0;
746 747
747 decoder->norm=std; 748 decoder->norm = std;
748 749
749 /* First tests should be against specific std */ 750 /* First tests should be against specific std */
750 751
751 if (std == V4L2_STD_ALL) { 752 if (std == V4L2_STD_ALL) {
752 fmt=0; /* Autodetect mode */ 753 fmt = 0; /* Autodetect mode */
753 } else if (std & V4L2_STD_NTSC_443) { 754 } else if (std & V4L2_STD_NTSC_443) {
754 fmt=0xa; 755 fmt = 0xa;
755 } else if (std & V4L2_STD_PAL_M) { 756 } else if (std & V4L2_STD_PAL_M) {
756 fmt=0x6; 757 fmt = 0x6;
757 } else if (std & (V4L2_STD_PAL_N| V4L2_STD_PAL_Nc)) { 758 } else if (std & (V4L2_STD_PAL_N | V4L2_STD_PAL_Nc)) {
758 fmt=0x8; 759 fmt = 0x8;
759 } else { 760 } else {
760 /* Then, test against generic ones */ 761 /* Then, test against generic ones */
761 if (std & V4L2_STD_NTSC) { 762 if (std & V4L2_STD_NTSC)
762 fmt=0x2; 763 fmt = 0x2;
763 } else if (std & V4L2_STD_PAL) { 764 else if (std & V4L2_STD_PAL)
764 fmt=0x4; 765 fmt = 0x4;
765 } else if (std & V4L2_STD_SECAM) { 766 else if (std & V4L2_STD_SECAM)
766 fmt=0xc; 767 fmt = 0xc;
767 }
768 } 768 }
769 769
770 tvp5150_dbg(1,"Set video std register to %d.\n",fmt); 770 v4l2_dbg(1, debug, sd, "Set video std register to %d.\n", fmt);
771 tvp5150_write(c, TVP5150_VIDEO_STD, fmt); 771 tvp5150_write(sd, TVP5150_VIDEO_STD, fmt);
772
773 return 0; 772 return 0;
774} 773}
775 774
776static inline void tvp5150_reset(struct i2c_client *c) 775static int tvp5150_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
776{
777 struct tvp5150 *decoder = to_tvp5150(sd);
778
779 if (decoder->norm == std)
780 return 0;
781
782 return tvp5150_set_std(sd, std);
783}
784
785static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)
777{ 786{
787 struct tvp5150 *decoder = to_tvp5150(sd);
778 u8 msb_id, lsb_id, msb_rom, lsb_rom; 788 u8 msb_id, lsb_id, msb_rom, lsb_rom;
779 struct tvp5150 *decoder = i2c_get_clientdata(c);
780 789
781 msb_id=tvp5150_read(c,TVP5150_MSB_DEV_ID); 790 msb_id = tvp5150_read(sd, TVP5150_MSB_DEV_ID);
782 lsb_id=tvp5150_read(c,TVP5150_LSB_DEV_ID); 791 lsb_id = tvp5150_read(sd, TVP5150_LSB_DEV_ID);
783 msb_rom=tvp5150_read(c,TVP5150_ROM_MAJOR_VER); 792 msb_rom = tvp5150_read(sd, TVP5150_ROM_MAJOR_VER);
784 lsb_rom=tvp5150_read(c,TVP5150_ROM_MINOR_VER); 793 lsb_rom = tvp5150_read(sd, TVP5150_ROM_MINOR_VER);
785 794
786 if ((msb_rom==4)&&(lsb_rom==0)) { /* Is TVP5150AM1 */ 795 if (msb_rom == 4 && lsb_rom == 0) { /* Is TVP5150AM1 */
787 tvp5150_info("tvp%02x%02xam1 detected.\n",msb_id, lsb_id); 796 v4l2_info(sd, "tvp%02x%02xam1 detected.\n", msb_id, lsb_id);
788 797
789 /* ITU-T BT.656.4 timing */ 798 /* ITU-T BT.656.4 timing */
790 tvp5150_write(c,TVP5150_REV_SELECT,0); 799 tvp5150_write(sd, TVP5150_REV_SELECT, 0);
791 } else { 800 } else {
792 if ((msb_rom==3)||(lsb_rom==0x21)) { /* Is TVP5150A */ 801 if (msb_rom == 3 || lsb_rom == 0x21) { /* Is TVP5150A */
793 tvp5150_info("tvp%02x%02xa detected.\n",msb_id, lsb_id); 802 v4l2_info(sd, "tvp%02x%02xa detected.\n", msb_id, lsb_id);
794 } else { 803 } else {
795 tvp5150_info("*** unknown tvp%02x%02x chip detected.\n",msb_id,lsb_id); 804 v4l2_info(sd, "*** unknown tvp%02x%02x chip detected.\n",
796 tvp5150_info("*** Rom ver is %d.%d\n",msb_rom,lsb_rom); 805 msb_id, lsb_id);
806 v4l2_info(sd, "*** Rom ver is %d.%d\n", msb_rom, lsb_rom);
797 } 807 }
798 } 808 }
799 809
800 /* Initializes TVP5150 to its default values */ 810 /* Initializes TVP5150 to its default values */
801 tvp5150_write_inittab(c, tvp5150_init_default); 811 tvp5150_write_inittab(sd, tvp5150_init_default);
802 812
803 /* Initializes VDP registers */ 813 /* Initializes VDP registers */
804 tvp5150_vdp_init(c, vbi_ram_default); 814 tvp5150_vdp_init(sd, vbi_ram_default);
805 815
806 /* Selects decoder input */ 816 /* Selects decoder input */
807 tvp5150_selmux(c); 817 tvp5150_selmux(sd);
808 818
809 /* Initializes TVP5150 to stream enabled values */ 819 /* Initializes TVP5150 to stream enabled values */
810 tvp5150_write_inittab(c, tvp5150_init_enable); 820 tvp5150_write_inittab(sd, tvp5150_init_enable);
811 821
812 /* Initialize image preferences */ 822 /* Initialize image preferences */
813 tvp5150_write(c, TVP5150_BRIGHT_CTL, decoder->bright); 823 tvp5150_write(sd, TVP5150_BRIGHT_CTL, decoder->bright);
814 tvp5150_write(c, TVP5150_CONTRAST_CTL, decoder->contrast); 824 tvp5150_write(sd, TVP5150_CONTRAST_CTL, decoder->contrast);
815 tvp5150_write(c, TVP5150_SATURATION_CTL, decoder->contrast); 825 tvp5150_write(sd, TVP5150_SATURATION_CTL, decoder->contrast);
816 tvp5150_write(c, TVP5150_HUE_CTL, decoder->hue); 826 tvp5150_write(sd, TVP5150_HUE_CTL, decoder->hue);
817 827
818 tvp5150_set_std(c, decoder->norm); 828 tvp5150_set_std(sd, decoder->norm);
829 return 0;
819}; 830};
820 831
821static int tvp5150_get_ctrl(struct i2c_client *c, struct v4l2_control *ctrl) 832static int tvp5150_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
822{ 833{
823/* struct tvp5150 *decoder = i2c_get_clientdata(c); */ 834 v4l2_dbg(1, debug, sd, "VIDIOC_G_CTRL called\n");
824 835
825 switch (ctrl->id) { 836 switch (ctrl->id) {
826 case V4L2_CID_BRIGHTNESS: 837 case V4L2_CID_BRIGHTNESS:
827 ctrl->value = tvp5150_read(c, TVP5150_BRIGHT_CTL); 838 ctrl->value = tvp5150_read(sd, TVP5150_BRIGHT_CTL);
828 return 0; 839 return 0;
829 case V4L2_CID_CONTRAST: 840 case V4L2_CID_CONTRAST:
830 ctrl->value = tvp5150_read(c, TVP5150_CONTRAST_CTL); 841 ctrl->value = tvp5150_read(sd, TVP5150_CONTRAST_CTL);
831 return 0; 842 return 0;
832 case V4L2_CID_SATURATION: 843 case V4L2_CID_SATURATION:
833 ctrl->value = tvp5150_read(c, TVP5150_SATURATION_CTL); 844 ctrl->value = tvp5150_read(sd, TVP5150_SATURATION_CTL);
834 return 0; 845 return 0;
835 case V4L2_CID_HUE: 846 case V4L2_CID_HUE:
836 ctrl->value = tvp5150_read(c, TVP5150_HUE_CTL); 847 ctrl->value = tvp5150_read(sd, TVP5150_HUE_CTL);
837 return 0; 848 return 0;
838 } 849 }
839 return -EINVAL; 850 return -EINVAL;
840} 851}
841 852
842static int tvp5150_set_ctrl(struct i2c_client *c, struct v4l2_control *ctrl) 853static int tvp5150_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
843{ 854{
844/* struct tvp5150 *decoder = i2c_get_clientdata(c); */ 855 u8 i, n;
856 n = ARRAY_SIZE(tvp5150_qctrl);
857
858 for (i = 0; i < n; i++) {
859 if (ctrl->id != tvp5150_qctrl[i].id)
860 continue;
861 if (ctrl->value < tvp5150_qctrl[i].minimum ||
862 ctrl->value > tvp5150_qctrl[i].maximum)
863 return -ERANGE;
864 v4l2_dbg(1, debug, sd, "VIDIOC_S_CTRL: id=%d, value=%d\n",
865 ctrl->id, ctrl->value);
866 break;
867 }
845 868
846 switch (ctrl->id) { 869 switch (ctrl->id) {
847 case V4L2_CID_BRIGHTNESS: 870 case V4L2_CID_BRIGHTNESS:
848 tvp5150_write(c, TVP5150_BRIGHT_CTL, ctrl->value); 871 tvp5150_write(sd, TVP5150_BRIGHT_CTL, ctrl->value);
849 return 0; 872 return 0;
850 case V4L2_CID_CONTRAST: 873 case V4L2_CID_CONTRAST:
851 tvp5150_write(c, TVP5150_CONTRAST_CTL, ctrl->value); 874 tvp5150_write(sd, TVP5150_CONTRAST_CTL, ctrl->value);
852 return 0; 875 return 0;
853 case V4L2_CID_SATURATION: 876 case V4L2_CID_SATURATION:
854 tvp5150_write(c, TVP5150_SATURATION_CTL, ctrl->value); 877 tvp5150_write(sd, TVP5150_SATURATION_CTL, ctrl->value);
855 return 0; 878 return 0;
856 case V4L2_CID_HUE: 879 case V4L2_CID_HUE:
857 tvp5150_write(c, TVP5150_HUE_CTL, ctrl->value); 880 tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->value);
858 return 0; 881 return 0;
859 } 882 }
860 return -EINVAL; 883 return -EINVAL;
@@ -863,227 +886,210 @@ static int tvp5150_set_ctrl(struct i2c_client *c, struct v4l2_control *ctrl)
863/**************************************************************************** 886/****************************************************************************
864 I2C Command 887 I2C Command
865 ****************************************************************************/ 888 ****************************************************************************/
866static int tvp5150_command(struct i2c_client *c,
867 unsigned int cmd, void *arg)
868{
869 struct tvp5150 *decoder = i2c_get_clientdata(c);
870 889
871 switch (cmd) { 890static int tvp5150_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
891{
892 struct tvp5150 *decoder = to_tvp5150(sd);
872 893
873 case 0: 894 decoder->route = *route;
874 case VIDIOC_INT_RESET: 895 tvp5150_selmux(sd);
875 tvp5150_reset(c); 896 return 0;
876 break; 897}
877 case VIDIOC_INT_G_VIDEO_ROUTING:
878 {
879 struct v4l2_routing *route = arg;
880 898
881 *route = decoder->route; 899static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
882 break; 900{
901 struct v4l2_sliced_vbi_format *svbi;
902 int i;
903
904 /* raw vbi */
905 if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
906 /* this is for capturing 36 raw vbi lines
907 if there's a way to cut off the beginning 2 vbi lines
908 with the tvp5150 then the vbi line count could be lowered
909 to 17 lines/field again, although I couldn't find a register
910 which could do that cropping */
911 if (fmt->fmt.vbi.sample_format == V4L2_PIX_FMT_GREY)
912 tvp5150_write(sd, TVP5150_LUMA_PROC_CTL_1, 0x70);
913 if (fmt->fmt.vbi.count[0] == 18 && fmt->fmt.vbi.count[1] == 18) {
914 tvp5150_write(sd, TVP5150_VERT_BLANKING_START, 0x00);
915 tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, 0x01);
916 }
917 return 0;
883 } 918 }
884 case VIDIOC_INT_S_VIDEO_ROUTING: 919 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
885 { 920 return -EINVAL;
886 struct v4l2_routing *route = arg; 921 svbi = &fmt->fmt.sliced;
922 if (svbi->service_set != 0) {
923 for (i = 0; i <= 23; i++) {
924 svbi->service_lines[1][i] = 0;
925 svbi->service_lines[0][i] =
926 tvp5150_set_vbi(sd, vbi_ram_default,
927 svbi->service_lines[0][i], 0xf0, i, 3);
928 }
929 /* Enables FIFO */
930 tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1);
931 } else {
932 /* Disables FIFO*/
933 tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 0);
887 934
888 decoder->route = *route; 935 /* Disable Full Field */
889 tvp5150_selmux(c); 936 tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0);
890 break; 937
938 /* Disable Line modes */
939 for (i = TVP5150_LINE_MODE_INI; i <= TVP5150_LINE_MODE_END; i++)
940 tvp5150_write(sd, i, 0xff);
891 } 941 }
892 case VIDIOC_S_STD: 942 return 0;
893 if (decoder->norm == *(v4l2_std_id *)arg) 943}
894 break;
895 return tvp5150_set_std(c, *(v4l2_std_id *)arg);
896 case VIDIOC_G_STD:
897 *(v4l2_std_id *)arg = decoder->norm;
898 break;
899 944
900 case VIDIOC_G_SLICED_VBI_CAP: 945static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
901 { 946{
902 struct v4l2_sliced_vbi_cap *cap = arg; 947 struct v4l2_sliced_vbi_format *svbi;
903 tvp5150_dbg(1, "VIDIOC_G_SLICED_VBI_CAP\n"); 948 int i, mask = 0;
904 949
905 tvp5150_vbi_get_cap(vbi_ram_default, cap); 950 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
906 break; 951 return -EINVAL;
952 svbi = &fmt->fmt.sliced;
953 memset(svbi, 0, sizeof(*svbi));
954
955 for (i = 0; i <= 23; i++) {
956 svbi->service_lines[0][i] =
957 tvp5150_get_vbi(sd, vbi_ram_default, i);
958 mask |= svbi->service_lines[0][i];
907 } 959 }
908 case VIDIOC_S_FMT: 960 svbi->service_set = mask;
909 { 961 return 0;
910 struct v4l2_format *fmt; 962}
911 struct v4l2_sliced_vbi_format *svbi;
912 int i;
913
914 fmt = arg;
915 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
916 return -EINVAL;
917 svbi = &fmt->fmt.sliced;
918 if (svbi->service_set != 0) {
919 for (i = 0; i <= 23; i++) {
920 svbi->service_lines[1][i] = 0;
921
922 svbi->service_lines[0][i]=tvp5150_set_vbi(c,
923 vbi_ram_default,
924 svbi->service_lines[0][i],0xf0,i,3);
925 }
926 /* Enables FIFO */
927 tvp5150_write(c, TVP5150_FIFO_OUT_CTRL,1);
928 } else {
929 /* Disables FIFO*/
930 tvp5150_write(c, TVP5150_FIFO_OUT_CTRL,0);
931 963
932 /* Disable Full Field */
933 tvp5150_write(c, TVP5150_FULL_FIELD_ENA, 0);
934 964
935 /* Disable Line modes */ 965static int tvp5150_g_chip_ident(struct v4l2_subdev *sd,
936 for (i=TVP5150_LINE_MODE_INI; i<=TVP5150_LINE_MODE_END; i++) 966 struct v4l2_chip_ident *chip)
937 tvp5150_write(c, i, 0xff); 967{
938 } 968 int rev;
939 break; 969 struct i2c_client *client = v4l2_get_subdevdata(sd);
940 }
941 case VIDIOC_G_FMT:
942 {
943 struct v4l2_format *fmt;
944 struct v4l2_sliced_vbi_format *svbi;
945 970
946 int i, mask=0; 971 rev = tvp5150_read(sd, TVP5150_ROM_MAJOR_VER) << 8 |
972 tvp5150_read(sd, TVP5150_ROM_MINOR_VER);
947 973
948 fmt = arg; 974 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TVP5150,
949 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) 975 rev);
950 return -EINVAL; 976}
951 svbi = &fmt->fmt.sliced;
952 memset(svbi, 0, sizeof(*svbi));
953 977
954 for (i = 0; i <= 23; i++) {
955 svbi->service_lines[0][i]=tvp5150_get_vbi(c,
956 vbi_ram_default,i);
957 mask|=svbi->service_lines[0][i];
958 }
959 svbi->service_set=mask;
960 break;
961 }
962 978
963#ifdef CONFIG_VIDEO_ADV_DEBUG 979#ifdef CONFIG_VIDEO_ADV_DEBUG
964 case VIDIOC_DBG_G_REGISTER: 980static int tvp5150_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
965 case VIDIOC_DBG_S_REGISTER: 981{
966 { 982 struct i2c_client *client = v4l2_get_subdevdata(sd);
967 struct v4l2_register *reg = arg;
968
969 if (!v4l2_chip_match_i2c_client(c, reg->match_type, reg->match_chip))
970 return -EINVAL;
971 if (!capable(CAP_SYS_ADMIN))
972 return -EPERM;
973 if (cmd == VIDIOC_DBG_G_REGISTER)
974 reg->val = tvp5150_read(c, reg->reg & 0xff);
975 else
976 tvp5150_write(c, reg->reg & 0xff, reg->val & 0xff);
977 break;
978 }
979#endif
980 983
981 case VIDIOC_LOG_STATUS: 984 if (!v4l2_chip_match_i2c_client(client,
982 dump_reg(c); 985 reg->match_type, reg->match_chip))
983 break; 986 return -EINVAL;
987 if (!capable(CAP_SYS_ADMIN))
988 return -EPERM;
989 reg->val = tvp5150_read(sd, reg->reg & 0xff);
990 return 0;
991}
984 992
985 case VIDIOC_G_TUNER: 993static int tvp5150_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
986 { 994{
987 struct v4l2_tuner *vt = arg; 995 struct i2c_client *client = v4l2_get_subdevdata(sd);
988 int status = tvp5150_read(c, 0x88);
989 996
990 vt->signal = ((status & 0x04) && (status & 0x02)) ? 0xffff : 0x0; 997 if (!v4l2_chip_match_i2c_client(client,
991 break; 998 reg->match_type, reg->match_chip))
992 } 999 return -EINVAL;
993 case VIDIOC_QUERYCTRL: 1000 if (!capable(CAP_SYS_ADMIN))
994 { 1001 return -EPERM;
995 struct v4l2_queryctrl *qc = arg; 1002 tvp5150_write(sd, reg->reg & 0xff, reg->val & 0xff);
996 int i; 1003 return 0;
1004}
1005#endif
997 1006
998 tvp5150_dbg(1, "VIDIOC_QUERYCTRL called\n"); 1007static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
1008{
1009 int status = tvp5150_read(sd, 0x88);
999 1010
1000 for (i = 0; i < ARRAY_SIZE(tvp5150_qctrl); i++) 1011 vt->signal = ((status & 0x04) && (status & 0x02)) ? 0xffff : 0x0;
1001 if (qc->id && qc->id == tvp5150_qctrl[i].id) { 1012 return 0;
1002 memcpy(qc, &(tvp5150_qctrl[i]), 1013}
1003 sizeof(*qc));
1004 return 0;
1005 }
1006 1014
1007 return -EINVAL; 1015static int tvp5150_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
1008 } 1016{
1009 case VIDIOC_G_CTRL: 1017 int i;
1010 {
1011 struct v4l2_control *ctrl = arg;
1012 tvp5150_dbg(1, "VIDIOC_G_CTRL called\n");
1013 1018
1014 return tvp5150_get_ctrl(c, ctrl); 1019 v4l2_dbg(1, debug, sd, "VIDIOC_QUERYCTRL called\n");
1015 } 1020
1016 case VIDIOC_S_CTRL: 1021 for (i = 0; i < ARRAY_SIZE(tvp5150_qctrl); i++)
1017 { 1022 if (qc->id && qc->id == tvp5150_qctrl[i].id) {
1018 struct v4l2_control *ctrl = arg; 1023 memcpy(qc, &(tvp5150_qctrl[i]),
1019 u8 i, n; 1024 sizeof(*qc));
1020 n = ARRAY_SIZE(tvp5150_qctrl); 1025 return 0;
1021 for (i = 0; i < n; i++)
1022 if (ctrl->id == tvp5150_qctrl[i].id) {
1023 if (ctrl->value <
1024 tvp5150_qctrl[i].minimum
1025 || ctrl->value >
1026 tvp5150_qctrl[i].maximum)
1027 return -ERANGE;
1028 tvp5150_dbg(1,
1029 "VIDIOC_S_CTRL: id=%d, value=%d\n",
1030 ctrl->id, ctrl->value);
1031 return tvp5150_set_ctrl(c, ctrl);
1032 }
1033 return -EINVAL;
1034 } 1026 }
1035 1027
1036 default: 1028 return -EINVAL;
1037 return -EINVAL; 1029}
1038 }
1039 1030
1040 return 0; 1031static int tvp5150_command(struct i2c_client *client, unsigned cmd, void *arg)
1032{
1033 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
1041} 1034}
1042 1035
1036/* ----------------------------------------------------------------------- */
1037
1038static const struct v4l2_subdev_core_ops tvp5150_core_ops = {
1039 .log_status = tvp5150_log_status,
1040 .g_ctrl = tvp5150_g_ctrl,
1041 .s_ctrl = tvp5150_s_ctrl,
1042 .queryctrl = tvp5150_queryctrl,
1043 .reset = tvp5150_reset,
1044 .g_chip_ident = tvp5150_g_chip_ident,
1045#ifdef CONFIG_VIDEO_ADV_DEBUG
1046 .g_register = tvp5150_g_register,
1047 .s_register = tvp5150_s_register,
1048#endif
1049};
1050
1051static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
1052 .s_std = tvp5150_s_std,
1053 .g_tuner = tvp5150_g_tuner,
1054};
1055
1056static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
1057 .s_routing = tvp5150_s_routing,
1058 .g_fmt = tvp5150_g_fmt,
1059 .s_fmt = tvp5150_s_fmt,
1060 .g_sliced_vbi_cap = tvp5150_g_sliced_vbi_cap,
1061};
1062
1063static const struct v4l2_subdev_ops tvp5150_ops = {
1064 .core = &tvp5150_core_ops,
1065 .tuner = &tvp5150_tuner_ops,
1066 .video = &tvp5150_video_ops,
1067};
1068
1069
1043/**************************************************************************** 1070/****************************************************************************
1044 I2C Client & Driver 1071 I2C Client & Driver
1045 ****************************************************************************/ 1072 ****************************************************************************/
1046static struct i2c_driver driver;
1047
1048static struct i2c_client client_template = {
1049 .name = "(unset)",
1050 .driver = &driver,
1051};
1052 1073
1053static int tvp5150_detect_client(struct i2c_adapter *adapter, 1074static int tvp5150_probe(struct i2c_client *c,
1054 int address, int kind) 1075 const struct i2c_device_id *id)
1055{ 1076{
1056 struct i2c_client *c;
1057 struct tvp5150 *core; 1077 struct tvp5150 *core;
1058 int rv; 1078 struct v4l2_subdev *sd;
1059
1060 if (debug)
1061 printk( KERN_INFO
1062 "tvp5150.c: detecting tvp5150 client on address 0x%x\n",
1063 address << 1);
1064
1065 client_template.adapter = adapter;
1066 client_template.addr = address;
1067 1079
1068 /* Check if the adapter supports the needed features */ 1080 /* Check if the adapter supports the needed features */
1069 if (!i2c_check_functionality 1081 if (!i2c_check_functionality(c->adapter,
1070 (adapter,
1071 I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) 1082 I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
1072 return 0; 1083 return -EIO;
1073
1074 c = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
1075 if (!c)
1076 return -ENOMEM;
1077 memcpy(c, &client_template, sizeof(struct i2c_client));
1078 1084
1079 core = kzalloc(sizeof(struct tvp5150), GFP_KERNEL); 1085 core = kzalloc(sizeof(struct tvp5150), GFP_KERNEL);
1080 if (!core) { 1086 if (!core) {
1081 kfree(c);
1082 return -ENOMEM; 1087 return -ENOMEM;
1083 } 1088 }
1084 i2c_set_clientdata(c, core); 1089 sd = &core->sd;
1085 1090 v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
1086 rv = i2c_attach_client(c); 1091 v4l_info(c, "chip found @ 0x%02x (%s)\n",
1092 c->addr << 1, c->adapter->name);
1087 1093
1088 core->norm = V4L2_STD_ALL; /* Default is autodetect */ 1094 core->norm = V4L2_STD_ALL; /* Default is autodetect */
1089 core->route.input = TVP5150_COMPOSITE1; 1095 core->route.input = TVP5150_COMPOSITE1;
@@ -1093,69 +1099,38 @@ static int tvp5150_detect_client(struct i2c_adapter *adapter,
1093 core->hue = 0; 1099 core->hue = 0;
1094 core->sat = 128; 1100 core->sat = 128;
1095 1101
1096 if (rv) {
1097 kfree(c);
1098 kfree(core);
1099 return rv;
1100 }
1101
1102 if (debug > 1) 1102 if (debug > 1)
1103 dump_reg(c); 1103 tvp5150_log_status(sd);
1104 return 0; 1104 return 0;
1105} 1105}
1106 1106
1107static int tvp5150_attach_adapter(struct i2c_adapter *adapter) 1107static int tvp5150_remove(struct i2c_client *c)
1108{ 1108{
1109 if (debug) 1109 struct v4l2_subdev *sd = i2c_get_clientdata(c);
1110 printk( KERN_INFO
1111 "tvp5150.c: starting probe for adapter %s (0x%x)\n",
1112 adapter->name, adapter->id);
1113 return i2c_probe(adapter, &addr_data, &tvp5150_detect_client);
1114}
1115
1116static int tvp5150_detach_client(struct i2c_client *c)
1117{
1118 struct tvp5150 *decoder = i2c_get_clientdata(c);
1119 int err;
1120 1110
1121 tvp5150_dbg(1, 1111 v4l2_dbg(1, debug, sd,
1122 "tvp5150.c: removing tvp5150 adapter on address 0x%x\n", 1112 "tvp5150.c: removing tvp5150 adapter on address 0x%x\n",
1123 c->addr << 1); 1113 c->addr << 1);
1124 1114
1125 err = i2c_detach_client(c); 1115 v4l2_device_unregister_subdev(sd);
1126 if (err) { 1116 kfree(to_tvp5150(sd));
1127 return err;
1128 }
1129
1130 kfree(decoder);
1131 kfree(c);
1132
1133 return 0; 1117 return 0;
1134} 1118}
1135 1119
1136/* ----------------------------------------------------------------------- */ 1120/* ----------------------------------------------------------------------- */
1137 1121
1138static struct i2c_driver driver = { 1122static const struct i2c_device_id tvp5150_id[] = {
1139 .driver = { 1123 { "tvp5150", 0 },
1140 .name = "tvp5150", 1124 { }
1141 }, 1125};
1142 .id = I2C_DRIVERID_TVP5150, 1126MODULE_DEVICE_TABLE(i2c, tvp5150_id);
1143
1144 .attach_adapter = tvp5150_attach_adapter,
1145 .detach_client = tvp5150_detach_client,
1146 1127
1128static struct v4l2_i2c_driver_data v4l2_i2c_data = {
1129 .name = "tvp5150",
1130 .driverid = I2C_DRIVERID_TVP5150,
1147 .command = tvp5150_command, 1131 .command = tvp5150_command,
1132 .probe = tvp5150_probe,
1133 .remove = tvp5150_remove,
1134 .legacy_class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL,
1135 .id_table = tvp5150_id,
1148}; 1136};
1149
1150static int __init tvp5150_init(void)
1151{
1152 return i2c_add_driver(&driver);
1153}
1154
1155static void __exit tvp5150_exit(void)
1156{
1157 i2c_del_driver(&driver);
1158}
1159
1160module_init(tvp5150_init);
1161module_exit(tvp5150_exit);
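The tvp5150 hunk above drops the legacy attach_adapter/detach_client probing in favour of an i2c probe/remove pair built around a v4l2_subdev embedded in the driver state: i2c_get_clientdata() now hands back the subdev, and to_tvp5150() presumably recovers the enclosing struct tvp5150 from it, the same container_of pattern the upd64031a/upd64083 conversions further down spell out explicitly. A minimal userspace sketch of that recovery, with the hypothetical names my_state/to_state standing in for the driver's own:

#include <stdio.h>
#include <stddef.h>

struct v4l2_subdev { int dummy; };        /* stand-in for the kernel type */

struct my_state {                         /* hypothetical, mirrors struct tvp5150 */
	struct v4l2_subdev sd;            /* embedded subdev */
	int norm;
};

/* Recover the containing structure from a pointer to its embedded member,
 * as the to_state()/to_tvp5150() helpers in this series do. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct my_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct my_state, sd);
}

int main(void)
{
	struct my_state state = { .norm = 42 };
	struct v4l2_subdev *sd = &state.sd;   /* what i2c_get_clientdata() returns */

	printf("norm = %d\n", to_state(sd)->norm);   /* prints 42 */
	return 0;
}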
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
new file mode 100644
index 000000000000..d5cdc4be1a35
--- /dev/null
+++ b/drivers/media/video/tw9910.c
@@ -0,0 +1,951 @@
1/*
2 * tw9910 Video Driver
3 *
4 * Copyright (C) 2008 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on ov772x driver,
8 *
9 * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com>
10 * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
11 * Copyright (C) 2008 Magnus Damm
12 * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/i2c.h>
22#include <linux/slab.h>
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/videodev2.h>
26#include <media/v4l2-chip-ident.h>
27#include <media/v4l2-common.h>
28#include <media/soc_camera.h>
29#include <media/tw9910.h>
30
31#define GET_ID(val) ((val & 0xF8) >> 3)
32#define GET_ReV(val) (val & 0x07)
33
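GET_ID() takes bits 7:3 of the ID register and GET_ReV() bits 2:0; tw9910_video_probe() further down accepts only ID 0x0B with revision 0, which corresponds to a raw readback of 0x58. A standalone check of that arithmetic (0x58 is used purely as the sample value implied by that test):

#include <stdio.h>

#define GET_ID(val)  (((val) & 0xF8) >> 3)   /* bits 7:3 */
#define GET_ReV(val) ((val) & 0x07)          /* bits 2:0 */

int main(void)
{
	unsigned char id_reg = 0x58;             /* 0b01011000 */

	/* 0x58 >> 3 == 0x0B, low three bits == 0: the combination the probe accepts */
	printf("ID=0x%02X rev=0x%02X\n", GET_ID(id_reg), GET_ReV(id_reg));
	return 0;
}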
34/*
35 * register offset
36 */
37#define ID 0x00 /* Product ID Code Register */
38#define STATUS1 0x01 /* Chip Status Register I */
39#define INFORM 0x02 /* Input Format */
40#define OPFORM 0x03 /* Output Format Control Register */
41#define DLYCTR 0x04 /* Hysteresis and HSYNC Delay Control */
42#define OUTCTR1 0x05 /* Output Control I */
43#define ACNTL1 0x06 /* Analog Control Register 1 */
44#define CROP_HI 0x07 /* Cropping Register, High */
45#define VDELAY_LO 0x08 /* Vertical Delay Register, Low */
46#define VACTIVE_LO 0x09 /* Vertical Active Register, Low */
47#define HDELAY_LO 0x0A /* Horizontal Delay Register, Low */
48#define HACTIVE_LO 0x0B /* Horizontal Active Register, Low */
49#define CNTRL1 0x0C /* Control Register I */
50#define VSCALE_LO 0x0D /* Vertical Scaling Register, Low */
51#define SCALE_HI 0x0E /* Scaling Register, High */
52#define HSCALE_LO 0x0F /* Horizontal Scaling Register, Low */
53#define BRIGHT 0x10 /* BRIGHTNESS Control Register */
54#define CONTRAST 0x11 /* CONTRAST Control Register */
55#define SHARPNESS 0x12 /* SHARPNESS Control Register I */
56#define SAT_U 0x13 /* Chroma (U) Gain Register */
57#define SAT_V 0x14 /* Chroma (V) Gain Register */
58#define HUE 0x15 /* Hue Control Register */
59#define CORING1 0x17
60#define CORING2 0x18 /* Coring and IF compensation */
61#define VBICNTL 0x19 /* VBI Control Register */
62#define ACNTL2 0x1A /* Analog Control 2 */
63#define OUTCTR2 0x1B /* Output Control 2 */
64#define SDT 0x1C /* Standard Selection */
65#define SDTR 0x1D /* Standard Recognition */
66#define TEST 0x1F /* Test Control Register */
67#define CLMPG 0x20 /* Clamping Gain */
68#define IAGC 0x21 /* Individual AGC Gain */
69#define AGCGAIN 0x22 /* AGC Gain */
70#define PEAKWT 0x23 /* White Peak Threshold */
71#define CLMPL 0x24 /* Clamp level */
72#define SYNCT 0x25 /* Sync Amplitude */
73#define MISSCNT 0x26 /* Sync Miss Count Register */
74#define PCLAMP 0x27 /* Clamp Position Register */
75#define VCNTL1 0x28 /* Vertical Control I */
76#define VCNTL2 0x29 /* Vertical Control II */
77#define CKILL 0x2A /* Color Killer Level Control */
78#define COMB 0x2B /* Comb Filter Control */
79#define LDLY 0x2C /* Luma Delay and H Filter Control */
80#define MISC1 0x2D /* Miscellaneous Control I */
81#define LOOP 0x2E /* LOOP Control Register */
82#define MISC2 0x2F /* Miscellaneous Control II */
83#define MVSN 0x30 /* Macrovision Detection */
84#define STATUS2 0x31 /* Chip STATUS II */
85#define HFREF 0x32 /* H monitor */
86#define CLMD 0x33 /* CLAMP MODE */
87#define IDCNTL 0x34 /* ID Detection Control */
88#define CLCNTL1 0x35 /* Clamp Control I */
89#define ANAPLLCTL 0x4C
90#define VBIMIN 0x4D
91#define HSLOWCTL 0x4E
92#define WSS3 0x4F
93#define FILLDATA 0x50
94#define SDID 0x51
95#define DID 0x52
96#define WSS1 0x53
97#define WSS2 0x54
98#define VVBI 0x55
99#define LCTL6 0x56
100#define LCTL7 0x57
101#define LCTL8 0x58
102#define LCTL9 0x59
103#define LCTL10 0x5A
104#define LCTL11 0x5B
105#define LCTL12 0x5C
106#define LCTL13 0x5D
107#define LCTL14 0x5E
108#define LCTL15 0x5F
109#define LCTL16 0x60
110#define LCTL17 0x61
111#define LCTL18 0x62
112#define LCTL19 0x63
113#define LCTL20 0x64
114#define LCTL21 0x65
115#define LCTL22 0x66
116#define LCTL23 0x67
117#define LCTL24 0x68
118#define LCTL25 0x69
119#define LCTL26 0x6A
120#define HSGEGIN 0x6B
121#define HSEND 0x6C
122#define OVSDLY 0x6D
123#define OVSEND 0x6E
124#define VBIDELAY 0x6F
125
126/*
127 * register detail
128 */
129
130/* INFORM */
131#define FC27_ON 0x40 /* 1 : Input crystal clock frequency is 27MHz */
132#define FC27_FF 0x00 /* 0 : Square pixel mode. */
133 /* Must use 24.54MHz for 60Hz field rate */
134 /* source or 29.5MHz for 50Hz field rate */
135#define IFSEL_S 0x10 /* 01 : S-video decoding */
136#define IFSEL_C 0x00 /* 00 : Composite video decoding */
137 /* Y input video selection */
138#define YSEL_M0 0x00 /* 00 : Mux0 selected */
139#define YSEL_M1 0x04 /* 01 : Mux1 selected */
140#define YSEL_M2 0x08 /* 10 : Mux2 selected */
141#define YSEL_M3 0x10 /* 11 : Mux3 selected */
142
143/* OPFORM */
144#define MODE 0x80 /* 0 : CCIR601 compatible YCrCb 4:2:2 format */
145 /* 1 : ITU-R-656 compatible data sequence format */
146#define LEN 0x40 /* 0 : 8-bit YCrCb 4:2:2 output format */
147 /* 1 : 16-bit YCrCb 4:2:2 output format.*/
148#define LLCMODE 0x20 /* 1 : LLC output mode. */
149 /* 0 : free-run output mode */
150#define AINC 0x10 /* Serial interface auto-indexing control */
151 /* 0 : auto-increment */
152 /* 1 : non-auto */
153#define VSCTL 0x08 /* 1 : Vertical out ctrl by DVALID */
154 /* 0 : Vertical out ctrl by HACTIVE and DVALID */
155#define OEN 0x04 /* Output Enable together with TRI_SEL. */
156
157/* OUTCTR1 */
158#define VSP_LO 0x00 /* 0 : VS pin output polarity is active low */
159#define VSP_HI 0x80 /* 1 : VS pin output polarity is active high. */
160 /* VS pin output control */
161#define VSSL_VSYNC 0x00 /* 0 : VSYNC */
162#define VSSL_VACT 0x10 /* 1 : VACT */
163#define VSSL_FIELD 0x20 /* 2 : FIELD */
164#define VSSL_VVALID 0x30 /* 3 : VVALID */
165#define VSSL_ZERO 0x70 /* 7 : 0 */
166#define HSP_LOW 0x00 /* 0 : HS pin output polarity is active low */
167#define HSP_HI 0x08 /* 1 : HS pin output polarity is active high.*/
168 /* HS pin output control */
169#define HSSL_HACT 0x00 /* 0 : HACT */
170#define HSSL_HSYNC 0x01 /* 1 : HSYNC */
171#define HSSL_DVALID 0x02 /* 2 : DVALID */
172#define HSSL_HLOCK 0x03 /* 3 : HLOCK */
173#define HSSL_ASYNCW 0x04 /* 4 : ASYNCW */
174#define HSSL_ZERO 0x07 /* 7 : 0 */
175
176/* ACNTL1 */
177#define SRESET 0x80 /* resets the device to its default state
178 * but all register content remain unchanged.
179 * This bit is self-resetting.
180 */
181
182/* VBICNTL */
183/* RTSEL : control the real time signal
184* output from the MPOUT pin
185*/
186#define RTSEL_MASK 0x07
187#define RTSEL_VLOSS 0x00 /* 0000 = Video loss */
188#define RTSEL_HLOCK 0x01 /* 0001 = H-lock */
189#define RTSEL_SLOCK 0x02 /* 0010 = S-lock */
190#define RTSEL_VLOCK 0x03 /* 0011 = V-lock */
191#define RTSEL_MONO 0x04 /* 0100 = MONO */
192#define RTSEL_DET50 0x05 /* 0101 = DET50 */
193#define RTSEL_FIELD 0x06 /* 0110 = FIELD */
194#define RTSEL_RTCO 0x07 /* 0111 = RTCO ( Real Time Control ) */
195
196/*
197 * structure
198 */
199
200struct regval_list {
201 unsigned char reg_num;
202 unsigned char value;
203};
204
205struct tw9910_scale_ctrl {
206 char *name;
207 unsigned short width;
208 unsigned short height;
209 u16 hscale;
210 u16 vscale;
211};
212
213struct tw9910_cropping_ctrl {
214 u16 vdelay;
215 u16 vactive;
216 u16 hdelay;
217 u16 hactive;
218};
219
220struct tw9910_hsync_ctrl {
221 u16 start;
222 u16 end;
223};
224
225struct tw9910_priv {
226 struct tw9910_video_info *info;
227 struct i2c_client *client;
228 struct soc_camera_device icd;
229 const struct tw9910_scale_ctrl *scale;
230};
231
232/*
233 * register settings
234 */
235
236#define ENDMARKER { 0xff, 0xff }
237
238static const struct regval_list tw9910_default_regs[] =
239{
240 { OPFORM, 0x00 },
241 { OUTCTR1, VSP_LO | VSSL_VVALID | HSP_HI | HSSL_HSYNC },
242 ENDMARKER,
243};
244
245static const struct soc_camera_data_format tw9910_color_fmt[] = {
246 {
247 .name = "VYUY",
248 .fourcc = V4L2_PIX_FMT_VYUY,
249 .depth = 16,
250 .colorspace = V4L2_COLORSPACE_SMPTE170M,
251 }
252};
253
254static const struct tw9910_scale_ctrl tw9910_ntsc_scales[] = {
255 {
256 .name = "NTSC SQ",
257 .width = 640,
258 .height = 480,
259 .hscale = 0x0100,
260 .vscale = 0x0100,
261 },
262 {
263 .name = "NTSC CCIR601",
264 .width = 720,
265 .height = 480,
266 .hscale = 0x0100,
267 .vscale = 0x0100,
268 },
269 {
270 .name = "NTSC SQ (CIF)",
271 .width = 320,
272 .height = 240,
273 .hscale = 0x0200,
274 .vscale = 0x0200,
275 },
276 {
277 .name = "NTSC CCIR601 (CIF)",
278 .width = 360,
279 .height = 240,
280 .hscale = 0x0200,
281 .vscale = 0x0200,
282 },
283 {
284 .name = "NTSC SQ (QCIF)",
285 .width = 160,
286 .height = 120,
287 .hscale = 0x0400,
288 .vscale = 0x0400,
289 },
290 {
291 .name = "NTSC CCIR601 (QCIF)",
292 .width = 180,
293 .height = 120,
294 .hscale = 0x0400,
295 .vscale = 0x0400,
296 },
297};
298
299static const struct tw9910_scale_ctrl tw9910_pal_scales[] = {
300 {
301 .name = "PAL SQ",
302 .width = 768,
303 .height = 576,
304 .hscale = 0x0100,
305 .vscale = 0x0100,
306 },
307 {
308 .name = "PAL CCIR601",
309 .width = 720,
310 .height = 576,
311 .hscale = 0x0100,
312 .vscale = 0x0100,
313 },
314 {
315 .name = "PAL SQ (CIF)",
316 .width = 384,
317 .height = 288,
318 .hscale = 0x0200,
319 .vscale = 0x0200,
320 },
321 {
322 .name = "PAL CCIR601 (CIF)",
323 .width = 360,
324 .height = 288,
325 .hscale = 0x0200,
326 .vscale = 0x0200,
327 },
328 {
329 .name = "PAL SQ (QCIF)",
330 .width = 192,
331 .height = 144,
332 .hscale = 0x0400,
333 .vscale = 0x0400,
334 },
335 {
336 .name = "PAL CCIR601 (QCIF)",
337 .width = 180,
338 .height = 144,
339 .hscale = 0x0400,
340 .vscale = 0x0400,
341 },
342};
343
344static const struct tw9910_cropping_ctrl tw9910_cropping_ctrl = {
345 .vdelay = 0x0012,
346 .vactive = 0x00F0,
347 .hdelay = 0x0010,
348 .hactive = 0x02D0,
349};
350
351static const struct tw9910_hsync_ctrl tw9910_hsync_ctrl = {
352 .start = 0x0260,
353 .end = 0x0300,
354};
355
356/*
357 * general function
358 */
359static int tw9910_set_scale(struct i2c_client *client,
360 const struct tw9910_scale_ctrl *scale)
361{
362 int ret;
363
364 ret = i2c_smbus_write_byte_data(client, SCALE_HI,
365 (scale->vscale & 0x0F00) >> 4 |
366 (scale->hscale & 0x0F00) >> 8);
367 if (ret < 0)
368 return ret;
369
370 ret = i2c_smbus_write_byte_data(client, HSCALE_LO,
371 scale->hscale & 0x00FF);
372 if (ret < 0)
373 return ret;
374
375 ret = i2c_smbus_write_byte_data(client, VSCALE_LO,
376 scale->vscale & 0x00FF);
377
378 return ret;
379}
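tw9910_set_scale() spreads each 12-bit scale factor over two registers: the high nibbles of vscale and hscale share SCALE_HI (bits 7:4 and 3:0 respectively) while the low bytes go to VSCALE_LO and HSCALE_LO. Recomputed standalone for the CIF table entries (hscale = vscale = 0x0200):

#include <stdio.h>

int main(void)
{
	unsigned int hscale = 0x0200, vscale = 0x0200;

	unsigned int scale_hi  = ((vscale & 0x0F00) >> 4) | ((hscale & 0x0F00) >> 8);
	unsigned int hscale_lo = hscale & 0x00FF;
	unsigned int vscale_lo = vscale & 0x00FF;

	printf("SCALE_HI=0x%02X HSCALE_LO=0x%02X VSCALE_LO=0x%02X\n",
	       scale_hi, hscale_lo, vscale_lo);   /* 0x22 0x00 0x00 */
	return 0;
}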
380
381static int tw9910_set_cropping(struct i2c_client *client,
382 const struct tw9910_cropping_ctrl *cropping)
383{
384 int ret;
385
386 ret = i2c_smbus_write_byte_data(client, CROP_HI,
387 (cropping->vdelay & 0x0300) >> 2 |
388 (cropping->vactive & 0x0300) >> 4 |
389 (cropping->hdelay & 0x0300) >> 6 |
390 (cropping->hactive & 0x0300) >> 8);
391 if (ret < 0)
392 return ret;
393
394 ret = i2c_smbus_write_byte_data(client, VDELAY_LO,
395 cropping->vdelay & 0x00FF);
396 if (ret < 0)
397 return ret;
398
399 ret = i2c_smbus_write_byte_data(client, VACTIVE_LO,
400 cropping->vactive & 0x00FF);
401 if (ret < 0)
402 return ret;
403
404 ret = i2c_smbus_write_byte_data(client, HDELAY_LO,
405 cropping->hdelay & 0x00FF);
406 if (ret < 0)
407 return ret;
408
409 ret = i2c_smbus_write_byte_data(client, HACTIVE_LO,
410 cropping->hactive & 0x00FF);
411
412 return ret;
413}
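tw9910_set_cropping() works the same way: the two high bits of each 10-bit cropping value are packed into CROP_HI and the low bytes are written individually. For the driver's fixed tw9910_cropping_ctrl table this comes out as:

#include <stdio.h>

int main(void)
{
	unsigned int vdelay = 0x0012, vactive = 0x00F0;
	unsigned int hdelay = 0x0010, hactive = 0x02D0;

	unsigned int crop_hi = ((vdelay  & 0x0300) >> 2) |
			       ((vactive & 0x0300) >> 4) |
			       ((hdelay  & 0x0300) >> 6) |
			       ((hactive & 0x0300) >> 8);

	/* CROP_HI=0x02, low bytes 0x12 0xF0 0x10 0xD0 */
	printf("CROP_HI=0x%02X VDELAY_LO=0x%02X VACTIVE_LO=0x%02X "
	       "HDELAY_LO=0x%02X HACTIVE_LO=0x%02X\n",
	       crop_hi, vdelay & 0xFF, vactive & 0xFF,
	       hdelay & 0xFF, hactive & 0xFF);
	return 0;
}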
414
415static int tw9910_set_hsync(struct i2c_client *client,
416 const struct tw9910_hsync_ctrl *hsync)
417{
418 int ret;
419
420 /* bit 10 - 3 */
421 ret = i2c_smbus_write_byte_data(client, HSGEGIN,
422 (hsync->start & 0x07F8) >> 3);
423 if (ret < 0)
424 return ret;
425
426 /* bit 10 - 3 */
427 ret = i2c_smbus_write_byte_data(client, HSEND,
428 (hsync->end & 0x07F8) >> 3);
429 if (ret < 0)
430 return ret;
431
432 /* bit 2 - 0 */
433 ret = i2c_smbus_read_byte_data(client, HSLOWCTL);
434 if (ret < 0)
435 return ret;
436
437 ret = i2c_smbus_write_byte_data(client, HSLOWCTL,
438 (ret & 0x88) |
439 (hsync->start & 0x0007) << 4 |
440 (hsync->end & 0x0007));
441
442 return ret;
443}
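tw9910_set_hsync() keeps bits 10:3 of the start and end positions in HSGEGIN/HSEND and folds the remaining low three bits of each into the two nibbles of HSLOWCTL, preserving that register's bits 7 and 3 (the 0x88 mask). Recomputed for the fixed tw9910_hsync_ctrl values; the prior HSLOWCTL readback of 0x80 is an assumed sample, not a documented default:

#include <stdio.h>

int main(void)
{
	unsigned int start = 0x0260, end = 0x0300;
	unsigned int hslowctl_old = 0x80;                 /* assumed readback */

	unsigned int hsbegin  = (start & 0x07F8) >> 3;    /* bits 10..3 */
	unsigned int hsend    = (end   & 0x07F8) >> 3;    /* bits 10..3 */
	unsigned int hslowctl = (hslowctl_old & 0x88) |
				((start & 0x0007) << 4) | (end & 0x0007);

	printf("HSGEGIN=0x%02X HSEND=0x%02X HSLOWCTL=0x%02X\n",
	       hsbegin, hsend, hslowctl);                 /* 0x4C 0x60 0x80 */
	return 0;
}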
444
445static int tw9910_write_array(struct i2c_client *client,
446 const struct regval_list *vals)
447{
448 while (vals->reg_num != 0xff) {
449 int ret = i2c_smbus_write_byte_data(client,
450 vals->reg_num,
451 vals->value);
452 if (ret < 0)
453 return ret;
454 vals++;
455 }
456 return 0;
457}
458
459static int tw9910_mask_set(struct i2c_client *client, u8 command,
460 u8 mask, u8 set)
461{
462 s32 val = i2c_smbus_read_byte_data(client, command);
463
464 val &= ~mask;
465 val |= set;
466
467 return i2c_smbus_write_byte_data(client, command, val);
468}
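tw9910_mask_set() is a plain read-modify-write: clear the masked bits, OR in the new value, write the byte back. The same logic on an ordinary variable, mirroring the OPFORM/LEN call that tw9910_set_fmt() makes to switch between 8-bit and 16-bit bus output:

#include <stdio.h>

static unsigned char fake_reg = 0x55;     /* stands in for the OPFORM register */

static void mask_set(unsigned char mask, unsigned char set)
{
	unsigned char val = fake_reg;     /* i2c_smbus_read_byte_data()  */

	val &= ~mask;
	val |= set;
	fake_reg = val;                   /* i2c_smbus_write_byte_data() */
}

int main(void)
{
	mask_set(0x40, 0x00);                        /* clear LEN: 8-bit bus */
	printf("after clear: 0x%02X\n", fake_reg);   /* 0x15 */
	mask_set(0x40, 0x40);                        /* set LEN: 16-bit bus  */
	printf("after set:   0x%02X\n", fake_reg);   /* 0x55 */
	return 0;
}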
469
470static void tw9910_reset(struct i2c_client *client)
471{
472 i2c_smbus_write_byte_data(client, ACNTL1, SRESET);
473 msleep(1);
474}
475
476static const struct tw9910_scale_ctrl*
477tw9910_select_norm(struct soc_camera_device *icd, u32 width, u32 height)
478{
479 const struct tw9910_scale_ctrl *scale;
480 const struct tw9910_scale_ctrl *ret = NULL;
481 v4l2_std_id norm = icd->vdev->current_norm;
482 __u32 diff = 0xffffffff, tmp;
483 int size, i;
484
485 if (norm & V4L2_STD_NTSC) {
486 scale = tw9910_ntsc_scales;
487 size = ARRAY_SIZE(tw9910_ntsc_scales);
488 } else if (norm & V4L2_STD_PAL) {
489 scale = tw9910_pal_scales;
490 size = ARRAY_SIZE(tw9910_pal_scales);
491 } else {
492 return NULL;
493 }
494
495 for (i = 0; i < size; i++) {
496 tmp = abs(width - scale[i].width) +
497 abs(height - scale[i].height);
498 if (tmp < diff) {
499 diff = tmp;
500 ret = scale + i;
501 }
502 }
503
504 return ret;
505}
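tw9910_select_norm() walks the NTSC or PAL table and keeps the entry whose combined width and height distance from the request is smallest. The same search in plain C, showing that a 350x250 request under NTSC resolves to the 360x240 CCIR601 (CIF) entry:

#include <stdio.h>
#include <stdlib.h>

struct mode { const char *name; int width, height; };

static const struct mode ntsc[] = {
	{ "NTSC SQ",             640, 480 },
	{ "NTSC CCIR601",        720, 480 },
	{ "NTSC SQ (CIF)",       320, 240 },
	{ "NTSC CCIR601 (CIF)",  360, 240 },
	{ "NTSC SQ (QCIF)",      160, 120 },
	{ "NTSC CCIR601 (QCIF)", 180, 120 },
};

int main(void)
{
	int want_w = 350, want_h = 250, best = 0, i;
	unsigned int diff = ~0u, tmp;

	for (i = 0; i < (int)(sizeof(ntsc) / sizeof(ntsc[0])); i++) {
		tmp = abs(want_w - ntsc[i].width) + abs(want_h - ntsc[i].height);
		if (tmp < diff) {        /* first closest entry wins, as in the driver */
			diff = tmp;
			best = i;
		}
	}
	printf("%s (%dx%d)\n", ntsc[best].name,
	       ntsc[best].width, ntsc[best].height);  /* NTSC CCIR601 (CIF) 360x240 */
	return 0;
}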
506
507/*
508 * soc_camera_ops function
509 */
510static int tw9910_init(struct soc_camera_device *icd)
511{
512 struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd);
513 int ret = 0;
514
515 if (priv->info->link.power) {
516 ret = priv->info->link.power(&priv->client->dev, 1);
517 if (ret < 0)
518 return ret;
519 }
520
521 if (priv->info->link.reset)
522 ret = priv->info->link.reset(&priv->client->dev);
523
524 return ret;
525}
526
527static int tw9910_release(struct soc_camera_device *icd)
528{
529 struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd);
530 int ret = 0;
531
532 if (priv->info->link.power)
533 ret = priv->info->link.power(&priv->client->dev, 0);
534
535 return ret;
536}
537
538static int tw9910_start_capture(struct soc_camera_device *icd)
539{
540 struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd);
541
542 if (!priv->scale) {
543 dev_err(&icd->dev, "norm select error\n");
544 return -EPERM;
545 }
546
547 dev_dbg(&icd->dev, "%s %dx%d\n",
548 priv->scale->name,
549 priv->scale->width,
550 priv->scale->height);
551
552 return 0;
553}
554
555static int tw9910_stop_capture(struct soc_camera_device *icd)
556{
557 return 0;
558}
559
560static int tw9910_set_bus_param(struct soc_camera_device *icd,
561 unsigned long flags)
562{
563 return 0;
564}
565
566static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd)
567{
568 struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd);
569 struct soc_camera_link *icl = priv->client->dev.platform_data;
570 unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
571 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
572 SOCAM_DATA_ACTIVE_HIGH | priv->info->buswidth;
573
574 return soc_camera_apply_sensor_flags(icl, flags);
575}
576
577static int tw9910_get_chip_id(struct soc_camera_device *icd,
578 struct v4l2_chip_ident *id)
579{
580 id->ident = V4L2_IDENT_TW9910;
581 id->revision = 0;
582
583 return 0;
584}
585
586static int tw9910_set_std(struct soc_camera_device *icd,
587 v4l2_std_id *a)
588{
589 int ret = -EINVAL;
590
591 if (*a & (V4L2_STD_NTSC | V4L2_STD_PAL))
592 ret = 0;
593
594 return ret;
595}
596
597static int tw9910_enum_input(struct soc_camera_device *icd,
598 struct v4l2_input *inp)
599{
600 inp->type = V4L2_INPUT_TYPE_TUNER;
601 inp->std = V4L2_STD_UNKNOWN;
602 strcpy(inp->name, "Video");
603
604 return 0;
605}
606
607#ifdef CONFIG_VIDEO_ADV_DEBUG
608static int tw9910_get_register(struct soc_camera_device *icd,
609 struct v4l2_register *reg)
610{
611 struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd);
612 int ret;
613
614 if (reg->reg > 0xff)
615 return -EINVAL;
616
617 ret = i2c_smbus_read_byte_data(priv->client, reg->reg);
618 if (ret < 0)
619 return ret;
620
621 /* ret = int
622 * reg->val = __u64
623 */
624 reg->val = (__u64)ret;
625
626 return 0;
627}
628
629static int tw9910_set_register(struct soc_camera_device *icd,
630 struct v4l2_register *reg)
631{
632 struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd);
633
634 if (reg->reg > 0xff ||
635 reg->val > 0xff)
636 return -EINVAL;
637
638 return i2c_smbus_write_byte_data(priv->client, reg->reg, reg->val);
639}
640#endif
641
642static int tw9910_set_fmt(struct soc_camera_device *icd, __u32 pixfmt,
643 struct v4l2_rect *rect)
644{
645 struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd);
646 int ret = -EINVAL;
647 u8 val;
648
649 /*
650 * select suitable norm
651 */
652 priv->scale = tw9910_select_norm(icd, rect->width, rect->height);
653 if (!priv->scale)
654 goto tw9910_set_fmt_error;
655
656 /*
657 * reset hardware
658 */
659 tw9910_reset(priv->client);
660 ret = tw9910_write_array(priv->client, tw9910_default_regs);
661 if (ret < 0)
662 goto tw9910_set_fmt_error;
663
664 /*
665 * set bus width
666 */
667 val = 0x00;
668 if (SOCAM_DATAWIDTH_16 == priv->info->buswidth)
669 val = LEN;
670
671 ret = tw9910_mask_set(priv->client, OPFORM, LEN, val);
672 if (ret < 0)
673 goto tw9910_set_fmt_error;
674
675 /*
676 * select MPOUT behavior
677 */
678 switch (priv->info->mpout) {
679 case TW9910_MPO_VLOSS:
680 val = RTSEL_VLOSS; break;
681 case TW9910_MPO_HLOCK:
682 val = RTSEL_HLOCK; break;
683 case TW9910_MPO_SLOCK:
684 val = RTSEL_SLOCK; break;
685 case TW9910_MPO_VLOCK:
686 val = RTSEL_VLOCK; break;
687 case TW9910_MPO_MONO:
688 val = RTSEL_MONO; break;
689 case TW9910_MPO_DET50:
690 val = RTSEL_DET50; break;
691 case TW9910_MPO_FIELD:
692 val = RTSEL_FIELD; break;
693 case TW9910_MPO_RTCO:
694 val = RTSEL_RTCO; break;
695 default:
696 val = 0;
697 }
698
699 ret = tw9910_mask_set(priv->client, VBICNTL, RTSEL_MASK, val);
700 if (ret < 0)
701 goto tw9910_set_fmt_error;
702
703 /*
704 * set scale
705 */
706 ret = tw9910_set_scale(priv->client, priv->scale);
707 if (ret < 0)
708 goto tw9910_set_fmt_error;
709
710 /*
711 * set cropping
712 */
713 ret = tw9910_set_cropping(priv->client, &tw9910_cropping_ctrl);
714 if (ret < 0)
715 goto tw9910_set_fmt_error;
716
717 /*
718 * set hsync
719 */
720 ret = tw9910_set_hsync(priv->client, &tw9910_hsync_ctrl);
721 if (ret < 0)
722 goto tw9910_set_fmt_error;
723
724 return ret;
725
726tw9910_set_fmt_error:
727
728 tw9910_reset(priv->client);
729 priv->scale = NULL;
730
731 return ret;
732}
733
734static int tw9910_try_fmt(struct soc_camera_device *icd,
735 struct v4l2_format *f)
736{
737 struct v4l2_pix_format *pix = &f->fmt.pix;
738 const struct tw9910_scale_ctrl *scale;
739
740 if (V4L2_FIELD_ANY == pix->field) {
741 pix->field = V4L2_FIELD_INTERLACED;
742 } else if (V4L2_FIELD_INTERLACED != pix->field) {
743 dev_err(&icd->dev, "Field type invalid.\n");
744 return -EINVAL;
745 }
746
747 /*
748 * select suitable norm
749 */
750 scale = tw9910_select_norm(icd, pix->width, pix->height);
751 if (!scale)
752 return -EINVAL;
753
754 pix->width = scale->width;
755 pix->height = scale->height;
756
757 return 0;
758}
759
760static int tw9910_video_probe(struct soc_camera_device *icd)
761{
762 struct tw9910_priv *priv = container_of(icd, struct tw9910_priv, icd);
763 s32 val;
764 int ret;
765
766 /*
767 * We must have a parent by now. And it cannot be a wrong one.
768 * So this entire test is completely redundant.
769 */
770 if (!icd->dev.parent ||
771 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
772 return -ENODEV;
773
774 /*
775 * tw9910 only use 8 or 16 bit bus width
776 */
777 if (SOCAM_DATAWIDTH_16 != priv->info->buswidth &&
778 SOCAM_DATAWIDTH_8 != priv->info->buswidth) {
779 dev_err(&icd->dev, "bus width error\n");
780 return -ENODEV;
781 }
782
783 icd->formats = tw9910_color_fmt;
784 icd->num_formats = ARRAY_SIZE(tw9910_color_fmt);
785
786 /*
787 * check and show Product ID
788 */
789 val = i2c_smbus_read_byte_data(priv->client, ID);
790 if (0x0B != GET_ID(val) ||
791 0x00 != GET_ReV(val)) {
792 dev_err(&icd->dev,
793 "Product ID error %x:%x\n", GET_ID(val), GET_ReV(val));
794 return -ENODEV;
795 }
796
797 dev_info(&icd->dev,
798 "tw9910 Product ID %0x:%0x\n", GET_ID(val), GET_ReV(val));
799
800 ret = soc_camera_video_start(icd);
801 if (ret < 0)
802 return ret;
803
804 icd->vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL;
805 icd->vdev->current_norm = V4L2_STD_NTSC;
806
807 return ret;
808}
809
810static void tw9910_video_remove(struct soc_camera_device *icd)
811{
812 soc_camera_video_stop(icd);
813}
814
815static struct soc_camera_ops tw9910_ops = {
816 .owner = THIS_MODULE,
817 .probe = tw9910_video_probe,
818 .remove = tw9910_video_remove,
819 .init = tw9910_init,
820 .release = tw9910_release,
821 .start_capture = tw9910_start_capture,
822 .stop_capture = tw9910_stop_capture,
823 .set_fmt = tw9910_set_fmt,
824 .try_fmt = tw9910_try_fmt,
825 .set_bus_param = tw9910_set_bus_param,
826 .query_bus_param = tw9910_query_bus_param,
827 .get_chip_id = tw9910_get_chip_id,
828 .set_std = tw9910_set_std,
829 .enum_input = tw9910_enum_input,
830#ifdef CONFIG_VIDEO_ADV_DEBUG
831 .get_register = tw9910_get_register,
832 .set_register = tw9910_set_register,
833#endif
834};
835
836/*
837 * i2c_driver function
838 */
839
840static int tw9910_probe(struct i2c_client *client,
841 const struct i2c_device_id *did)
842
843{
844 struct tw9910_priv *priv;
845 struct tw9910_video_info *info;
846 struct soc_camera_device *icd;
847 const struct tw9910_scale_ctrl *scale;
848 int i, ret;
849
850 info = client->dev.platform_data;
851 if (!info)
852 return -EINVAL;
853
854 if (!i2c_check_functionality(to_i2c_adapter(client->dev.parent),
855 I2C_FUNC_SMBUS_BYTE_DATA)) {
856 dev_err(&client->dev,
857 "I2C-Adapter doesn't support "
858 "I2C_FUNC_SMBUS_BYTE_DATA\n");
859 return -EIO;
860 }
861
862 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
863 if (!priv)
864 return -ENOMEM;
865
866 priv->info = info;
867 priv->client = client;
868 i2c_set_clientdata(client, priv);
869
870 icd = &priv->icd;
871 icd->ops = &tw9910_ops;
872 icd->control = &client->dev;
873 icd->iface = info->link.bus_id;
874
875 /*
876 * set width and height
877 */
878 icd->width_max = tw9910_ntsc_scales[0].width; /* set default */
879 icd->width_min = tw9910_ntsc_scales[0].width;
880 icd->height_max = tw9910_ntsc_scales[0].height;
881 icd->height_min = tw9910_ntsc_scales[0].height;
882
883 scale = tw9910_ntsc_scales;
884 for (i = 0; i < ARRAY_SIZE(tw9910_ntsc_scales); i++) {
885 icd->width_max = max(scale[i].width, icd->width_max);
886 icd->width_min = min(scale[i].width, icd->width_min);
887 icd->height_max = max(scale[i].height, icd->height_max);
888 icd->height_min = min(scale[i].height, icd->height_min);
889 }
890 scale = tw9910_pal_scales;
891 for (i = 0; i < ARRAY_SIZE(tw9910_pal_scales); i++) {
892 icd->width_max = max(scale[i].width, icd->width_max);
893 icd->width_min = min(scale[i].width, icd->width_min);
894 icd->height_max = max(scale[i].height, icd->height_max);
895 icd->height_min = min(scale[i].height, icd->height_min);
896 }
897
898 ret = soc_camera_device_register(icd);
899
900 if (ret) {
901 i2c_set_clientdata(client, NULL);
902 kfree(priv);
903 }
904
905 return ret;
906}
907
908static int tw9910_remove(struct i2c_client *client)
909{
910 struct tw9910_priv *priv = i2c_get_clientdata(client);
911
912 soc_camera_device_unregister(&priv->icd);
913 i2c_set_clientdata(client, NULL);
914 kfree(priv);
915 return 0;
916}
917
918static const struct i2c_device_id tw9910_id[] = {
919 { "tw9910", 0 },
920 { }
921};
922MODULE_DEVICE_TABLE(i2c, tw9910_id);
923
924static struct i2c_driver tw9910_i2c_driver = {
925 .driver = {
926 .name = "tw9910",
927 },
928 .probe = tw9910_probe,
929 .remove = tw9910_remove,
930 .id_table = tw9910_id,
931};
932
933/*
934 * module function
935 */
936static int __init tw9910_module_init(void)
937{
938 return i2c_add_driver(&tw9910_i2c_driver);
939}
940
941static void __exit tw9910_module_exit(void)
942{
943 i2c_del_driver(&tw9910_i2c_driver);
944}
945
946module_init(tw9910_module_init);
947module_exit(tw9910_module_exit);
948
949MODULE_DESCRIPTION("SoC Camera driver for tw9910");
950MODULE_AUTHOR("Kuninori Morimoto");
951MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c
index b4628874933b..7a609a3a6dbe 100644
--- a/drivers/media/video/upd64031a.c
+++ b/drivers/media/video/upd64031a.c
@@ -26,7 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/videodev2.h> 28#include <linux/videodev2.h>
29#include <media/v4l2-common.h> 29#include <media/v4l2-device.h>
30#include <media/v4l2-chip-ident.h> 30#include <media/v4l2-chip-ident.h>
31#include <media/v4l2-i2c-drv.h> 31#include <media/v4l2-i2c-drv.h>
32#include <media/upd64031a.h> 32#include <media/upd64031a.h>
@@ -62,6 +62,7 @@ enum {
62}; 62};
63 63
64struct upd64031a_state { 64struct upd64031a_state {
65 struct v4l2_subdev sd;
65 u8 regs[TOT_REGS]; 66 u8 regs[TOT_REGS];
66 u8 gr_mode; 67 u8 gr_mode;
67 u8 direct_3dycs_connect; 68 u8 direct_3dycs_connect;
@@ -69,6 +70,11 @@ struct upd64031a_state {
69 u8 ext_vert_sync; 70 u8 ext_vert_sync;
70}; 71};
71 72
73static inline struct upd64031a_state *to_state(struct v4l2_subdev *sd)
74{
75 return container_of(sd, struct upd64031a_state, sd);
76}
77
72static u8 upd64031a_init[] = { 78static u8 upd64031a_init[] = {
73 0x00, 0xb8, 0x48, 0xd2, 0xe6, 79 0x00, 0xb8, 0x48, 0xd2, 0xe6,
74 0x03, 0x10, 0x0b, 0xaf, 0x7f, 80 0x03, 0x10, 0x0b, 0xaf, 0x7f,
@@ -78,8 +84,9 @@ static u8 upd64031a_init[] = {
78 84
79/* ------------------------------------------------------------------------ */ 85/* ------------------------------------------------------------------------ */
80 86
81static u8 upd64031a_read(struct i2c_client *client, u8 reg) 87static u8 upd64031a_read(struct v4l2_subdev *sd, u8 reg)
82{ 88{
89 struct i2c_client *client = v4l2_get_subdevdata(sd);
83 u8 buf[2]; 90 u8 buf[2];
84 91
85 if (reg >= sizeof(buf)) 92 if (reg >= sizeof(buf))
@@ -90,106 +97,127 @@ static u8 upd64031a_read(struct i2c_client *client, u8 reg)
90 97
91/* ------------------------------------------------------------------------ */ 98/* ------------------------------------------------------------------------ */
92 99
93static void upd64031a_write(struct i2c_client *client, u8 reg, u8 val) 100static void upd64031a_write(struct v4l2_subdev *sd, u8 reg, u8 val)
94{ 101{
102 struct i2c_client *client = v4l2_get_subdevdata(sd);
95 u8 buf[2]; 103 u8 buf[2];
96 104
97 buf[0] = reg; 105 buf[0] = reg;
98 buf[1] = val; 106 buf[1] = val;
99 v4l_dbg(1, debug, client, "write reg: %02X val: %02X\n", reg, val); 107 v4l2_dbg(1, debug, sd, "write reg: %02X val: %02X\n", reg, val);
100 if (i2c_master_send(client, buf, 2) != 2) 108 if (i2c_master_send(client, buf, 2) != 2)
101 v4l_err(client, "I/O error write 0x%02x/0x%02x\n", reg, val); 109 v4l2_err(sd, "I/O error write 0x%02x/0x%02x\n", reg, val);
102} 110}
103 111
104/* ------------------------------------------------------------------------ */ 112/* ------------------------------------------------------------------------ */
105 113
106/* The input changed due to new input or channel changed */ 114/* The input changed due to new input or channel changed */
107static void upd64031a_change(struct i2c_client *client) 115static int upd64031a_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
108{ 116{
109 struct upd64031a_state *state = i2c_get_clientdata(client); 117 struct upd64031a_state *state = to_state(sd);
110 u8 reg = state->regs[R00]; 118 u8 reg = state->regs[R00];
111 119
112 v4l_dbg(1, debug, client, "changed input or channel\n"); 120 v4l2_dbg(1, debug, sd, "changed input or channel\n");
113 upd64031a_write(client, R00, reg | 0x10); 121 upd64031a_write(sd, R00, reg | 0x10);
114 upd64031a_write(client, R00, reg & ~0x10); 122 upd64031a_write(sd, R00, reg & ~0x10);
123 return 0;
115} 124}
116 125
117/* ------------------------------------------------------------------------ */ 126/* ------------------------------------------------------------------------ */
118 127
128static int upd64031a_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
129{
130 struct upd64031a_state *state = to_state(sd);
131 u8 r00, r05, r08;
132
133 state->gr_mode = (route->input & 3) << 6;
134 state->direct_3dycs_connect = (route->input & 0xc) << 4;
135 state->ext_comp_sync =
136 (route->input & UPD64031A_COMPOSITE_EXTERNAL) << 1;
137 state->ext_vert_sync =
138 (route->input & UPD64031A_VERTICAL_EXTERNAL) << 2;
139 r00 = (state->regs[R00] & ~GR_MODE_MASK) | state->gr_mode;
140 r05 = (state->regs[R00] & ~SYNC_CIRCUIT_MASK) |
141 state->ext_comp_sync | state->ext_vert_sync;
142 r08 = (state->regs[R08] & ~DIRECT_3DYCS_CONNECT_MASK) |
143 state->direct_3dycs_connect;
144 upd64031a_write(sd, R00, r00);
145 upd64031a_write(sd, R05, r05);
146 upd64031a_write(sd, R08, r08);
147 return upd64031a_s_frequency(sd, NULL);
148}
149
150static int upd64031a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
151{
152 struct i2c_client *client = v4l2_get_subdevdata(sd);
153
154 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_UPD64031A, 0);
155}
156
157static int upd64031a_log_status(struct v4l2_subdev *sd)
158{
159 v4l2_info(sd, "Status: SA00=0x%02x SA01=0x%02x\n",
160 upd64031a_read(sd, 0), upd64031a_read(sd, 1));
161 return 0;
162}
163
164#ifdef CONFIG_VIDEO_ADV_DEBUG
165static int upd64031a_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
166{
167 struct i2c_client *client = v4l2_get_subdevdata(sd);
168
169 if (!v4l2_chip_match_i2c_client(client,
170 reg->match_type, reg->match_chip))
171 return -EINVAL;
172 if (!capable(CAP_SYS_ADMIN))
173 return -EPERM;
174 reg->val = upd64031a_read(sd, reg->reg & 0xff);
175 return 0;
176}
177
178static int upd64031a_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
179{
180 struct i2c_client *client = v4l2_get_subdevdata(sd);
181
182 if (!v4l2_chip_match_i2c_client(client,
183 reg->match_type, reg->match_chip))
184 return -EINVAL;
185 if (!capable(CAP_SYS_ADMIN))
186 return -EPERM;
187 upd64031a_write(sd, reg->reg & 0xff, reg->val & 0xff);
188 return 0;
189}
190#endif
191
119static int upd64031a_command(struct i2c_client *client, unsigned cmd, void *arg) 192static int upd64031a_command(struct i2c_client *client, unsigned cmd, void *arg)
120{ 193{
121 struct upd64031a_state *state = i2c_get_clientdata(client); 194 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
122 struct v4l2_routing *route = arg; 195}
123 196
124 switch (cmd) { 197/* ----------------------------------------------------------------------- */
125 case VIDIOC_S_FREQUENCY:
126 upd64031a_change(client);
127 break;
128
129 case VIDIOC_INT_G_VIDEO_ROUTING:
130 route->input = (state->gr_mode >> 6) |
131 (state->direct_3dycs_connect >> 4) |
132 (state->ext_comp_sync >> 1) |
133 (state->ext_vert_sync >> 2);
134 route->output = 0;
135 break;
136
137 case VIDIOC_INT_S_VIDEO_ROUTING:
138 {
139 u8 r00, r05, r08;
140
141 state->gr_mode = (route->input & 3) << 6;
142 state->direct_3dycs_connect = (route->input & 0xc) << 4;
143 state->ext_comp_sync =
144 (route->input & UPD64031A_COMPOSITE_EXTERNAL) << 1;
145 state->ext_vert_sync =
146 (route->input & UPD64031A_VERTICAL_EXTERNAL) << 2;
147 r00 = (state->regs[R00] & ~GR_MODE_MASK) | state->gr_mode;
148 r05 = (state->regs[R00] & ~SYNC_CIRCUIT_MASK) |
149 state->ext_comp_sync | state->ext_vert_sync;
150 r08 = (state->regs[R08] & ~DIRECT_3DYCS_CONNECT_MASK) |
151 state->direct_3dycs_connect;
152 upd64031a_write(client, R00, r00);
153 upd64031a_write(client, R05, r05);
154 upd64031a_write(client, R08, r08);
155 upd64031a_change(client);
156 break;
157 }
158
159 case VIDIOC_LOG_STATUS:
160 v4l_info(client, "Status: SA00=0x%02x SA01=0x%02x\n",
161 upd64031a_read(client, 0), upd64031a_read(client, 1));
162 break;
163 198
199static const struct v4l2_subdev_core_ops upd64031a_core_ops = {
200 .log_status = upd64031a_log_status,
201 .g_chip_ident = upd64031a_g_chip_ident,
164#ifdef CONFIG_VIDEO_ADV_DEBUG 202#ifdef CONFIG_VIDEO_ADV_DEBUG
165 case VIDIOC_DBG_G_REGISTER: 203 .g_register = upd64031a_g_register,
166 case VIDIOC_DBG_S_REGISTER: 204 .s_register = upd64031a_s_register,
167 {
168 struct v4l2_register *reg = arg;
169
170 if (!v4l2_chip_match_i2c_client(client,
171 reg->match_type, reg->match_chip))
172 return -EINVAL;
173 if (!capable(CAP_SYS_ADMIN))
174 return -EPERM;
175 if (cmd == VIDIOC_DBG_G_REGISTER) {
176 reg->val = upd64031a_read(client, reg->reg & 0xff);
177 break;
178 }
179 upd64031a_write(client, reg->reg & 0xff, reg->val & 0xff);
180 break;
181 }
182#endif 205#endif
206};
183 207
184 case VIDIOC_G_CHIP_IDENT: 208static const struct v4l2_subdev_tuner_ops upd64031a_tuner_ops = {
185 return v4l2_chip_ident_i2c_client(client, arg, 209 .s_frequency = upd64031a_s_frequency,
186 V4L2_IDENT_UPD64031A, 0); 210};
187 211
188 default: 212static const struct v4l2_subdev_video_ops upd64031a_video_ops = {
189 break; 213 .s_routing = upd64031a_s_routing,
190 } 214};
191 return 0; 215
192} 216static const struct v4l2_subdev_ops upd64031a_ops = {
217 .core = &upd64031a_core_ops,
218 .tuner = &upd64031a_tuner_ops,
219 .video = &upd64031a_video_ops,
220};
193 221
194/* ------------------------------------------------------------------------ */ 222/* ------------------------------------------------------------------------ */
195 223
@@ -199,6 +227,7 @@ static int upd64031a_probe(struct i2c_client *client,
199 const struct i2c_device_id *id) 227 const struct i2c_device_id *id)
200{ 228{
201 struct upd64031a_state *state; 229 struct upd64031a_state *state;
230 struct v4l2_subdev *sd;
202 int i; 231 int i;
203 232
204 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 233 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -210,19 +239,23 @@ static int upd64031a_probe(struct i2c_client *client,
210 state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL); 239 state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
211 if (state == NULL) 240 if (state == NULL)
212 return -ENOMEM; 241 return -ENOMEM;
213 i2c_set_clientdata(client, state); 242 sd = &state->sd;
243 v4l2_i2c_subdev_init(sd, client, &upd64031a_ops);
214 memcpy(state->regs, upd64031a_init, sizeof(state->regs)); 244 memcpy(state->regs, upd64031a_init, sizeof(state->regs));
215 state->gr_mode = UPD64031A_GR_ON << 6; 245 state->gr_mode = UPD64031A_GR_ON << 6;
216 state->direct_3dycs_connect = UPD64031A_3DYCS_COMPOSITE << 4; 246 state->direct_3dycs_connect = UPD64031A_3DYCS_COMPOSITE << 4;
217 state->ext_comp_sync = state->ext_vert_sync = 0; 247 state->ext_comp_sync = state->ext_vert_sync = 0;
218 for (i = 0; i < TOT_REGS; i++) 248 for (i = 0; i < TOT_REGS; i++)
219 upd64031a_write(client, i, state->regs[i]); 249 upd64031a_write(sd, i, state->regs[i]);
220 return 0; 250 return 0;
221} 251}
222 252
223static int upd64031a_remove(struct i2c_client *client) 253static int upd64031a_remove(struct i2c_client *client)
224{ 254{
225 kfree(i2c_get_clientdata(client)); 255 struct v4l2_subdev *sd = i2c_get_clientdata(client);
256
257 v4l2_device_unregister_subdev(sd);
258 kfree(to_state(sd));
226 return 0; 259 return 0;
227} 260}
228 261
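In the new upd64031a_s_routing() above, route->input bits 1:0 land in R00[7:6] (GR mode) and bits 3:2 in R08[7:6] (direct 3DYCS connect); the external-sync bits are shifted into R05 via the UPD64031A_*_EXTERNAL flags, whose values come from the header and are not visible in this hunk. A standalone round trip of the two fully visible fields, paired with the inverse used by the removed VIDIOC_INT_G_VIDEO_ROUTING handler:

#include <stdio.h>

int main(void)
{
	unsigned int input = 0x5;                     /* GR mode 1, 3DYCS select 1 */

	unsigned int gr_mode = (input & 0x3) << 6;    /* -> 0x40, merged into R00  */
	unsigned int dycs    = (input & 0xc) << 4;    /* -> 0x40, merged into R08  */
	unsigned int back    = (gr_mode >> 6) | (dycs >> 4);

	printf("gr_mode=0x%02X dycs=0x%02X round-trip=0x%X\n",
	       gr_mode, dycs, back);                  /* 0x40 0x40 0x5 */
	return 0;
}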
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
index 9521ce004dcc..58412cb9c01a 100644
--- a/drivers/media/video/upd64083.c
+++ b/drivers/media/video/upd64083.c
@@ -26,7 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/videodev2.h> 28#include <linux/videodev2.h>
29#include <media/v4l2-common.h> 29#include <media/v4l2-device.h>
30#include <media/v4l2-chip-ident.h> 30#include <media/v4l2-chip-ident.h>
31#include <media/v4l2-i2c-drv.h> 31#include <media/v4l2-i2c-drv.h>
32#include <media/upd64083.h> 32#include <media/upd64083.h>
@@ -51,11 +51,17 @@ enum {
51}; 51};
52 52
53struct upd64083_state { 53struct upd64083_state {
54 struct v4l2_subdev sd;
54 u8 mode; 55 u8 mode;
55 u8 ext_y_adc; 56 u8 ext_y_adc;
56 u8 regs[TOT_REGS]; 57 u8 regs[TOT_REGS];
57}; 58};
58 59
60static inline struct upd64083_state *to_state(struct v4l2_subdev *sd)
61{
62 return container_of(sd, struct upd64083_state, sd);
63}
64
59/* Initial values when used in combination with the 65/* Initial values when used in combination with the
60 NEC upd64031a ghost reduction chip. */ 66 NEC upd64031a ghost reduction chip. */
61static u8 upd64083_init[] = { 67static u8 upd64083_init[] = {
@@ -68,34 +74,24 @@ static u8 upd64083_init[] = {
68 74
69/* ------------------------------------------------------------------------ */ 75/* ------------------------------------------------------------------------ */
70 76
71static void upd64083_log_status(struct i2c_client *client) 77static void upd64083_write(struct v4l2_subdev *sd, u8 reg, u8 val)
72{
73 u8 buf[7];
74
75 i2c_master_recv(client, buf, 7);
76 v4l_info(client, "Status: SA00=%02x SA01=%02x SA02=%02x SA03=%02x "
77 "SA04=%02x SA05=%02x SA06=%02x\n",
78 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
79}
80
81/* ------------------------------------------------------------------------ */
82
83static void upd64083_write(struct i2c_client *client, u8 reg, u8 val)
84{ 78{
79 struct i2c_client *client = v4l2_get_subdevdata(sd);
85 u8 buf[2]; 80 u8 buf[2];
86 81
87 buf[0] = reg; 82 buf[0] = reg;
88 buf[1] = val; 83 buf[1] = val;
89 v4l_dbg(1, debug, client, "write reg: %02x val: %02x\n", reg, val); 84 v4l2_dbg(1, debug, sd, "write reg: %02x val: %02x\n", reg, val);
90 if (i2c_master_send(client, buf, 2) != 2) 85 if (i2c_master_send(client, buf, 2) != 2)
91 v4l_err(client, "I/O error write 0x%02x/0x%02x\n", reg, val); 86 v4l2_err(sd, "I/O error write 0x%02x/0x%02x\n", reg, val);
92} 87}
93 88
94/* ------------------------------------------------------------------------ */ 89/* ------------------------------------------------------------------------ */
95 90
96#ifdef CONFIG_VIDEO_ADV_DEBUG 91#ifdef CONFIG_VIDEO_ADV_DEBUG
97static u8 upd64083_read(struct i2c_client *client, u8 reg) 92static u8 upd64083_read(struct v4l2_subdev *sd, u8 reg)
98{ 93{
94 struct i2c_client *client = v4l2_get_subdevdata(sd);
99 u8 buf[7]; 95 u8 buf[7];
100 96
101 if (reg >= sizeof(buf)) 97 if (reg >= sizeof(buf))
@@ -107,67 +103,94 @@ static u8 upd64083_read(struct i2c_client *client, u8 reg)
107 103
108/* ------------------------------------------------------------------------ */ 104/* ------------------------------------------------------------------------ */
109 105
110static int upd64083_command(struct i2c_client *client, unsigned cmd, void *arg) 106static int upd64083_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
111{ 107{
112 struct upd64083_state *state = i2c_get_clientdata(client); 108 struct upd64083_state *state = to_state(sd);
113 struct v4l2_routing *route = arg; 109 u8 r00, r02;
114 110
115 switch (cmd) { 111 if (route->input > 7 || (route->input & 6) == 6)
116 case VIDIOC_INT_G_VIDEO_ROUTING: 112 return -EINVAL;
117 route->input = (state->mode >> 6) | (state->ext_y_adc >> 3); 113 state->mode = (route->input & 3) << 6;
118 route->output = 0; 114 state->ext_y_adc = (route->input & UPD64083_EXT_Y_ADC) << 3;
119 break; 115 r00 = (state->regs[R00] & ~(3 << 6)) | state->mode;
120 116 r02 = (state->regs[R02] & ~(1 << 5)) | state->ext_y_adc;
121 case VIDIOC_INT_S_VIDEO_ROUTING: 117 upd64083_write(sd, R00, r00);
122 { 118 upd64083_write(sd, R02, r02);
123 u8 r00, r02; 119 return 0;
124 120}
125 if (route->input > 7 || (route->input & 6) == 6)
126 return -EINVAL;
127 state->mode = (route->input & 3) << 6;
128 state->ext_y_adc = (route->input & UPD64083_EXT_Y_ADC) << 3;
129 r00 = (state->regs[R00] & ~(3 << 6)) | state->mode;
130 r02 = (state->regs[R02] & ~(1 << 5)) | state->ext_y_adc;
131 upd64083_write(client, R00, r00);
132 upd64083_write(client, R02, r02);
133 break;
134 }
135
136 case VIDIOC_LOG_STATUS:
137 upd64083_log_status(client);
138 break;
139 121
140#ifdef CONFIG_VIDEO_ADV_DEBUG 122#ifdef CONFIG_VIDEO_ADV_DEBUG
141 case VIDIOC_DBG_G_REGISTER: 123static int upd64083_g_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
142 case VIDIOC_DBG_S_REGISTER: 124{
143 { 125 struct i2c_client *client = v4l2_get_subdevdata(sd);
144 struct v4l2_register *reg = arg; 126
127 if (!v4l2_chip_match_i2c_client(client,
128 reg->match_type, reg->match_chip))
129 return -EINVAL;
130 if (!capable(CAP_SYS_ADMIN))
131 return -EPERM;
132 reg->val = upd64083_read(sd, reg->reg & 0xff);
133 return 0;
134}
145 135
146 if (!v4l2_chip_match_i2c_client(client, 136static int upd64083_s_register(struct v4l2_subdev *sd, struct v4l2_register *reg)
137{
138 struct i2c_client *client = v4l2_get_subdevdata(sd);
139
140 if (!v4l2_chip_match_i2c_client(client,
147 reg->match_type, reg->match_chip)) 141 reg->match_type, reg->match_chip))
148 return -EINVAL; 142 return -EINVAL;
149 if (!capable(CAP_SYS_ADMIN)) 143 if (!capable(CAP_SYS_ADMIN))
150 return -EPERM; 144 return -EPERM;
151 if (cmd == VIDIOC_DBG_G_REGISTER) { 145 upd64083_write(sd, reg->reg & 0xff, reg->val & 0xff);
152 reg->val = upd64083_read(client, reg->reg & 0xff); 146 return 0;
153 break; 147}
154 }
155 upd64083_write(client, reg->reg & 0xff, reg->val & 0xff);
156 break;
157 }
158#endif 148#endif
159 149
160 case VIDIOC_G_CHIP_IDENT: 150static int upd64083_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
161 return v4l2_chip_ident_i2c_client(client, arg, 151{
162 V4L2_IDENT_UPD64083, 0); 152 struct i2c_client *client = v4l2_get_subdevdata(sd);
153
154 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_UPD64083, 0);
155}
163 156
164 default: 157static int upd64083_log_status(struct v4l2_subdev *sd)
165 break; 158{
166 } 159 struct i2c_client *client = v4l2_get_subdevdata(sd);
160 u8 buf[7];
167 161
162 i2c_master_recv(client, buf, 7);
163 v4l2_info(sd, "Status: SA00=%02x SA01=%02x SA02=%02x SA03=%02x "
164 "SA04=%02x SA05=%02x SA06=%02x\n",
165 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
168 return 0; 166 return 0;
169} 167}
170 168
169static int upd64083_command(struct i2c_client *client, unsigned cmd, void *arg)
170{
171 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
172}
173
174/* ----------------------------------------------------------------------- */
175
176static const struct v4l2_subdev_core_ops upd64083_core_ops = {
177 .log_status = upd64083_log_status,
178 .g_chip_ident = upd64083_g_chip_ident,
179#ifdef CONFIG_VIDEO_ADV_DEBUG
180 .g_register = upd64083_g_register,
181 .s_register = upd64083_s_register,
182#endif
183};
184
185static const struct v4l2_subdev_video_ops upd64083_video_ops = {
186 .s_routing = upd64083_s_routing,
187};
188
189static const struct v4l2_subdev_ops upd64083_ops = {
190 .core = &upd64083_core_ops,
191 .video = &upd64083_video_ops,
192};
193
171/* ------------------------------------------------------------------------ */ 194/* ------------------------------------------------------------------------ */
172 195
173/* i2c implementation */ 196/* i2c implementation */
@@ -176,6 +199,7 @@ static int upd64083_probe(struct i2c_client *client,
176 const struct i2c_device_id *id) 199 const struct i2c_device_id *id)
177{ 200{
178 struct upd64083_state *state; 201 struct upd64083_state *state;
202 struct v4l2_subdev *sd;
179 int i; 203 int i;
180 204
181 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 205 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -187,19 +211,23 @@ static int upd64083_probe(struct i2c_client *client,
187 state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL); 211 state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL);
188 if (state == NULL) 212 if (state == NULL)
189 return -ENOMEM; 213 return -ENOMEM;
190 i2c_set_clientdata(client, state); 214 sd = &state->sd;
215 v4l2_i2c_subdev_init(sd, client, &upd64083_ops);
191 /* Initially assume that a ghost reduction chip is present */ 216 /* Initially assume that a ghost reduction chip is present */
192 state->mode = 0; /* YCS mode */ 217 state->mode = 0; /* YCS mode */
193 state->ext_y_adc = (1 << 5); 218 state->ext_y_adc = (1 << 5);
194 memcpy(state->regs, upd64083_init, TOT_REGS); 219 memcpy(state->regs, upd64083_init, TOT_REGS);
195 for (i = 0; i < TOT_REGS; i++) 220 for (i = 0; i < TOT_REGS; i++)
196 upd64083_write(client, i, state->regs[i]); 221 upd64083_write(sd, i, state->regs[i]);
197 return 0; 222 return 0;
198} 223}
199 224
200static int upd64083_remove(struct i2c_client *client) 225static int upd64083_remove(struct i2c_client *client)
201{ 226{
202 kfree(i2c_get_clientdata(client)); 227 struct v4l2_subdev *sd = i2c_get_clientdata(client);
228
229 v4l2_device_unregister_subdev(sd);
230 kfree(to_state(sd));
203 return 0; 231 return 0;
204} 232}
205 233
diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
index c710bcd1df48..f8d85ddb4804 100644
--- a/drivers/media/video/usbvideo/ibmcam.c
+++ b/drivers/media/video/usbvideo/ibmcam.c
@@ -3779,11 +3779,11 @@ static int ibmcam_probe(struct usb_interface *intf, const struct usb_device_id *
3779 err("Alternate settings have different endpoint addresses!"); 3779 err("Alternate settings have different endpoint addresses!");
3780 return -ENODEV; 3780 return -ENODEV;
3781 } 3781 }
3782 if ((endpoint->bmAttributes & 0x03) != 0x01) { 3782 if (usb_endpoint_type(endpoint) != USB_ENDPOINT_XFER_ISOC) {
3783 err("Interface %d. has non-ISO endpoint!", ifnum); 3783 err("Interface %d. has non-ISO endpoint!", ifnum);
3784 return -ENODEV; 3784 return -ENODEV;
3785 } 3785 }
3786 if ((endpoint->bEndpointAddress & 0x80) == 0) { 3786 if (usb_endpoint_dir_out(endpoint)) {
3787 err("Interface %d. has ISO OUT endpoint!", ifnum); 3787 err("Interface %d. has ISO OUT endpoint!", ifnum);
3788 return -ENODEV; 3788 return -ENODEV;
3789 } 3789 }
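This ibmcam hunk (and the konicawc, ultracam and usbvision ones that follow) replaces open-coded descriptor masking with the usb_endpoint_*() helpers; both forms test the same bmAttributes and bEndpointAddress bits. A standalone restatement of the two checks on sample descriptor bytes, with the masks copied from ch9.h:

#include <stdio.h>

#define USB_ENDPOINT_XFERTYPE_MASK 0x03
#define USB_ENDPOINT_XFER_ISOC     0x01
#define USB_ENDPOINT_DIR_MASK      0x80
#define USB_DIR_OUT                0x00

int main(void)
{
	unsigned char bmAttributes     = 0x01;   /* isochronous transfer type */
	unsigned char bEndpointAddress = 0x81;   /* endpoint 1, IN direction  */

	int is_isoc = (bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			== USB_ENDPOINT_XFER_ISOC;       /* usb_endpoint_xfer_isoc() */
	int is_out  = (bEndpointAddress & USB_ENDPOINT_DIR_MASK)
			== USB_DIR_OUT;                  /* usb_endpoint_dir_out()   */

	printf("isoc=%d out=%d\n", is_isoc, is_out);     /* isoc=1 out=0 */
	return 0;
}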
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index da27a5287983..90f0ce6a26bc 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -823,12 +823,12 @@ static int konicawc_probe(struct usb_interface *intf, const struct usb_device_id
823 err("Alternate settings have different endpoint addresses!"); 823 err("Alternate settings have different endpoint addresses!");
824 return -ENODEV; 824 return -ENODEV;
825 } 825 }
826 if ((endpoint->bmAttributes & 0x03) != 0x01) { 826 if (usb_endpoint_type(endpoint) != USB_ENDPOINT_XFER_ISOC) {
827 err("Interface %d. has non-ISO endpoint!", 827 err("Interface %d. has non-ISO endpoint!",
828 interface->desc.bInterfaceNumber); 828 interface->desc.bInterfaceNumber);
829 return -ENODEV; 829 return -ENODEV;
830 } 830 }
831 if ((endpoint->bEndpointAddress & 0x80) == 0) { 831 if (usb_endpoint_dir_out(endpoint)) {
832 err("Interface %d. has ISO OUT endpoint!", 832 err("Interface %d. has ISO OUT endpoint!",
833 interface->desc.bInterfaceNumber); 833 interface->desc.bInterfaceNumber);
834 return -ENODEV; 834 return -ENODEV;
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index 4459b8a7f818..fd112f0b9d35 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -447,7 +447,7 @@ static int qcm_sensor_init(struct uvd *uvd)
447 CHECK_RET(ret, qcm_stv_setw(uvd->dev, 0x15c1, 447 CHECK_RET(ret, qcm_stv_setw(uvd->dev, 0x15c1,
448 cpu_to_le16(ISOC_PACKET_SIZE))); 448 cpu_to_le16(ISOC_PACKET_SIZE)));
449 CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x15c3, 0x08)); 449 CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x15c3, 0x08));
450 CHECK_RET(ret, ret = qcm_stv_setb(uvd->dev, 0x143f, 0x01)); 450 CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x143f, 0x01));
451 451
452 CHECK_RET(ret, qcm_stv_setb(uvd->dev, STV_ISO_ENABLE, 0x00)); 452 CHECK_RET(ret, qcm_stv_setb(uvd->dev, STV_ISO_ENABLE, 0x00));
453 453
@@ -955,8 +955,7 @@ static int qcm_probe(struct usb_interface *intf,
955 for (j=0; j < interface->desc.bNumEndpoints; j++) { 955 for (j=0; j < interface->desc.bNumEndpoints; j++) {
956 endpoint = &interface->endpoint[j].desc; 956 endpoint = &interface->endpoint[j].desc;
957 957
958 if ((endpoint->bEndpointAddress & 958 if (usb_endpoint_dir_out(endpoint))
959 USB_ENDPOINT_DIR_MASK) != USB_DIR_IN)
960 continue; /* not input then not good */ 959 continue; /* not input then not good */
961 960
962 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); 961 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
@@ -965,9 +964,7 @@ static int qcm_probe(struct usb_interface *intf,
965 continue; /* 0 pkt size is not what we want */ 964 continue; /* 0 pkt size is not what we want */
966 } 965 }
967 966
968 if ((endpoint->bmAttributes & 967 if (usb_endpoint_xfer_isoc(endpoint)) {
969 USB_ENDPOINT_XFERTYPE_MASK) ==
970 USB_ENDPOINT_XFER_ISOC) {
971 video_ep = endpoint->bEndpointAddress; 968 video_ep = endpoint->bEndpointAddress;
972 /* break out of the search */ 969 /* break out of the search */
973 goto good_videoep; 970 goto good_videoep;
diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
index 9714baab7833..839a08240c25 100644
--- a/drivers/media/video/usbvideo/ultracam.c
+++ b/drivers/media/video/usbvideo/ultracam.c
@@ -556,12 +556,12 @@ static int ultracam_probe(struct usb_interface *intf, const struct usb_device_id
556 err("Alternate settings have different endpoint addresses!"); 556 err("Alternate settings have different endpoint addresses!");
557 return -ENODEV; 557 return -ENODEV;
558 } 558 }
559 if ((endpoint->bmAttributes & 0x03) != 0x01) { 559 if (usb_endpoint_type(endpoint) != USB_ENDPOINT_XFER_ISOC) {
560 err("Interface %d. has non-ISO endpoint!", 560 err("Interface %d. has non-ISO endpoint!",
561 interface->desc.bInterfaceNumber); 561 interface->desc.bInterfaceNumber);
562 return -ENODEV; 562 return -ENODEV;
563 } 563 }
564 if ((endpoint->bEndpointAddress & 0x80) == 0) { 564 if (usb_endpoint_dir_out(endpoint)) {
565 err("Interface %d. has ISO OUT endpoint!", 565 err("Interface %d. has ISO OUT endpoint!",
566 interface->desc.bInterfaceNumber); 566 interface->desc.bInterfaceNumber);
567 return -ENODEV; 567 return -ENODEV;
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index 7c575bb8184f..148a1f98c70f 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -1123,7 +1123,7 @@ static int usbvideo_v4l_open(struct inode *inode, struct file *file)
1123 if (uvd->debug > 1) 1123 if (uvd->debug > 1)
1124 dev_info(&uvd->dev->dev, "%s($%p)\n", __func__, dev); 1124 dev_info(&uvd->dev->dev, "%s($%p)\n", __func__, dev);
1125 1125
1126 if (0 < usbvideo_ClientIncModCount(uvd)) 1126 if (usbvideo_ClientIncModCount(uvd) < 0)
1127 return -ENODEV; 1127 return -ENODEV;
1128 mutex_lock(&uvd->lock); 1128 mutex_lock(&uvd->lock);
1129 1129
@@ -1281,8 +1281,7 @@ static int usbvideo_v4l_close(struct inode *inode, struct file *file)
1281 * History: 1281 * History:
1282 * 22-Jan-2000 Corrected VIDIOCSPICT to reject unsupported settings. 1282 * 22-Jan-2000 Corrected VIDIOCSPICT to reject unsupported settings.
1283 */ 1283 */
1284static int usbvideo_v4l_do_ioctl(struct inode *inode, struct file *file, 1284static int usbvideo_v4l_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1285 unsigned int cmd, void *arg)
1286{ 1285{
1287 struct uvd *uvd = file->private_data; 1286 struct uvd *uvd = file->private_data;
1288 1287
@@ -1505,7 +1504,7 @@ static int usbvideo_v4l_do_ioctl(struct inode *inode, struct file *file,
1505static int usbvideo_v4l_ioctl(struct inode *inode, struct file *file, 1504static int usbvideo_v4l_ioctl(struct inode *inode, struct file *file,
1506 unsigned int cmd, unsigned long arg) 1505 unsigned int cmd, unsigned long arg)
1507{ 1506{
1508 return video_usercopy(inode, file, cmd, arg, usbvideo_v4l_do_ioctl); 1507 return video_usercopy(file, cmd, arg, usbvideo_v4l_do_ioctl);
1509} 1508}
1510 1509
1511/* 1510/*
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 8e2d58bec481..4602597ed8d1 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -844,8 +844,7 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
844 interface->desc.bInterfaceNumber, (unsigned) (interface->desc.bNumEndpoints)); 844 interface->desc.bInterfaceNumber, (unsigned) (interface->desc.bNumEndpoints));
845 endpoint = &interface->endpoint[0].desc; 845 endpoint = &interface->endpoint[0].desc;
846 846
847 if ((endpoint->bEndpointAddress & 0x80) && 847 if (usb_endpoint_is_bulk_in(endpoint)) {
848 ((endpoint->bmAttributes & 3) == 0x02)) {
849 /* we found a bulk in endpoint */ 848 /* we found a bulk in endpoint */
850 bulkEndpoint = endpoint->bEndpointAddress; 849 bulkEndpoint = endpoint->bEndpointAddress;
851 } else { 850 } else {
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index d185b57fdcd0..85661b1848fe 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -523,7 +523,7 @@ static int vidioc_querycap (struct file *file, void *priv,
523 strlcpy(vc->card, 523 strlcpy(vc->card,
524 usbvision_device_data[usbvision->DevModel].ModelString, 524 usbvision_device_data[usbvision->DevModel].ModelString,
525 sizeof(vc->card)); 525 sizeof(vc->card));
526 strlcpy(vc->bus_info, usbvision->dev->dev.bus_id, 526 strlcpy(vc->bus_info, dev_name(&usbvision->dev->dev),
527 sizeof(vc->bus_info)); 527 sizeof(vc->bus_info));
528 vc->version = USBVISION_DRIVER_VERSION; 528 vc->version = USBVISION_DRIVER_VERSION;
529 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | 529 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
@@ -1278,7 +1278,7 @@ static int usbvision_vbi_close(struct inode *inode, struct file *file)
1278 return -ENODEV; 1278 return -ENODEV;
1279} 1279}
1280 1280
1281static int usbvision_do_vbi_ioctl(struct inode *inode, struct file *file, 1281static int usbvision_do_vbi_ioctl(struct file *file,
1282 unsigned int cmd, void *arg) 1282 unsigned int cmd, void *arg)
1283{ 1283{
1284 /* TODO */ 1284 /* TODO */
@@ -1288,7 +1288,7 @@ static int usbvision_do_vbi_ioctl(struct inode *inode, struct file *file,
1288static int usbvision_vbi_ioctl(struct inode *inode, struct file *file, 1288static int usbvision_vbi_ioctl(struct inode *inode, struct file *file,
1289 unsigned int cmd, unsigned long arg) 1289 unsigned int cmd, unsigned long arg)
1290{ 1290{
1291 return video_usercopy(inode, file, cmd, arg, usbvision_do_vbi_ioctl); 1291 return video_usercopy(file, cmd, arg, usbvision_do_vbi_ioctl);
1292} 1292}
1293 1293
1294 1294
@@ -1679,7 +1679,7 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
1679 interface = &dev->actconfig->interface[ifnum]->altsetting[0]; 1679 interface = &dev->actconfig->interface[ifnum]->altsetting[0];
1680 } 1680 }
1681 endpoint = &interface->endpoint[1].desc; 1681 endpoint = &interface->endpoint[1].desc;
1682 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 1682 if (usb_endpoint_type(endpoint) !=
1683 USB_ENDPOINT_XFER_ISOC) { 1683 USB_ENDPOINT_XFER_ISOC) {
1684 err("%s: interface %d. has non-ISO endpoint!", 1684 err("%s: interface %d. has non-ISO endpoint!",
1685 __func__, ifnum); 1685 __func__, ifnum);
@@ -1687,8 +1687,7 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
1687 __func__, endpoint->bmAttributes); 1687 __func__, endpoint->bmAttributes);
1688 return -ENODEV; 1688 return -ENODEV;
1689 } 1689 }
1690 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == 1690 if (usb_endpoint_dir_out(endpoint)) {
1691 USB_DIR_OUT) {
1692 err("%s: interface %d. has ISO OUT endpoint!", 1691 err("%s: interface %d. has ISO OUT endpoint!",
1693 __func__, ifnum); 1692 __func__, ifnum);
1694 return -ENODEV; 1693 return -ENODEV;
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index f16aafe9cf14..2208165aa6f0 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -327,6 +327,31 @@ static struct uvc_menu_info exposure_auto_controls[] = {
327 { 8, "Aperture Priority Mode" }, 327 { 8, "Aperture Priority Mode" },
328}; 328};
329 329
330static __s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,
331 __u8 query, const __u8 *data)
332{
333 __s8 zoom = (__s8)data[0];
334
335 switch (query) {
336 case GET_CUR:
337 return (zoom == 0) ? 0 : (zoom > 0 ? data[2] : -data[2]);
338
339 case GET_MIN:
340 case GET_MAX:
341 case GET_RES:
342 case GET_DEF:
343 default:
344 return data[2];
345 }
346}
347
348static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
349 __s32 value, __u8 *data)
350{
351 data[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
352 data[2] = min(abs(value), 0xff);
353}
354
330static struct uvc_control_mapping uvc_ctrl_mappings[] = { 355static struct uvc_control_mapping uvc_ctrl_mappings[] = {
331 { 356 {
332 .id = V4L2_CID_BRIGHTNESS, 357 .id = V4L2_CID_BRIGHTNESS,
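
The uvc_ctrl_get_zoom()/uvc_ctrl_set_zoom() pair added above maps the three-byte CT_ZOOM_RELATIVE payload onto a single signed V4L2 value: data[0] carries the direction (0, 1 or 0xff), data[2] the speed, and the middle byte (presumably the digital-zoom flag) is not touched by the patch. A minimal userspace-style sketch of that packing, mirroring the GET_CUR decode path only; the helper names are local to the example.

    #include <stdint.h>
    #include <stdlib.h>

    /* Decode: direction in data[0] (signed), magnitude in data[2]. */
    static int32_t zoom_decode(const uint8_t data[3])
    {
            int8_t dir = (int8_t)data[0];

            if (dir == 0)
                    return 0;
            return dir > 0 ? data[2] : -(int32_t)data[2];
    }

    /* Encode: sign goes to data[0], clamped magnitude to data[2]. */
    static void zoom_encode(int32_t value, uint8_t data[3])
    {
            int32_t mag = abs(value);

            data[0] = value == 0 ? 0x00 : (value > 0 ? 0x01 : 0xff);
            data[1] = 0;                       /* left alone by the patch; zeroed here only for determinism */
            data[2] = mag > 0xff ? 0xff : (uint8_t)mag;
    }
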
@@ -532,6 +557,38 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
532 .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, 557 .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
533 .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, 558 .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
534 }, 559 },
560 {
561 .id = V4L2_CID_ZOOM_ABSOLUTE,
562 .name = "Zoom, Absolute",
563 .entity = UVC_GUID_UVC_CAMERA,
564 .selector = CT_ZOOM_ABSOLUTE_CONTROL,
565 .size = 16,
566 .offset = 0,
567 .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
568 .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
569 },
570 {
571 .id = V4L2_CID_ZOOM_CONTINUOUS,
572 .name = "Zoom, Continuous",
573 .entity = UVC_GUID_UVC_CAMERA,
574 .selector = CT_ZOOM_RELATIVE_CONTROL,
575 .size = 0,
576 .offset = 0,
577 .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
578 .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
579 .get = uvc_ctrl_get_zoom,
580 .set = uvc_ctrl_set_zoom,
581 },
582 {
583 .id = V4L2_CID_PRIVACY,
584 .name = "Privacy",
585 .entity = UVC_GUID_UVC_CAMERA,
586 .selector = CT_PRIVACY_CONTROL,
587 .size = 1,
588 .offset = 0,
589 .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
590 .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
591 },
535}; 592};
536 593
537/* ------------------------------------------------------------------------ 594/* ------------------------------------------------------------------------
@@ -543,18 +600,23 @@ static inline __u8 *uvc_ctrl_data(struct uvc_control *ctrl, int id)
543 return ctrl->data + id * ctrl->info->size; 600 return ctrl->data + id * ctrl->info->size;
544} 601}
545 602
546static inline int uvc_get_bit(const __u8 *data, int bit) 603static inline int uvc_test_bit(const __u8 *data, int bit)
547{ 604{
548 return (data[bit >> 3] >> (bit & 7)) & 1; 605 return (data[bit >> 3] >> (bit & 7)) & 1;
549} 606}
550 607
608static inline void uvc_clear_bit(__u8 *data, int bit)
609{
610 data[bit >> 3] &= ~(1 << (bit & 7));
611}
612
551/* Extract the bit string specified by mapping->offset and mapping->size 613/* Extract the bit string specified by mapping->offset and mapping->size
552 * from the little-endian data stored at 'data' and return the result as 614 * from the little-endian data stored at 'data' and return the result as
553 * a signed 32bit integer. Sign extension will be performed if the mapping 615 * a signed 32bit integer. Sign extension will be performed if the mapping
554 * references a signed data type. 616 * references a signed data type.
555 */ 617 */
556static __s32 uvc_get_le_value(const __u8 *data, 618static __s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
557 struct uvc_control_mapping *mapping) 619 __u8 query, const __u8 *data)
558{ 620{
559 int bits = mapping->size; 621 int bits = mapping->size;
560 int offset = mapping->offset; 622 int offset = mapping->offset;
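
uvc_get_bit() is renamed to uvc_test_bit() and gains a uvc_clear_bit() counterpart; both treat bmControls as a little-endian bit array, so bit N lives in byte N/8 at position N%8. A small standalone sketch under that assumption (helper names are local to the example):

    #include <assert.h>
    #include <stdint.h>

    static int bitmap_test(const uint8_t *data, int bit)
    {
            return (data[bit >> 3] >> (bit & 7)) & 1;
    }

    static void bitmap_clear(uint8_t *data, int bit)
    {
            data[bit >> 3] &= ~(1 << (bit & 7));
    }

    int main(void)
    {
            uint8_t bm[2] = { 0x0c, 0x00 };   /* bits 2 and 3 set */

            assert(bitmap_test(bm, 3) == 1);
            bitmap_clear(bm, 3);              /* drop bit 3, keep bit 2 */
            assert(bitmap_test(bm, 3) == 0 && bitmap_test(bm, 2) == 1);
            return 0;
    }
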
@@ -583,8 +645,8 @@ static __s32 uvc_get_le_value(const __u8 *data,
583/* Set the bit string specified by mapping->offset and mapping->size 645/* Set the bit string specified by mapping->offset and mapping->size
584 * in the little-endian data stored at 'data' to the value 'value'. 646 * in the little-endian data stored at 'data' to the value 'value'.
585 */ 647 */
586static void uvc_set_le_value(__s32 value, __u8 *data, 648static void uvc_set_le_value(struct uvc_control_mapping *mapping,
587 struct uvc_control_mapping *mapping) 649 __s32 value, __u8 *data)
588{ 650{
589 int bits = mapping->size; 651 int bits = mapping->size;
590 int offset = mapping->offset; 652 int offset = mapping->offset;
@@ -736,7 +798,7 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
736 video->dev->intfnum, ctrl->info->selector, 798 video->dev->intfnum, ctrl->info->selector,
737 data, ctrl->info->size)) < 0) 799 data, ctrl->info->size)) < 0)
738 goto out; 800 goto out;
739 v4l2_ctrl->default_value = uvc_get_le_value(data, mapping); 801 v4l2_ctrl->default_value = mapping->get(mapping, GET_DEF, data);
740 } 802 }
741 803
742 switch (mapping->v4l2_type) { 804 switch (mapping->v4l2_type) {
@@ -772,21 +834,21 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
772 video->dev->intfnum, ctrl->info->selector, 834 video->dev->intfnum, ctrl->info->selector,
773 data, ctrl->info->size)) < 0) 835 data, ctrl->info->size)) < 0)
774 goto out; 836 goto out;
775 v4l2_ctrl->minimum = uvc_get_le_value(data, mapping); 837 v4l2_ctrl->minimum = mapping->get(mapping, GET_MIN, data);
776 } 838 }
777 if (ctrl->info->flags & UVC_CONTROL_GET_MAX) { 839 if (ctrl->info->flags & UVC_CONTROL_GET_MAX) {
778 if ((ret = uvc_query_ctrl(video->dev, GET_MAX, ctrl->entity->id, 840 if ((ret = uvc_query_ctrl(video->dev, GET_MAX, ctrl->entity->id,
779 video->dev->intfnum, ctrl->info->selector, 841 video->dev->intfnum, ctrl->info->selector,
780 data, ctrl->info->size)) < 0) 842 data, ctrl->info->size)) < 0)
781 goto out; 843 goto out;
782 v4l2_ctrl->maximum = uvc_get_le_value(data, mapping); 844 v4l2_ctrl->maximum = mapping->get(mapping, GET_MAX, data);
783 } 845 }
784 if (ctrl->info->flags & UVC_CONTROL_GET_RES) { 846 if (ctrl->info->flags & UVC_CONTROL_GET_RES) {
785 if ((ret = uvc_query_ctrl(video->dev, GET_RES, ctrl->entity->id, 847 if ((ret = uvc_query_ctrl(video->dev, GET_RES, ctrl->entity->id,
786 video->dev->intfnum, ctrl->info->selector, 848 video->dev->intfnum, ctrl->info->selector,
787 data, ctrl->info->size)) < 0) 849 data, ctrl->info->size)) < 0)
788 goto out; 850 goto out;
789 v4l2_ctrl->step = uvc_get_le_value(data, mapping); 851 v4l2_ctrl->step = mapping->get(mapping, GET_RES, data);
790 } 852 }
791 853
792 ret = 0; 854 ret = 0;
@@ -923,8 +985,8 @@ int uvc_ctrl_get(struct uvc_video_device *video,
923 ctrl->loaded = 1; 985 ctrl->loaded = 1;
924 } 986 }
925 987
926 xctrl->value = uvc_get_le_value( 988 xctrl->value = mapping->get(mapping, GET_CUR,
927 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), mapping); 989 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
928 990
929 if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) { 991 if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
930 menu = mapping->menu_info; 992 menu = mapping->menu_info;
@@ -980,8 +1042,8 @@ int uvc_ctrl_set(struct uvc_video_device *video,
980 ctrl->info->size); 1042 ctrl->info->size);
981 } 1043 }
982 1044
983 uvc_set_le_value(value, 1045 mapping->set(mapping, value,
984 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), mapping); 1046 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
985 1047
986 ctrl->dirty = 1; 1048 ctrl->dirty = 1;
987 ctrl->modified = 1; 1049 ctrl->modified = 1;
@@ -1257,6 +1319,11 @@ int uvc_ctrl_add_mapping(struct uvc_control_mapping *mapping)
1257 struct uvc_control_mapping *map; 1319 struct uvc_control_mapping *map;
1258 int ret = -EINVAL; 1320 int ret = -EINVAL;
1259 1321
1322 if (mapping->get == NULL)
1323 mapping->get = uvc_get_le_value;
1324 if (mapping->set == NULL)
1325 mapping->set = uvc_set_le_value;
1326
1260 if (mapping->id & ~V4L2_CTRL_ID_MASK) { 1327 if (mapping->id & ~V4L2_CTRL_ID_MASK) {
1261 uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s' with " 1328 uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s' with "
1262 "invalid control id 0x%08x\n", mapping->name, 1329 "invalid control id 0x%08x\n", mapping->name,
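
uvc_ctrl_add_mapping() now fills in mapping->get and mapping->set with the generic little-endian accessors when a mapping does not supply its own, so only unusual controls (such as the relative zoom above) need custom conversion. A hedged sketch of that defaulting pattern with simplified types; generic_get()/generic_set() are placeholders standing in for uvc_get_le_value()/uvc_set_le_value().

    #include <stddef.h>
    #include <stdint.h>

    struct mapping;
    typedef int32_t (*get_fn)(struct mapping *m, uint8_t query, const uint8_t *data);
    typedef void    (*set_fn)(struct mapping *m, int32_t value, uint8_t *data);

    struct mapping {
            get_fn get;
            set_fn set;
    };

    static int32_t generic_get(struct mapping *m, uint8_t query, const uint8_t *data)
    {
            (void)m; (void)query;
            return data[0];                   /* placeholder decode */
    }

    static void generic_set(struct mapping *m, int32_t value, uint8_t *data)
    {
            (void)m;
            data[0] = (uint8_t)value;         /* placeholder encode */
    }

    static void add_mapping(struct mapping *m)
    {
            if (m->get == NULL)
                    m->get = generic_get;     /* fall back to the generic codec */
            if (m->set == NULL)
                    m->set = generic_set;
            /* ...validation and list insertion would follow here... */
    }
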
@@ -1306,6 +1373,51 @@ end:
1306} 1373}
1307 1374
1308/* 1375/*
1376 * Prune an entity of its bogus controls. This currently includes processing
1377 * unit auto controls for which no corresponding manual control is available.
1378 * Such auto controls make little sense if any, and are known to crash at
1379 * least the SiGma Micro webcam.
1380 */
1381static void
1382uvc_ctrl_prune_entity(struct uvc_entity *entity)
1383{
1384 static const struct {
1385 u8 idx_manual;
1386 u8 idx_auto;
1387 } blacklist[] = {
1388 { 2, 11 }, /* Hue */
1389 { 6, 12 }, /* White Balance Temperature */
1390 { 7, 13 }, /* White Balance Component */
1391 };
1392
1393 u8 *controls;
1394 unsigned int size;
1395 unsigned int i;
1396
1397 if (UVC_ENTITY_TYPE(entity) != VC_PROCESSING_UNIT)
1398 return;
1399
1400 controls = entity->processing.bmControls;
1401 size = entity->processing.bControlSize;
1402
1403 for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
1404 if (blacklist[i].idx_auto >= 8 * size ||
1405 blacklist[i].idx_manual >= 8 * size)
1406 continue;
1407
1408 if (!uvc_test_bit(controls, blacklist[i].idx_auto) ||
1409 uvc_test_bit(controls, blacklist[i].idx_manual))
1410 continue;
1411
1412 uvc_trace(UVC_TRACE_CONTROL, "Auto control %u/%u has no "
1413 "matching manual control, removing it.\n", entity->id,
1414 blacklist[i].idx_auto);
1415
1416 uvc_clear_bit(controls, blacklist[i].idx_auto);
1417 }
1418}
1419
1420/*
1309 * Initialize device controls. 1421 * Initialize device controls.
1310 */ 1422 */
1311int uvc_ctrl_init_device(struct uvc_device *dev) 1423int uvc_ctrl_init_device(struct uvc_device *dev)
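
uvc_ctrl_prune_entity() drops a processing-unit auto control whenever its bit is advertised while the paired manual control's bit is clear, since such combinations reportedly crash at least one camera. A stripped-down sketch of the same pairing walk; the bit indices are the processing-unit positions quoted in the patch, and the bitmap helpers repeat the earlier sketch so the example stays self-contained.

    #include <stdint.h>

    struct ctrl_pair {
            uint8_t idx_manual;
            uint8_t idx_auto;
    };

    static const struct ctrl_pair prune_list[] = {
            {  2, 11 },   /* Hue / Hue, Auto */
            {  6, 12 },   /* White Balance Temperature / ... Auto */
            {  7, 13 },   /* White Balance Component / ... Auto */
    };

    static int bitmap_test(const uint8_t *d, int b) { return (d[b >> 3] >> (b & 7)) & 1; }
    static void bitmap_clear(uint8_t *d, int b) { d[b >> 3] &= ~(1 << (b & 7)); }

    static void prune(uint8_t *bmControls, unsigned int size_bytes)
    {
            unsigned int i;

            for (i = 0; i < sizeof(prune_list) / sizeof(prune_list[0]); ++i) {
                    if (prune_list[i].idx_auto >= 8 * size_bytes ||
                        prune_list[i].idx_manual >= 8 * size_bytes)
                            continue;
                    /* Auto advertised without its manual counterpart: drop it. */
                    if (bitmap_test(bmControls, prune_list[i].idx_auto) &&
                        !bitmap_test(bmControls, prune_list[i].idx_manual))
                            bitmap_clear(bmControls, prune_list[i].idx_auto);
            }
    }
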
@@ -1331,6 +1443,9 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
1331 bControlSize = entity->camera.bControlSize; 1443 bControlSize = entity->camera.bControlSize;
1332 } 1444 }
1333 1445
1446 if (dev->quirks & UVC_QUIRK_PRUNE_CONTROLS)
1447 uvc_ctrl_prune_entity(entity);
1448
1334 for (i = 0; i < bControlSize; ++i) 1449 for (i = 0; i < bControlSize; ++i)
1335 ncontrols += hweight8(bmControls[i]); 1450 ncontrols += hweight8(bmControls[i]);
1336 1451
@@ -1345,7 +1460,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
1345 1460
1346 ctrl = entity->controls; 1461 ctrl = entity->controls;
1347 for (i = 0; i < bControlSize * 8; ++i) { 1462 for (i = 0; i < bControlSize * 8; ++i) {
1348 if (uvc_get_bit(bmControls, i) == 0) 1463 if (uvc_test_bit(bmControls, i) == 0)
1349 continue; 1464 continue;
1350 1465
1351 ctrl->entity = entity; 1466 ctrl->entity = entity;
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index d7ad060640bc..89d8bd10a852 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -12,8 +12,8 @@
12 */ 12 */
13 13
14/* 14/*
15 * This driver aims to support video input devices compliant with the 'USB 15 * This driver aims to support video input and output devices compliant with the
16 * Video Class' specification. 16 * 'USB Video Class' specification.
17 * 17 *
18 * The driver doesn't support the deprecated v4l1 interface. It implements the 18 * The driver doesn't support the deprecated v4l1 interface. It implements the
19 * mmap capture method only, and doesn't do any image format conversion in 19 * mmap capture method only, and doesn't do any image format conversion in
@@ -32,6 +32,7 @@
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <asm/atomic.h> 34#include <asm/atomic.h>
35#include <asm/unaligned.h>
35 36
36#include <media/v4l2-common.h> 37#include <media/v4l2-common.h>
37 38
@@ -43,6 +44,7 @@
43#define DRIVER_VERSION "v0.1.0" 44#define DRIVER_VERSION "v0.1.0"
44#endif 45#endif
45 46
47unsigned int uvc_no_drop_param;
46static unsigned int uvc_quirks_param; 48static unsigned int uvc_quirks_param;
47unsigned int uvc_trace_param; 49unsigned int uvc_trace_param;
48 50
@@ -288,8 +290,10 @@ static int uvc_parse_format(struct uvc_device *dev,
288 struct uvc_format_desc *fmtdesc; 290 struct uvc_format_desc *fmtdesc;
289 struct uvc_frame *frame; 291 struct uvc_frame *frame;
290 const unsigned char *start = buffer; 292 const unsigned char *start = buffer;
293 unsigned char *_buffer;
291 unsigned int interval; 294 unsigned int interval;
292 unsigned int i, n; 295 unsigned int i, n;
296 int _buflen;
293 __u8 ftype; 297 __u8 ftype;
294 298
295 format->type = buffer[2]; 299 format->type = buffer[2];
@@ -410,12 +414,20 @@ static int uvc_parse_format(struct uvc_device *dev,
410 buflen -= buffer[0]; 414 buflen -= buffer[0];
411 buffer += buffer[0]; 415 buffer += buffer[0];
412 416
417 /* Count the number of frame descriptors to test the bFrameIndex
418 * field when parsing the descriptors. We can't rely on the
419 * bNumFrameDescriptors field as some cameras don't initialize it
420 * properly.
421 */
422 for (_buflen = buflen, _buffer = buffer;
423 _buflen > 2 && _buffer[2] == ftype;
424 _buflen -= _buffer[0], _buffer += _buffer[0])
425 format->nframes++;
426
413 /* Parse the frame descriptors. Only uncompressed, MJPEG and frame 427 /* Parse the frame descriptors. Only uncompressed, MJPEG and frame
414 * based formats have frame descriptors. 428 * based formats have frame descriptors.
415 */ 429 */
416 while (buflen > 2 && buffer[2] == ftype) { 430 while (buflen > 2 && buffer[2] == ftype) {
417 frame = &format->frame[format->nframes];
418
419 if (ftype != VS_FRAME_FRAME_BASED) 431 if (ftype != VS_FRAME_FRAME_BASED)
420 n = buflen > 25 ? buffer[25] : 0; 432 n = buflen > 25 ? buffer[25] : 0;
421 else 433 else
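
Because some cameras report a wrong bNumFrameDescriptors, the parser above now pre-counts the frame descriptors itself by walking the class-specific descriptors: byte 0 of each descriptor is its own length, byte 2 its subtype, and the walk stops at the first descriptor of a different subtype or when fewer than three bytes remain. A self-contained sketch of that counting loop (buffer layout assumed as in the patch; the zero-length guard is an addition for the sketch):

    #include <stdint.h>

    /* Count consecutive descriptors whose bDescriptorSubType (byte 2)
     * matches 'ftype'.  Byte 0 of every descriptor is its total length. */
    static unsigned int count_frames(const uint8_t *buf, int buflen, uint8_t ftype)
    {
            unsigned int nframes = 0;

            while (buflen > 2 && buf[2] == ftype) {
                    if (buf[0] == 0 || buf[0] > buflen)
                            break;            /* malformed length, stop early */
                    nframes++;
                    buflen -= buf[0];
                    buf += buf[0];
            }
            return nframes;
    }
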
@@ -430,22 +442,32 @@ static int uvc_parse_format(struct uvc_device *dev,
430 return -EINVAL; 442 return -EINVAL;
431 } 443 }
432 444
445 if (buffer[3] - 1 >= format->nframes) {
446 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
447 "interface %d frame index %u out of range\n",
448 dev->udev->devnum, alts->desc.bInterfaceNumber,
449 buffer[3]);
450 return -EINVAL;
451 }
452
453 frame = &format->frame[buffer[3] - 1];
454
433 frame->bFrameIndex = buffer[3]; 455 frame->bFrameIndex = buffer[3];
434 frame->bmCapabilities = buffer[4]; 456 frame->bmCapabilities = buffer[4];
435 frame->wWidth = le16_to_cpup((__le16 *)&buffer[5]); 457 frame->wWidth = get_unaligned_le16(&buffer[5]);
436 frame->wHeight = le16_to_cpup((__le16 *)&buffer[7]); 458 frame->wHeight = get_unaligned_le16(&buffer[7]);
437 frame->dwMinBitRate = le32_to_cpup((__le32 *)&buffer[9]); 459 frame->dwMinBitRate = get_unaligned_le32(&buffer[9]);
438 frame->dwMaxBitRate = le32_to_cpup((__le32 *)&buffer[13]); 460 frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]);
439 if (ftype != VS_FRAME_FRAME_BASED) { 461 if (ftype != VS_FRAME_FRAME_BASED) {
440 frame->dwMaxVideoFrameBufferSize = 462 frame->dwMaxVideoFrameBufferSize =
441 le32_to_cpup((__le32 *)&buffer[17]); 463 get_unaligned_le32(&buffer[17]);
442 frame->dwDefaultFrameInterval = 464 frame->dwDefaultFrameInterval =
443 le32_to_cpup((__le32 *)&buffer[21]); 465 get_unaligned_le32(&buffer[21]);
444 frame->bFrameIntervalType = buffer[25]; 466 frame->bFrameIntervalType = buffer[25];
445 } else { 467 } else {
446 frame->dwMaxVideoFrameBufferSize = 0; 468 frame->dwMaxVideoFrameBufferSize = 0;
447 frame->dwDefaultFrameInterval = 469 frame->dwDefaultFrameInterval =
448 le32_to_cpup((__le32 *)&buffer[17]); 470 get_unaligned_le32(&buffer[17]);
449 frame->bFrameIntervalType = buffer[21]; 471 frame->bFrameIntervalType = buffer[21];
450 } 472 }
451 frame->dwFrameInterval = *intervals; 473 frame->dwFrameInterval = *intervals;
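
Several le16_to_cpup()/le32_to_cpup() casts on raw descriptor bytes are replaced with get_unaligned_le16()/get_unaligned_le32(): descriptor fields sit at odd offsets inside a byte buffer, so dereferencing a cast pointer is not safe on strict-alignment machines. A portable byte-wise sketch of what such accessors amount to (helper names are local, not the kernel's):

    #include <stdint.h>

    /* Assemble little-endian values one byte at a time, so the source
     * pointer needs no particular alignment. */
    static uint16_t load_le16(const uint8_t *p)
    {
            return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
    }

    static uint32_t load_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /* e.g. wWidth at offset 5 of a frame descriptor:
     *     uint16_t width = load_le16(&buffer[5]); */
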
@@ -468,7 +490,7 @@ static int uvc_parse_format(struct uvc_device *dev,
468 * some other divisions by zero which could happen. 490 * some other divisions by zero which could happen.
469 */ 491 */
470 for (i = 0; i < n; ++i) { 492 for (i = 0; i < n; ++i) {
471 interval = le32_to_cpup((__le32 *)&buffer[26+4*i]); 493 interval = get_unaligned_le32(&buffer[26+4*i]);
472 *(*intervals)++ = interval ? interval : 1; 494 *(*intervals)++ = interval ? interval : 1;
473 } 495 }
474 496
@@ -486,7 +508,6 @@ static int uvc_parse_format(struct uvc_device *dev,
486 10000000/frame->dwDefaultFrameInterval, 508 10000000/frame->dwDefaultFrameInterval,
487 (100000000/frame->dwDefaultFrameInterval)%10); 509 (100000000/frame->dwDefaultFrameInterval)%10);
488 510
489 format->nframes++;
490 buflen -= buffer[0]; 511 buflen -= buffer[0];
491 buffer += buffer[0]; 512 buffer += buffer[0];
492 } 513 }
@@ -588,46 +609,55 @@ static int uvc_parse_streaming(struct uvc_device *dev,
588 } 609 }
589 610
590 /* Parse the header descriptor. */ 611 /* Parse the header descriptor. */
591 if (buffer[2] == VS_OUTPUT_HEADER) { 612 switch (buffer[2]) {
613 case VS_OUTPUT_HEADER:
614 streaming->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
615 size = 9;
616 break;
617
618 case VS_INPUT_HEADER:
619 streaming->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
620 size = 13;
621 break;
622
623 default:
592 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming interface " 624 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming interface "
593 "%d OUTPUT HEADER descriptor is not supported.\n", 625 "%d HEADER descriptor not found.\n", dev->udev->devnum,
594 dev->udev->devnum, alts->desc.bInterfaceNumber); 626 alts->desc.bInterfaceNumber);
595 goto error; 627 goto error;
596 } else if (buffer[2] == VS_INPUT_HEADER) { 628 }
597 p = buflen >= 5 ? buffer[3] : 0;
598 n = buflen >= 12 ? buffer[12] : 0;
599 629
600 if (buflen < 13 + p*n || buffer[2] != VS_INPUT_HEADER) { 630 p = buflen >= 4 ? buffer[3] : 0;
601 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming " 631 n = buflen >= size ? buffer[size-1] : 0;
602 "interface %d INPUT HEADER descriptor is " 632
603 "invalid.\n", dev->udev->devnum, 633 if (buflen < size + p*n) {
604 alts->desc.bInterfaceNumber); 634 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
605 goto error; 635 "interface %d HEADER descriptor is invalid.\n",
606 } 636 dev->udev->devnum, alts->desc.bInterfaceNumber);
637 goto error;
638 }
607 639
608 streaming->header.bNumFormats = p; 640 streaming->header.bNumFormats = p;
609 streaming->header.bEndpointAddress = buffer[6]; 641 streaming->header.bEndpointAddress = buffer[6];
642 if (buffer[2] == VS_INPUT_HEADER) {
610 streaming->header.bmInfo = buffer[7]; 643 streaming->header.bmInfo = buffer[7];
611 streaming->header.bTerminalLink = buffer[8]; 644 streaming->header.bTerminalLink = buffer[8];
612 streaming->header.bStillCaptureMethod = buffer[9]; 645 streaming->header.bStillCaptureMethod = buffer[9];
613 streaming->header.bTriggerSupport = buffer[10]; 646 streaming->header.bTriggerSupport = buffer[10];
614 streaming->header.bTriggerUsage = buffer[11]; 647 streaming->header.bTriggerUsage = buffer[11];
615 streaming->header.bControlSize = n;
616
617 streaming->header.bmaControls = kmalloc(p*n, GFP_KERNEL);
618 if (streaming->header.bmaControls == NULL) {
619 ret = -ENOMEM;
620 goto error;
621 }
622
623 memcpy(streaming->header.bmaControls, &buffer[13], p*n);
624 } else { 648 } else {
625 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming interface " 649 streaming->header.bTerminalLink = buffer[7];
626 "%d HEADER descriptor not found.\n", dev->udev->devnum, 650 }
627 alts->desc.bInterfaceNumber); 651 streaming->header.bControlSize = n;
652
653 streaming->header.bmaControls = kmalloc(p*n, GFP_KERNEL);
654 if (streaming->header.bmaControls == NULL) {
655 ret = -ENOMEM;
628 goto error; 656 goto error;
629 } 657 }
630 658
659 memcpy(streaming->header.bmaControls, &buffer[size], p*n);
660
631 buflen -= buffer[0]; 661 buflen -= buffer[0];
632 buffer += buffer[0]; 662 buffer += buffer[0];
633 663
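
The header parsing above now accepts both VS_INPUT_HEADER (13 fixed bytes, capture) and VS_OUTPUT_HEADER (9 fixed bytes, output), with bNumFormats in byte 3, bControlSize in the last fixed byte, and p*n bmaControls bytes following. A rough sketch of that shared layout handling; the subtype values follow the UVC spec numbering, and the struct below is illustrative only, not the driver's.

    #include <stdint.h>
    #include <string.h>

    enum { VS_IN_HDR = 0x01, VS_OUT_HDR = 0x02 };    /* UVC VS subtypes */

    struct vs_header {
            unsigned int nformats;
            unsigned int control_size;
            uint8_t      endpoint_address;
            uint8_t      terminal_link;
            uint8_t      controls[64];               /* big enough for the sketch */
    };

    /* Returns 0 on success, -1 if the descriptor is truncated or unknown. */
    static int parse_vs_header(const uint8_t *buf, int buflen, struct vs_header *h)
    {
            unsigned int size, p, n;

            if (buflen < 3)
                    return -1;

            switch (buf[2]) {
            case VS_OUT_HDR: size = 9;  break;
            case VS_IN_HDR:  size = 13; break;
            default:         return -1;
            }

            p = buflen >= 4 ? buf[3] : 0;                 /* bNumFormats */
            n = buflen >= (int)size ? buf[size - 1] : 0;  /* bControlSize */
            if (buflen < (int)(size + p * n) || p * n > sizeof(h->controls))
                    return -1;

            h->nformats         = p;
            h->endpoint_address = buf[6];
            h->terminal_link    = buf[2] == VS_IN_HDR ? buf[8] : buf[7];
            h->control_size     = n;
            memcpy(h->controls, &buf[size], p * n);
            return 0;
    }
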
@@ -813,8 +843,7 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
813 unit->type = VC_EXTENSION_UNIT; 843 unit->type = VC_EXTENSION_UNIT;
814 memcpy(unit->extension.guidExtensionCode, &buffer[4], 16); 844 memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
815 unit->extension.bNumControls = buffer[20]; 845 unit->extension.bNumControls = buffer[20];
816 unit->extension.bNrInPins = 846 unit->extension.bNrInPins = get_unaligned_le16(&buffer[21]);
817 le16_to_cpup((__le16 *)&buffer[21]);
818 unit->extension.baSourceID = (__u8 *)unit + sizeof *unit; 847 unit->extension.baSourceID = (__u8 *)unit + sizeof *unit;
819 memcpy(unit->extension.baSourceID, &buffer[22], p); 848 memcpy(unit->extension.baSourceID, &buffer[22], p);
820 unit->extension.bControlSize = buffer[22+p]; 849 unit->extension.bControlSize = buffer[22+p];
@@ -858,8 +887,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
858 return -EINVAL; 887 return -EINVAL;
859 } 888 }
860 889
861 dev->uvc_version = le16_to_cpup((__le16 *)&buffer[3]); 890 dev->uvc_version = get_unaligned_le16(&buffer[3]);
862 dev->clock_frequency = le32_to_cpup((__le32 *)&buffer[7]); 891 dev->clock_frequency = get_unaligned_le32(&buffer[7]);
863 892
864 /* Parse all USB Video Streaming interfaces. */ 893 /* Parse all USB Video Streaming interfaces. */
865 for (i = 0; i < n; ++i) { 894 for (i = 0; i < n; ++i) {
@@ -886,7 +915,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
886 /* Make sure the terminal type MSB is not null, otherwise it 915 /* Make sure the terminal type MSB is not null, otherwise it
887 * could be confused with a unit. 916 * could be confused with a unit.
888 */ 917 */
889 type = le16_to_cpup((__le16 *)&buffer[4]); 918 type = get_unaligned_le16(&buffer[4]);
890 if ((type & 0xff00) == 0) { 919 if ((type & 0xff00) == 0) {
891 uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol " 920 uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
892 "interface %d INPUT_TERMINAL %d has invalid " 921 "interface %d INPUT_TERMINAL %d has invalid "
@@ -928,11 +957,11 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
928 term->camera.bControlSize = n; 957 term->camera.bControlSize = n;
929 term->camera.bmControls = (__u8 *)term + sizeof *term; 958 term->camera.bmControls = (__u8 *)term + sizeof *term;
930 term->camera.wObjectiveFocalLengthMin = 959 term->camera.wObjectiveFocalLengthMin =
931 le16_to_cpup((__le16 *)&buffer[8]); 960 get_unaligned_le16(&buffer[8]);
932 term->camera.wObjectiveFocalLengthMax = 961 term->camera.wObjectiveFocalLengthMax =
933 le16_to_cpup((__le16 *)&buffer[10]); 962 get_unaligned_le16(&buffer[10]);
934 term->camera.wOcularFocalLength = 963 term->camera.wOcularFocalLength =
935 le16_to_cpup((__le16 *)&buffer[12]); 964 get_unaligned_le16(&buffer[12]);
936 memcpy(term->camera.bmControls, &buffer[15], n); 965 memcpy(term->camera.bmControls, &buffer[15], n);
937 } else if (UVC_ENTITY_TYPE(term) == ITT_MEDIA_TRANSPORT_INPUT) { 966 } else if (UVC_ENTITY_TYPE(term) == ITT_MEDIA_TRANSPORT_INPUT) {
938 term->media.bControlSize = n; 967 term->media.bControlSize = n;
@@ -968,7 +997,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
968 /* Make sure the terminal type MSB is not null, otherwise it 997 /* Make sure the terminal type MSB is not null, otherwise it
969 * could be confused with a unit. 998 * could be confused with a unit.
970 */ 999 */
971 type = le16_to_cpup((__le16 *)&buffer[4]); 1000 type = get_unaligned_le16(&buffer[4]);
972 if ((type & 0xff00) == 0) { 1001 if ((type & 0xff00) == 0) {
973 uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol " 1002 uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
974 "interface %d OUTPUT_TERMINAL %d has invalid " 1003 "interface %d OUTPUT_TERMINAL %d has invalid "
@@ -1042,7 +1071,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
1042 unit->type = buffer[2]; 1071 unit->type = buffer[2];
1043 unit->processing.bSourceID = buffer[4]; 1072 unit->processing.bSourceID = buffer[4];
1044 unit->processing.wMaxMultiplier = 1073 unit->processing.wMaxMultiplier =
1045 le16_to_cpup((__le16 *)&buffer[5]); 1074 get_unaligned_le16(&buffer[5]);
1046 unit->processing.bControlSize = buffer[7]; 1075 unit->processing.bControlSize = buffer[7];
1047 unit->processing.bmControls = (__u8 *)unit + sizeof *unit; 1076 unit->processing.bmControls = (__u8 *)unit + sizeof *unit;
1048 memcpy(unit->processing.bmControls, &buffer[8], n); 1077 memcpy(unit->processing.bmControls, &buffer[8], n);
@@ -1077,8 +1106,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
1077 unit->type = buffer[2]; 1106 unit->type = buffer[2];
1078 memcpy(unit->extension.guidExtensionCode, &buffer[4], 16); 1107 memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
1079 unit->extension.bNumControls = buffer[20]; 1108 unit->extension.bNumControls = buffer[20];
1080 unit->extension.bNrInPins = 1109 unit->extension.bNrInPins = get_unaligned_le16(&buffer[21]);
1081 le16_to_cpup((__le16 *)&buffer[21]);
1082 unit->extension.baSourceID = (__u8 *)unit + sizeof *unit; 1110 unit->extension.baSourceID = (__u8 *)unit + sizeof *unit;
1083 memcpy(unit->extension.baSourceID, &buffer[22], p); 1111 memcpy(unit->extension.baSourceID, &buffer[22], p);
1084 unit->extension.bControlSize = buffer[22+p]; 1112 unit->extension.bControlSize = buffer[22+p];
@@ -1128,8 +1156,13 @@ next_descriptor:
1128 buffer += buffer[0]; 1156 buffer += buffer[0];
1129 } 1157 }
1130 1158
1131 /* Check if the optional status endpoint is present. */ 1159 /* Check if the optional status endpoint is present. Built-in iSight
1132 if (alts->desc.bNumEndpoints == 1) { 1160 * webcams have an interrupt endpoint but spit proprietary data that
1161 * don't conform to the UVC status endpoint messages. Don't try to
1162 * handle the interrupt endpoint for those cameras.
1163 */
1164 if (alts->desc.bNumEndpoints == 1 &&
1165 !(dev->quirks & UVC_QUIRK_BUILTIN_ISIGHT)) {
1133 struct usb_host_endpoint *ep = &alts->endpoint[0]; 1166 struct usb_host_endpoint *ep = &alts->endpoint[0];
1134 struct usb_endpoint_descriptor *desc = &ep->desc; 1167 struct usb_endpoint_descriptor *desc = &ep->desc;
1135 1168
@@ -1234,6 +1267,26 @@ static int uvc_scan_chain_entity(struct uvc_video_device *video,
1234 list_add_tail(&entity->chain, &video->iterms); 1267 list_add_tail(&entity->chain, &video->iterms);
1235 break; 1268 break;
1236 1269
1270 case TT_STREAMING:
1271 if (uvc_trace_param & UVC_TRACE_PROBE)
1272 printk(" <- IT %d\n", entity->id);
1273
1274 if (!UVC_ENTITY_IS_ITERM(entity)) {
1275 uvc_trace(UVC_TRACE_DESCR, "Unsupported input "
1276 "terminal %u.\n", entity->id);
1277 return -1;
1278 }
1279
1280 if (video->sterm != NULL) {
1281 uvc_trace(UVC_TRACE_DESCR, "Found multiple streaming "
1282 "entities in chain.\n");
1283 return -1;
1284 }
1285
1286 list_add_tail(&entity->chain, &video->iterms);
1287 video->sterm = entity;
1288 break;
1289
1237 default: 1290 default:
1238 uvc_trace(UVC_TRACE_DESCR, "Unsupported entity type " 1291 uvc_trace(UVC_TRACE_DESCR, "Unsupported entity type "
1239 "0x%04x found in chain.\n", UVC_ENTITY_TYPE(entity)); 1292 "0x%04x found in chain.\n", UVC_ENTITY_TYPE(entity));
@@ -1344,6 +1397,10 @@ static int uvc_scan_chain(struct uvc_video_device *video)
1344 1397
1345 entity = video->oterm; 1398 entity = video->oterm;
1346 uvc_trace(UVC_TRACE_PROBE, "Scanning UVC chain: OT %d", entity->id); 1399 uvc_trace(UVC_TRACE_PROBE, "Scanning UVC chain: OT %d", entity->id);
1400
1401 if (UVC_ENTITY_TYPE(entity) == TT_STREAMING)
1402 video->sterm = entity;
1403
1347 id = entity->output.bSourceID; 1404 id = entity->output.bSourceID;
1348 while (id != 0) { 1405 while (id != 0) {
1349 prev = entity; 1406 prev = entity;
@@ -1372,8 +1429,11 @@ static int uvc_scan_chain(struct uvc_video_device *video)
1372 return id; 1429 return id;
1373 } 1430 }
1374 1431
1375 /* Initialize the video buffers queue. */ 1432 if (video->sterm == NULL) {
1376 uvc_queue_init(&video->queue); 1433 uvc_trace(UVC_TRACE_DESCR, "No streaming entity found in "
1434 "chain.\n");
1435 return -1;
1436 }
1377 1437
1378 return 0; 1438 return 0;
1379} 1439}
@@ -1384,7 +1444,8 @@ static int uvc_scan_chain(struct uvc_video_device *video)
1384 * The driver currently supports a single video device per control interface 1444 * The driver currently supports a single video device per control interface
1385 * only. The terminal and units must match the following structure: 1445 * only. The terminal and units must match the following structure:
1386 * 1446 *
1387 * ITT_CAMERA -> VC_PROCESSING_UNIT -> VC_EXTENSION_UNIT{0,n} -> TT_STREAMING 1447 * ITT_* -> VC_PROCESSING_UNIT -> VC_EXTENSION_UNIT{0,n} -> TT_STREAMING
1448 * TT_STREAMING -> VC_PROCESSING_UNIT -> VC_EXTENSION_UNIT{0,n} -> OTT_*
1388 * 1449 *
1389 * The Extension Units, if present, must have a single input pin. The 1450 * The Extension Units, if present, must have a single input pin. The
1390 * Processing Unit and Extension Units can be in any order. Additional 1451 * Processing Unit and Extension Units can be in any order. Additional
@@ -1401,7 +1462,7 @@ static int uvc_register_video(struct uvc_device *dev)
1401 list_for_each_entry(term, &dev->entities, list) { 1462 list_for_each_entry(term, &dev->entities, list) {
1402 struct uvc_streaming *streaming; 1463 struct uvc_streaming *streaming;
1403 1464
1404 if (UVC_ENTITY_TYPE(term) != TT_STREAMING) 1465 if (!UVC_ENTITY_IS_TERM(term) || !UVC_ENTITY_IS_OTERM(term))
1405 continue; 1466 continue;
1406 1467
1407 memset(&dev->video, 0, sizeof dev->video); 1468 memset(&dev->video, 0, sizeof dev->video);
@@ -1414,7 +1475,8 @@ static int uvc_register_video(struct uvc_device *dev)
1414 continue; 1475 continue;
1415 1476
1416 list_for_each_entry(streaming, &dev->streaming, list) { 1477 list_for_each_entry(streaming, &dev->streaming, list) {
1417 if (streaming->header.bTerminalLink == term->id) { 1478 if (streaming->header.bTerminalLink ==
1479 dev->video.sterm->id) {
1418 dev->video.streaming = streaming; 1480 dev->video.streaming = streaming;
1419 found = 1; 1481 found = 1;
1420 break; 1482 break;
@@ -1440,6 +1502,9 @@ static int uvc_register_video(struct uvc_device *dev)
1440 printk(" -> %d).\n", dev->video.oterm->id); 1502 printk(" -> %d).\n", dev->video.oterm->id);
1441 } 1503 }
1442 1504
1505 /* Initialize the video buffers queue. */
1506 uvc_queue_init(&dev->video.queue, dev->video.streaming->type);
1507
1443 /* Initialize the streaming interface with default streaming 1508 /* Initialize the streaming interface with default streaming
1444 * parameters. 1509 * parameters.
1445 */ 1510 */
@@ -1707,24 +1772,6 @@ static int uvc_reset_resume(struct usb_interface *intf)
1707 * though they are compliant. 1772 * though they are compliant.
1708 */ 1773 */
1709static struct usb_device_id uvc_ids[] = { 1774static struct usb_device_id uvc_ids[] = {
1710 /* ALi M5606 (Clevo M540SR) */
1711 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1712 | USB_DEVICE_ID_MATCH_INT_INFO,
1713 .idVendor = 0x0402,
1714 .idProduct = 0x5606,
1715 .bInterfaceClass = USB_CLASS_VIDEO,
1716 .bInterfaceSubClass = 1,
1717 .bInterfaceProtocol = 0,
1718 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1719 /* Creative Live! Optia */
1720 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1721 | USB_DEVICE_ID_MATCH_INT_INFO,
1722 .idVendor = 0x041e,
1723 .idProduct = 0x4057,
1724 .bInterfaceClass = USB_CLASS_VIDEO,
1725 .bInterfaceSubClass = 1,
1726 .bInterfaceProtocol = 0,
1727 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1728 /* Microsoft Lifecam NX-6000 */ 1775 /* Microsoft Lifecam NX-6000 */
1729 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1776 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1730 | USB_DEVICE_ID_MATCH_INT_INFO, 1777 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -1810,15 +1857,6 @@ static struct usb_device_id uvc_ids[] = {
1810 .bInterfaceSubClass = 1, 1857 .bInterfaceSubClass = 1,
1811 .bInterfaceProtocol = 0, 1858 .bInterfaceProtocol = 0,
1812 .driver_info = UVC_QUIRK_STREAM_NO_FID }, 1859 .driver_info = UVC_QUIRK_STREAM_NO_FID },
1813 /* Silicon Motion SM371 */
1814 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1815 | USB_DEVICE_ID_MATCH_INT_INFO,
1816 .idVendor = 0x090c,
1817 .idProduct = 0xb371,
1818 .bInterfaceClass = USB_CLASS_VIDEO,
1819 .bInterfaceSubClass = 1,
1820 .bInterfaceProtocol = 0,
1821 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1822 /* MT6227 */ 1860 /* MT6227 */
1823 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1861 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1824 | USB_DEVICE_ID_MATCH_INT_INFO, 1862 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -1837,6 +1875,15 @@ static struct usb_device_id uvc_ids[] = {
1837 .bInterfaceSubClass = 1, 1875 .bInterfaceSubClass = 1,
1838 .bInterfaceProtocol = 0, 1876 .bInterfaceProtocol = 0,
1839 .driver_info = UVC_QUIRK_STREAM_NO_FID }, 1877 .driver_info = UVC_QUIRK_STREAM_NO_FID },
1878 /* Syntek (Samsung Q310) */
1879 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1880 | USB_DEVICE_ID_MATCH_INT_INFO,
1881 .idVendor = 0x174f,
1882 .idProduct = 0x5931,
1883 .bInterfaceClass = USB_CLASS_VIDEO,
1884 .bInterfaceSubClass = 1,
1885 .bInterfaceProtocol = 0,
1886 .driver_info = UVC_QUIRK_STREAM_NO_FID },
1840 /* Asus F9SG */ 1887 /* Asus F9SG */
1841 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1888 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1842 | USB_DEVICE_ID_MATCH_INT_INFO, 1889 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -1855,6 +1902,15 @@ static struct usb_device_id uvc_ids[] = {
1855 .bInterfaceSubClass = 1, 1902 .bInterfaceSubClass = 1,
1856 .bInterfaceProtocol = 0, 1903 .bInterfaceProtocol = 0,
1857 .driver_info = UVC_QUIRK_STREAM_NO_FID }, 1904 .driver_info = UVC_QUIRK_STREAM_NO_FID },
1905 /* Lenovo Thinkpad SL500 */
1906 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1907 | USB_DEVICE_ID_MATCH_INT_INFO,
1908 .idVendor = 0x17ef,
1909 .idProduct = 0x480b,
1910 .bInterfaceClass = USB_CLASS_VIDEO,
1911 .bInterfaceSubClass = 1,
1912 .bInterfaceProtocol = 0,
1913 .driver_info = UVC_QUIRK_STREAM_NO_FID },
1858 /* Ecamm Pico iMage */ 1914 /* Ecamm Pico iMage */
1859 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1915 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1860 | USB_DEVICE_ID_MATCH_INT_INFO, 1916 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -1884,106 +1940,8 @@ static struct usb_device_id uvc_ids[] = {
1884 .bInterfaceSubClass = 1, 1940 .bInterfaceSubClass = 1,
1885 .bInterfaceProtocol = 0, 1941 .bInterfaceProtocol = 0,
1886 .driver_info = UVC_QUIRK_PROBE_MINMAX 1942 .driver_info = UVC_QUIRK_PROBE_MINMAX
1887 | UVC_QUIRK_IGNORE_SELECTOR_UNIT}, 1943 | UVC_QUIRK_IGNORE_SELECTOR_UNIT
1888 /* Acer OEM Webcam - Unknown vendor */ 1944 | UVC_QUIRK_PRUNE_CONTROLS },
1889 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1890 | USB_DEVICE_ID_MATCH_INT_INFO,
1891 .idVendor = 0x5986,
1892 .idProduct = 0x0100,
1893 .bInterfaceClass = USB_CLASS_VIDEO,
1894 .bInterfaceSubClass = 1,
1895 .bInterfaceProtocol = 0,
1896 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1897 /* Packard Bell OEM Webcam - Bison Electronics */
1898 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1899 | USB_DEVICE_ID_MATCH_INT_INFO,
1900 .idVendor = 0x5986,
1901 .idProduct = 0x0101,
1902 .bInterfaceClass = USB_CLASS_VIDEO,
1903 .bInterfaceSubClass = 1,
1904 .bInterfaceProtocol = 0,
1905 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1906 /* Acer Crystal Eye webcam - Bison Electronics */
1907 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1908 | USB_DEVICE_ID_MATCH_INT_INFO,
1909 .idVendor = 0x5986,
1910 .idProduct = 0x0102,
1911 .bInterfaceClass = USB_CLASS_VIDEO,
1912 .bInterfaceSubClass = 1,
1913 .bInterfaceProtocol = 0,
1914 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1915 /* Compaq Presario B1200 - Bison Electronics */
1916 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1917 | USB_DEVICE_ID_MATCH_INT_INFO,
1918 .idVendor = 0x5986,
1919 .idProduct = 0x0104,
1920 .bInterfaceClass = USB_CLASS_VIDEO,
1921 .bInterfaceSubClass = 1,
1922 .bInterfaceProtocol = 0,
1923 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1924 /* Acer Travelmate 7720 - Bison Electronics */
1925 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1926 | USB_DEVICE_ID_MATCH_INT_INFO,
1927 .idVendor = 0x5986,
1928 .idProduct = 0x0105,
1929 .bInterfaceClass = USB_CLASS_VIDEO,
1930 .bInterfaceSubClass = 1,
1931 .bInterfaceProtocol = 0,
1932 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1933 /* Medion Akoya Mini E1210 - Bison Electronics */
1934 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1935 | USB_DEVICE_ID_MATCH_INT_INFO,
1936 .idVendor = 0x5986,
1937 .idProduct = 0x0141,
1938 .bInterfaceClass = USB_CLASS_VIDEO,
1939 .bInterfaceSubClass = 1,
1940 .bInterfaceProtocol = 0,
1941 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1942 /* Acer OrbiCam - Bison Electronics */
1943 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1944 | USB_DEVICE_ID_MATCH_INT_INFO,
1945 .idVendor = 0x5986,
1946 .idProduct = 0x0200,
1947 .bInterfaceClass = USB_CLASS_VIDEO,
1948 .bInterfaceSubClass = 1,
1949 .bInterfaceProtocol = 0,
1950 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1951 /* Fujitsu Amilo SI2636 - Bison Electronics */
1952 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1953 | USB_DEVICE_ID_MATCH_INT_INFO,
1954 .idVendor = 0x5986,
1955 .idProduct = 0x0202,
1956 .bInterfaceClass = USB_CLASS_VIDEO,
1957 .bInterfaceSubClass = 1,
1958 .bInterfaceProtocol = 0,
1959 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1960 /* Advent 4211 - Bison Electronics */
1961 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1962 | USB_DEVICE_ID_MATCH_INT_INFO,
1963 .idVendor = 0x5986,
1964 .idProduct = 0x0203,
1965 .bInterfaceClass = USB_CLASS_VIDEO,
1966 .bInterfaceSubClass = 1,
1967 .bInterfaceProtocol = 0,
1968 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1969 /* Bison Electronics */
1970 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1971 | USB_DEVICE_ID_MATCH_INT_INFO,
1972 .idVendor = 0x5986,
1973 .idProduct = 0x0300,
1974 .bInterfaceClass = USB_CLASS_VIDEO,
1975 .bInterfaceSubClass = 1,
1976 .bInterfaceProtocol = 0,
1977 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1978 /* Clevo M570TU - Bison Electronics */
1979 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1980 | USB_DEVICE_ID_MATCH_INT_INFO,
1981 .idVendor = 0x5986,
1982 .idProduct = 0x0303,
1983 .bInterfaceClass = USB_CLASS_VIDEO,
1984 .bInterfaceSubClass = 1,
1985 .bInterfaceProtocol = 0,
1986 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1987 /* Generic USB Video Class */ 1945 /* Generic USB Video Class */
1988 { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, 1946 { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) },
1989 {} 1947 {}
@@ -2029,6 +1987,8 @@ static void __exit uvc_cleanup(void)
2029module_init(uvc_init); 1987module_init(uvc_init);
2030module_exit(uvc_cleanup); 1988module_exit(uvc_cleanup);
2031 1989
1990module_param_named(nodrop, uvc_no_drop_param, uint, S_IRUGO|S_IWUSR);
1991MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
2032module_param_named(quirks, uvc_quirks_param, uint, S_IRUGO|S_IWUSR); 1992module_param_named(quirks, uvc_quirks_param, uint, S_IRUGO|S_IWUSR);
2033MODULE_PARM_DESC(quirks, "Forced device quirks"); 1993MODULE_PARM_DESC(quirks, "Forced device quirks");
2034module_param_named(trace, uvc_trace_param, uint, S_IRUGO|S_IWUSR); 1994module_param_named(trace, uvc_trace_param, uint, S_IRUGO|S_IWUSR);
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 5646a6a32939..42546342e97d 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -79,12 +79,13 @@
79 * 79 *
80 */ 80 */
81 81
82void uvc_queue_init(struct uvc_video_queue *queue) 82void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
83{ 83{
84 mutex_init(&queue->mutex); 84 mutex_init(&queue->mutex);
85 spin_lock_init(&queue->irqlock); 85 spin_lock_init(&queue->irqlock);
86 INIT_LIST_HEAD(&queue->mainqueue); 86 INIT_LIST_HEAD(&queue->mainqueue);
87 INIT_LIST_HEAD(&queue->irqqueue); 87 INIT_LIST_HEAD(&queue->irqqueue);
88 queue->type = type;
88} 89}
89 90
90/* 91/*
@@ -132,7 +133,7 @@ int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
132 queue->buffer[i].buf.index = i; 133 queue->buffer[i].buf.index = i;
133 queue->buffer[i].buf.m.offset = i * bufsize; 134 queue->buffer[i].buf.m.offset = i * bufsize;
134 queue->buffer[i].buf.length = buflength; 135 queue->buffer[i].buf.length = buflength;
135 queue->buffer[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 136 queue->buffer[i].buf.type = queue->type;
136 queue->buffer[i].buf.sequence = 0; 137 queue->buffer[i].buf.sequence = 0;
137 queue->buffer[i].buf.field = V4L2_FIELD_NONE; 138 queue->buffer[i].buf.field = V4L2_FIELD_NONE;
138 queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP; 139 queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
@@ -226,7 +227,7 @@ int uvc_queue_buffer(struct uvc_video_queue *queue,
226 227
227 uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index); 228 uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);
228 229
229 if (v4l2_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 230 if (v4l2_buf->type != queue->type ||
230 v4l2_buf->memory != V4L2_MEMORY_MMAP) { 231 v4l2_buf->memory != V4L2_MEMORY_MMAP) {
231 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) " 232 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
232 "and/or memory (%u).\n", v4l2_buf->type, 233 "and/or memory (%u).\n", v4l2_buf->type,
@@ -249,6 +250,13 @@ int uvc_queue_buffer(struct uvc_video_queue *queue,
249 goto done; 250 goto done;
250 } 251 }
251 252
253 if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
254 v4l2_buf->bytesused > buf->buf.length) {
255 uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
256 ret = -EINVAL;
257 goto done;
258 }
259
252 spin_lock_irqsave(&queue->irqlock, flags); 260 spin_lock_irqsave(&queue->irqlock, flags);
253 if (queue->flags & UVC_QUEUE_DISCONNECTED) { 261 if (queue->flags & UVC_QUEUE_DISCONNECTED) {
254 spin_unlock_irqrestore(&queue->irqlock, flags); 262 spin_unlock_irqrestore(&queue->irqlock, flags);
@@ -256,7 +264,11 @@ int uvc_queue_buffer(struct uvc_video_queue *queue,
256 goto done; 264 goto done;
257 } 265 }
258 buf->state = UVC_BUF_STATE_QUEUED; 266 buf->state = UVC_BUF_STATE_QUEUED;
259 buf->buf.bytesused = 0; 267 if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
268 buf->buf.bytesused = 0;
269 else
270 buf->buf.bytesused = v4l2_buf->bytesused;
271
260 list_add_tail(&buf->stream, &queue->mainqueue); 272 list_add_tail(&buf->stream, &queue->mainqueue);
261 list_add_tail(&buf->queue, &queue->irqqueue); 273 list_add_tail(&buf->queue, &queue->irqqueue);
262 spin_unlock_irqrestore(&queue->irqlock, flags); 274 spin_unlock_irqrestore(&queue->irqlock, flags);
@@ -289,7 +301,7 @@ int uvc_dequeue_buffer(struct uvc_video_queue *queue,
289 struct uvc_buffer *buf; 301 struct uvc_buffer *buf;
290 int ret = 0; 302 int ret = 0;
291 303
292 if (v4l2_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 304 if (v4l2_buf->type != queue->type ||
293 v4l2_buf->memory != V4L2_MEMORY_MMAP) { 305 v4l2_buf->memory != V4L2_MEMORY_MMAP) {
294 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) " 306 uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
295 "and/or memory (%u).\n", v4l2_buf->type, 307 "and/or memory (%u).\n", v4l2_buf->type,
@@ -397,6 +409,7 @@ int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
397 } 409 }
398 queue->sequence = 0; 410 queue->sequence = 0;
399 queue->flags |= UVC_QUEUE_STREAMING; 411 queue->flags |= UVC_QUEUE_STREAMING;
412 queue->buf_used = 0;
400 } else { 413 } else {
401 uvc_queue_cancel(queue, 0); 414 uvc_queue_cancel(queue, 0);
402 INIT_LIST_HEAD(&queue->mainqueue); 415 INIT_LIST_HEAD(&queue->mainqueue);
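
With the queue now typed, uvc_queue_buffer() keeps the old capture behaviour (bytesused reset to 0 so the driver fills it in later) but for video output buffers it trusts the value supplied by the application, after bounding it by the buffer length. A compact sketch of that rule with simplified types; BUF_CAPTURE/BUF_OUTPUT are stand-ins for the V4L2_BUF_TYPE_* values.

    #include <stdint.h>

    enum buf_type { BUF_CAPTURE, BUF_OUTPUT };

    struct queued_buf {
            uint32_t length;      /* allocated size */
            uint32_t bytesused;   /* payload size */
    };

    /* Returns 0 on success, -1 when an output buffer claims too much data. */
    static int queue_buffer(struct queued_buf *buf, enum buf_type type,
                            uint32_t user_bytesused)
    {
            if (type == BUF_OUTPUT && user_bytesused > buf->length)
                    return -1;                       /* bytes used out of bounds */

            if (type == BUF_CAPTURE)
                    buf->bytesused = 0;              /* driver fills this at DQBUF time */
            else
                    buf->bytesused = user_bytesused; /* app-provided payload size */
            return 0;
    }
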
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 758dfefaba8d..afcc6934559e 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -110,7 +110,7 @@ static int uvc_v4l2_try_format(struct uvc_video_device *video,
110 int ret = 0; 110 int ret = 0;
111 __u8 *fcc; 111 __u8 *fcc;
112 112
113 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 113 if (fmt->type != video->streaming->type)
114 return -EINVAL; 114 return -EINVAL;
115 115
116 fcc = (__u8 *)&fmt->fmt.pix.pixelformat; 116 fcc = (__u8 *)&fmt->fmt.pix.pixelformat;
@@ -216,7 +216,7 @@ static int uvc_v4l2_get_format(struct uvc_video_device *video,
216 struct uvc_format *format = video->streaming->cur_format; 216 struct uvc_format *format = video->streaming->cur_format;
217 struct uvc_frame *frame = video->streaming->cur_frame; 217 struct uvc_frame *frame = video->streaming->cur_frame;
218 218
219 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 219 if (fmt->type != video->streaming->type)
220 return -EINVAL; 220 return -EINVAL;
221 221
222 if (format == NULL || frame == NULL) 222 if (format == NULL || frame == NULL)
@@ -242,7 +242,7 @@ static int uvc_v4l2_set_format(struct uvc_video_device *video,
242 struct uvc_frame *frame; 242 struct uvc_frame *frame;
243 int ret; 243 int ret;
244 244
245 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 245 if (fmt->type != video->streaming->type)
246 return -EINVAL; 246 return -EINVAL;
247 247
248 if (uvc_queue_streaming(&video->queue)) 248 if (uvc_queue_streaming(&video->queue))
@@ -252,9 +252,6 @@ static int uvc_v4l2_set_format(struct uvc_video_device *video,
252 if (ret < 0) 252 if (ret < 0)
253 return ret; 253 return ret;
254 254
255 if ((ret = uvc_set_video_ctrl(video, &probe, 0)) < 0)
256 return ret;
257
258 memcpy(&video->streaming->ctrl, &probe, sizeof probe); 255 memcpy(&video->streaming->ctrl, &probe, sizeof probe);
259 video->streaming->cur_format = format; 256 video->streaming->cur_format = format;
260 video->streaming->cur_frame = frame; 257 video->streaming->cur_frame = frame;
@@ -267,7 +264,7 @@ static int uvc_v4l2_get_streamparm(struct uvc_video_device *video,
267{ 264{
268 uint32_t numerator, denominator; 265 uint32_t numerator, denominator;
269 266
270 if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 267 if (parm->type != video->streaming->type)
271 return -EINVAL; 268 return -EINVAL;
272 269
273 numerator = video->streaming->ctrl.dwFrameInterval; 270 numerator = video->streaming->ctrl.dwFrameInterval;
@@ -275,13 +272,21 @@ static int uvc_v4l2_get_streamparm(struct uvc_video_device *video,
275 uvc_simplify_fraction(&numerator, &denominator, 8, 333); 272 uvc_simplify_fraction(&numerator, &denominator, 8, 333);
276 273
277 memset(parm, 0, sizeof *parm); 274 memset(parm, 0, sizeof *parm);
278 parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 275 parm->type = video->streaming->type;
279 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; 276
280 parm->parm.capture.capturemode = 0; 277 if (video->streaming->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
281 parm->parm.capture.timeperframe.numerator = numerator; 278 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
282 parm->parm.capture.timeperframe.denominator = denominator; 279 parm->parm.capture.capturemode = 0;
283 parm->parm.capture.extendedmode = 0; 280 parm->parm.capture.timeperframe.numerator = numerator;
284 parm->parm.capture.readbuffers = 0; 281 parm->parm.capture.timeperframe.denominator = denominator;
282 parm->parm.capture.extendedmode = 0;
283 parm->parm.capture.readbuffers = 0;
284 } else {
285 parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
286 parm->parm.output.outputmode = 0;
287 parm->parm.output.timeperframe.numerator = numerator;
288 parm->parm.output.timeperframe.denominator = denominator;
289 }
285 290
286 return 0; 291 return 0;
287} 292}
@@ -291,42 +296,45 @@ static int uvc_v4l2_set_streamparm(struct uvc_video_device *video,
291{ 296{
292 struct uvc_frame *frame = video->streaming->cur_frame; 297 struct uvc_frame *frame = video->streaming->cur_frame;
293 struct uvc_streaming_control probe; 298 struct uvc_streaming_control probe;
299 struct v4l2_fract timeperframe;
294 uint32_t interval; 300 uint32_t interval;
295 int ret; 301 int ret;
296 302
297 if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 303 if (parm->type != video->streaming->type)
298 return -EINVAL; 304 return -EINVAL;
299 305
300 if (uvc_queue_streaming(&video->queue)) 306 if (uvc_queue_streaming(&video->queue))
301 return -EBUSY; 307 return -EBUSY;
302 308
309 if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
310 timeperframe = parm->parm.capture.timeperframe;
311 else
312 timeperframe = parm->parm.output.timeperframe;
313
303 memcpy(&probe, &video->streaming->ctrl, sizeof probe); 314 memcpy(&probe, &video->streaming->ctrl, sizeof probe);
304 interval = uvc_fraction_to_interval( 315 interval = uvc_fraction_to_interval(timeperframe.numerator,
305 parm->parm.capture.timeperframe.numerator, 316 timeperframe.denominator);
306 parm->parm.capture.timeperframe.denominator);
307 317
308 uvc_trace(UVC_TRACE_FORMAT, "Setting frame interval to %u/%u (%u).\n", 318 uvc_trace(UVC_TRACE_FORMAT, "Setting frame interval to %u/%u (%u).\n",
309 parm->parm.capture.timeperframe.numerator, 319 timeperframe.numerator, timeperframe.denominator, interval);
310 parm->parm.capture.timeperframe.denominator,
311 interval);
312 probe.dwFrameInterval = uvc_try_frame_interval(frame, interval); 320 probe.dwFrameInterval = uvc_try_frame_interval(frame, interval);
313 321
314 /* Probe the device with the new settings. */ 322 /* Probe the device with the new settings. */
315 if ((ret = uvc_probe_video(video, &probe)) < 0) 323 if ((ret = uvc_probe_video(video, &probe)) < 0)
316 return ret; 324 return ret;
317 325
318 /* Commit the new settings. */
319 if ((ret = uvc_set_video_ctrl(video, &probe, 0)) < 0)
320 return ret;
321
322 memcpy(&video->streaming->ctrl, &probe, sizeof probe); 326 memcpy(&video->streaming->ctrl, &probe, sizeof probe);
323 327
324 /* Return the actual frame period. */ 328 /* Return the actual frame period. */
325 parm->parm.capture.timeperframe.numerator = probe.dwFrameInterval; 329 timeperframe.numerator = probe.dwFrameInterval;
326 parm->parm.capture.timeperframe.denominator = 10000000; 330 timeperframe.denominator = 10000000;
327 uvc_simplify_fraction(&parm->parm.capture.timeperframe.numerator, 331 uvc_simplify_fraction(&timeperframe.numerator,
328 &parm->parm.capture.timeperframe.denominator, 332 &timeperframe.denominator, 8, 333);
329 8, 333); 333
334 if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
335 parm->parm.capture.timeperframe = timeperframe;
336 else
337 parm->parm.output.timeperframe = timeperframe;
330 338
331 return 0; 339 return 0;
332} 340}
@@ -464,17 +472,13 @@ static int uvc_v4l2_release(struct inode *inode, struct file *file)
464 return 0; 472 return 0;
465} 473}
466 474
467static int __uvc_v4l2_do_ioctl(struct file *file, 475static int uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
468 unsigned int cmd, void *arg)
469{ 476{
470 struct video_device *vdev = video_devdata(file); 477 struct video_device *vdev = video_devdata(file);
471 struct uvc_video_device *video = video_get_drvdata(vdev); 478 struct uvc_video_device *video = video_get_drvdata(vdev);
472 struct uvc_fh *handle = (struct uvc_fh *)file->private_data; 479 struct uvc_fh *handle = (struct uvc_fh *)file->private_data;
473 int ret = 0; 480 int ret = 0;
474 481
475 if (uvc_trace_param & UVC_TRACE_IOCTL)
476 v4l_printk_ioctl(cmd);
477
478 switch (cmd) { 482 switch (cmd) {
479 /* Query capabilities */ 483 /* Query capabilities */
480 case VIDIOC_QUERYCAP: 484 case VIDIOC_QUERYCAP:
@@ -487,8 +491,12 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
487 strncpy(cap->bus_info, video->dev->udev->bus->bus_name, 491 strncpy(cap->bus_info, video->dev->udev->bus->bus_name,
488 sizeof cap->bus_info); 492 sizeof cap->bus_info);
489 cap->version = DRIVER_VERSION_NUMBER; 493 cap->version = DRIVER_VERSION_NUMBER;
490 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE 494 if (video->streaming->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
491 | V4L2_CAP_STREAMING; 495 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
496 | V4L2_CAP_STREAMING;
497 else
498 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT
499 | V4L2_CAP_STREAMING;
492 break; 500 break;
493 } 501 }
494 502
@@ -666,7 +674,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
666 struct v4l2_fmtdesc *fmt = arg; 674 struct v4l2_fmtdesc *fmt = arg;
667 struct uvc_format *format; 675 struct uvc_format *format;
668 676
669 if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 677 if (fmt->type != video->streaming->type ||
670 fmt->index >= video->streaming->nformats) 678 fmt->index >= video->streaming->nformats)
671 return -EINVAL; 679 return -EINVAL;
672 680
@@ -805,7 +813,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
805 struct v4l2_cropcap *ccap = arg; 813 struct v4l2_cropcap *ccap = arg;
806 struct uvc_frame *frame = video->streaming->cur_frame; 814 struct uvc_frame *frame = video->streaming->cur_frame;
807 815
808 if (ccap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 816 if (ccap->type != video->streaming->type)
809 return -EINVAL; 817 return -EINVAL;
810 818
811 ccap->bounds.left = 0; 819 ccap->bounds.left = 0;
@@ -831,7 +839,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
831 unsigned int bufsize = 839 unsigned int bufsize =
832 video->streaming->ctrl.dwMaxVideoFrameSize; 840 video->streaming->ctrl.dwMaxVideoFrameSize;
833 841
834 if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 842 if (rb->type != video->streaming->type ||
835 rb->memory != V4L2_MEMORY_MMAP) 843 rb->memory != V4L2_MEMORY_MMAP)
836 return -EINVAL; 844 return -EINVAL;
837 845
@@ -851,7 +859,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
851 { 859 {
852 struct v4l2_buffer *buf = arg; 860 struct v4l2_buffer *buf = arg;
853 861
854 if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 862 if (buf->type != video->streaming->type)
855 return -EINVAL; 863 return -EINVAL;
856 864
857 if (!uvc_has_privileges(handle)) 865 if (!uvc_has_privileges(handle))
@@ -877,7 +885,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
877 { 885 {
878 int *type = arg; 886 int *type = arg;
879 887
880 if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 888 if (*type != video->streaming->type)
881 return -EINVAL; 889 return -EINVAL;
882 890
883 if (!uvc_has_privileges(handle)) 891 if (!uvc_has_privileges(handle))
@@ -892,7 +900,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
892 { 900 {
893 int *type = arg; 901 int *type = arg;
894 902
895 if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 903 if (*type != video->streaming->type)
896 return -EINVAL; 904 return -EINVAL;
897 905
898 if (!uvc_has_privileges(handle)) 906 if (!uvc_has_privileges(handle))
@@ -925,7 +933,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
925 if (!capable(CAP_SYS_ADMIN)) 933 if (!capable(CAP_SYS_ADMIN))
926 return -EPERM; 934 return -EPERM;
927 935
928 info = kmalloc(sizeof *info, GFP_KERNEL); 936 info = kzalloc(sizeof *info, GFP_KERNEL);
929 if (info == NULL) 937 if (info == NULL)
930 return -ENOMEM; 938 return -ENOMEM;
931 939
@@ -952,7 +960,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
952 if (!capable(CAP_SYS_ADMIN)) 960 if (!capable(CAP_SYS_ADMIN))
953 return -EPERM; 961 return -EPERM;
954 962
955 map = kmalloc(sizeof *map, GFP_KERNEL); 963 map = kzalloc(sizeof *map, GFP_KERNEL);
956 if (map == NULL) 964 if (map == NULL)
957 return -ENOMEM; 965 return -ENOMEM;
958 966
@@ -979,7 +987,7 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
979 987
980 default: 988 default:
981 if ((ret = v4l_compat_translate_ioctl(file, cmd, arg, 989 if ((ret = v4l_compat_translate_ioctl(file, cmd, arg,
982 __uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD) 990 uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD)
983 uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n", 991 uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n",
984 cmd); 992 cmd);
985 return ret; 993 return ret;
@@ -988,17 +996,16 @@ static int __uvc_v4l2_do_ioctl(struct file *file,
988 return ret; 996 return ret;
989} 997}
990 998
991static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
992 unsigned int cmd, void *arg)
993{
994 return __uvc_v4l2_do_ioctl(file, cmd, arg);
995}
996
997static int uvc_v4l2_ioctl(struct inode *inode, struct file *file, 999static int uvc_v4l2_ioctl(struct inode *inode, struct file *file,
998 unsigned int cmd, unsigned long arg) 1000 unsigned int cmd, unsigned long arg)
999{ 1001{
1000 uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_ioctl\n"); 1002 if (uvc_trace_param & UVC_TRACE_IOCTL) {
1001 return video_usercopy(inode, file, cmd, arg, uvc_v4l2_do_ioctl); 1003 uvc_printk(KERN_DEBUG, "uvc_v4l2_ioctl(");
1004 v4l_printk_ioctl(cmd);
1005 printk(")\n");
1006 }
1007
1008 return video_usercopy(file, cmd, arg, uvc_v4l2_do_ioctl);
1002} 1009}
1003 1010
1004static ssize_t uvc_v4l2_read(struct file *file, char __user *data, 1011static ssize_t uvc_v4l2_read(struct file *file, char __user *data,
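
Most of the ioctl handlers above now compare the buffer type against video->streaming->type instead of hard-coding capture, and G/S_PARM pick the matching member of the v4l2_streamparm union. A small sketch of that selection, with a trimmed-down parm structure standing in for struct v4l2_streamparm and local stand-ins for the buffer-type constants.

    #include <stdint.h>

    struct fract { uint32_t numerator, denominator; };

    enum buf_type { BUF_CAPTURE, BUF_OUTPUT };

    struct streamparm {                          /* trimmed v4l2_streamparm */
            enum buf_type type;
            union {
                    struct { struct fract timeperframe; } capture;
                    struct { struct fract timeperframe; } output;
            } parm;
    };

    /* Write the negotiated frame period into whichever union member
     * matches the stream direction. */
    static void report_timeperframe(struct streamparm *p, struct fract tpf)
    {
            if (p->type == BUF_CAPTURE)
                    p->parm.capture.timeperframe = tpf;
            else
                    p->parm.output.timeperframe = tpf;
    }
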
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index b7bb23820d80..e7c31995527f 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -36,15 +36,22 @@ static int __uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
36{ 36{
37 __u8 type = USB_TYPE_CLASS | USB_RECIP_INTERFACE; 37 __u8 type = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
38 unsigned int pipe; 38 unsigned int pipe;
39 int ret;
40 39
41 pipe = (query & 0x80) ? usb_rcvctrlpipe(dev->udev, 0) 40 pipe = (query & 0x80) ? usb_rcvctrlpipe(dev->udev, 0)
42 : usb_sndctrlpipe(dev->udev, 0); 41 : usb_sndctrlpipe(dev->udev, 0);
43 type |= (query & 0x80) ? USB_DIR_IN : USB_DIR_OUT; 42 type |= (query & 0x80) ? USB_DIR_IN : USB_DIR_OUT;
44 43
45 ret = usb_control_msg(dev->udev, pipe, query, type, cs << 8, 44 return usb_control_msg(dev->udev, pipe, query, type, cs << 8,
46 unit << 8 | intfnum, data, size, timeout); 45 unit << 8 | intfnum, data, size, timeout);
46}
47
48int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
49 __u8 intfnum, __u8 cs, void *data, __u16 size)
50{
51 int ret;
47 52
53 ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
54 UVC_CTRL_CONTROL_TIMEOUT);
48 if (ret != size) { 55 if (ret != size) {
49 uvc_printk(KERN_ERR, "Failed to query (%u) UVC control %u " 56 uvc_printk(KERN_ERR, "Failed to query (%u) UVC control %u "
50 "(unit %u) : %d (exp. %u).\n", query, cs, unit, ret, 57 "(unit %u) : %d (exp. %u).\n", query, cs, unit, ret,
@@ -55,13 +62,6 @@ static int __uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
55 return 0; 62 return 0;
56} 63}
57 64
58int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
59 __u8 intfnum, __u8 cs, void *data, __u16 size)
60{
61 return __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
62 UVC_CTRL_CONTROL_TIMEOUT);
63}
64
65static void uvc_fixup_buffer_size(struct uvc_video_device *video, 65static void uvc_fixup_buffer_size(struct uvc_video_device *video,
66 struct uvc_streaming_control *ctrl) 66 struct uvc_streaming_control *ctrl)
67{ 67{
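The hunk above inverts the old wrapper relationship: uvc_query_ctrl() becomes a thin wrapper that passes UVC_CTRL_CONTROL_TIMEOUT to __uvc_query_ctrl(), so the streaming probe/commit path can call the low-level helper with its own, longer timeout. A small sketch of this wrapper-with-default-timeout pattern (all names and timeout values below are illustrative):

#include <stdio.h>

#define CONTROL_TIMEOUT_MS   300
#define STREAMING_TIMEOUT_MS 1000

/* Low-level helper: the caller chooses the timeout. */
static int query_raw(unsigned char cs, void *data, unsigned int size,
                     unsigned int timeout_ms)
{
        (void)data;     /* a real helper would issue the control transfer */
        printf("query cs=%u size=%u timeout=%ums\n", cs, size, timeout_ms);
        return (int)size;       /* pretend the full transfer succeeded */
}

/* Public helper: most callers just want the default control timeout. */
static int query(unsigned char cs, void *data, unsigned int size)
{
        return query_raw(cs, data, size, CONTROL_TIMEOUT_MS);
}

int main(void)
{
        unsigned char buf[26];

        query(2, buf, sizeof(buf));                             /* default */
        query_raw(1, buf, sizeof(buf), STREAMING_TIMEOUT_MS);   /* explicit */
        return 0;
}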
@@ -102,8 +102,36 @@ static int uvc_get_video_ctrl(struct uvc_video_device *video,
102 ret = __uvc_query_ctrl(video->dev, query, 0, video->streaming->intfnum, 102 ret = __uvc_query_ctrl(video->dev, query, 0, video->streaming->intfnum,
103 probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, data, size, 103 probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, data, size,
104 UVC_CTRL_STREAMING_TIMEOUT); 104 UVC_CTRL_STREAMING_TIMEOUT);
105 if (ret < 0) 105
106 if ((query == GET_MIN || query == GET_MAX) && ret == 2) {
107 /* Some cameras, mostly based on Bison Electronics chipsets,
108 * answer a GET_MIN or GET_MAX request with the wCompQuality
109 * field only.
110 */
111 uvc_warn_once(video->dev, UVC_WARN_MINMAX, "UVC non "
112 "compliance - GET_MIN/MAX(PROBE) incorrectly "
113 "supported. Enabling workaround.\n");
114			memset(ctrl, 0, sizeof *ctrl);
115 ctrl->wCompQuality = le16_to_cpup((__le16 *)data);
116 ret = 0;
117 goto out;
118 } else if (query == GET_DEF && probe == 1) {
119 /* Many cameras don't support the GET_DEF request on their
120 * video probe control. Warn once and return, the caller will
121 * fall back to GET_CUR.
122 */
123 uvc_warn_once(video->dev, UVC_WARN_PROBE_DEF, "UVC non "
124 "compliance - GET_DEF(PROBE) not supported. "
125 "Enabling workaround.\n");
126 ret = -EIO;
127 goto out;
128 } else if (ret != size) {
129 uvc_printk(KERN_ERR, "Failed to query (%u) UVC %s control : "
130 "%d (exp. %u).\n", query, probe ? "probe" : "commit",
131 ret, size);
132 ret = -EIO;
106 goto out; 133 goto out;
134 }
107 135
108 ctrl->bmHint = le16_to_cpup((__le16 *)&data[0]); 136 ctrl->bmHint = le16_to_cpup((__le16 *)&data[0]);
109 ctrl->bFormatIndex = data[2]; 137 ctrl->bFormatIndex = data[2];
@@ -114,14 +142,11 @@ static int uvc_get_video_ctrl(struct uvc_video_device *video,
114 ctrl->wCompQuality = le16_to_cpup((__le16 *)&data[12]); 142 ctrl->wCompQuality = le16_to_cpup((__le16 *)&data[12]);
115 ctrl->wCompWindowSize = le16_to_cpup((__le16 *)&data[14]); 143 ctrl->wCompWindowSize = le16_to_cpup((__le16 *)&data[14]);
116 ctrl->wDelay = le16_to_cpup((__le16 *)&data[16]); 144 ctrl->wDelay = le16_to_cpup((__le16 *)&data[16]);
117 ctrl->dwMaxVideoFrameSize = 145 ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
118 le32_to_cpu(get_unaligned((__le32 *)&data[18])); 146 ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
119 ctrl->dwMaxPayloadTransferSize =
120 le32_to_cpu(get_unaligned((__le32 *)&data[22]));
121 147
122 if (size == 34) { 148 if (size == 34) {
123 ctrl->dwClockFrequency = 149 ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
124 le32_to_cpu(get_unaligned((__le32 *)&data[26]));
125 ctrl->bmFramingInfo = data[30]; 150 ctrl->bmFramingInfo = data[30];
126 ctrl->bPreferedVersion = data[31]; 151 ctrl->bPreferedVersion = data[31];
127 ctrl->bMinVersion = data[32]; 152 ctrl->bMinVersion = data[32];
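The conversion above to get_unaligned_le32() reads a 32-bit little-endian field at an arbitrary byte offset of the probe/commit buffer, independently of host endianness and alignment. A standalone sketch of what such a helper does, using only standard C (the kernel's real helper additionally avoids unaligned loads on strict-alignment CPUs):

#include <stdint.h>
#include <stdio.h>

/* Assemble a 32-bit little-endian value byte by byte: safe at any
 * alignment and independent of the host's byte order. */
static uint32_t get_unaligned_le32_sketch(const uint8_t *p)
{
        return (uint32_t)p[0] |
               ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) |
               ((uint32_t)p[3] << 24);
}

int main(void)
{
        /* Offset 18 of a UVC probe reply holds dwMaxVideoFrameSize. */
        uint8_t reply[26] = { 0 };

        reply[18] = 0x00;
        reply[19] = 0x20;
        reply[20] = 0x03;
        reply[21] = 0x00;       /* 0x00032000 = 204800 bytes */

        printf("dwMaxVideoFrameSize = %u\n",
               get_unaligned_le32_sketch(&reply[18]));
        return 0;
}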
@@ -138,13 +163,14 @@ static int uvc_get_video_ctrl(struct uvc_video_device *video,
138 * Try to get the value from the format and frame descriptor. 163 * Try to get the value from the format and frame descriptor.
139 */ 164 */
140 uvc_fixup_buffer_size(video, ctrl); 165 uvc_fixup_buffer_size(video, ctrl);
166 ret = 0;
141 167
142out: 168out:
143 kfree(data); 169 kfree(data);
144 return ret; 170 return ret;
145} 171}
146 172
147int uvc_set_video_ctrl(struct uvc_video_device *video, 173static int uvc_set_video_ctrl(struct uvc_video_device *video,
148 struct uvc_streaming_control *ctrl, int probe) 174 struct uvc_streaming_control *ctrl, int probe)
149{ 175{
150 __u8 *data; 176 __u8 *data;
@@ -168,14 +194,11 @@ int uvc_set_video_ctrl(struct uvc_video_device *video,
168 /* Note: Some of the fields below are not required for IN devices (see 194 /* Note: Some of the fields below are not required for IN devices (see
169 * UVC spec, 4.3.1.1), but we still copy them in case support for OUT 195 * UVC spec, 4.3.1.1), but we still copy them in case support for OUT
170 * devices is added in the future. */ 196 * devices is added in the future. */
171 put_unaligned(cpu_to_le32(ctrl->dwMaxVideoFrameSize), 197 put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
172 (__le32 *)&data[18]); 198 put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
173 put_unaligned(cpu_to_le32(ctrl->dwMaxPayloadTransferSize),
174 (__le32 *)&data[22]);
175 199
176 if (size == 34) { 200 if (size == 34) {
177 put_unaligned(cpu_to_le32(ctrl->dwClockFrequency), 201 put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
178 (__le32 *)&data[26]);
179 data[30] = ctrl->bmFramingInfo; 202 data[30] = ctrl->bmFramingInfo;
180 data[31] = ctrl->bPreferedVersion; 203 data[31] = ctrl->bPreferedVersion;
181 data[32] = ctrl->bMinVersion; 204 data[32] = ctrl->bMinVersion;
@@ -186,6 +209,12 @@ int uvc_set_video_ctrl(struct uvc_video_device *video,
186 video->streaming->intfnum, 209 video->streaming->intfnum,
187 probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, data, size, 210 probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, data, size,
188 UVC_CTRL_STREAMING_TIMEOUT); 211 UVC_CTRL_STREAMING_TIMEOUT);
212 if (ret != size) {
213 uvc_printk(KERN_ERR, "Failed to set UVC %s control : "
214 "%d (exp. %u).\n", probe ? "probe" : "commit",
215 ret, size);
216 ret = -EIO;
217 }
189 218
190 kfree(data); 219 kfree(data);
191 return ret; 220 return ret;
@@ -252,6 +281,12 @@ done:
252 return ret; 281 return ret;
253} 282}
254 283
284int uvc_commit_video(struct uvc_video_device *video,
285 struct uvc_streaming_control *probe)
286{
287 return uvc_set_video_ctrl(video, probe, 0);
288}
289
255/* ------------------------------------------------------------------------ 290/* ------------------------------------------------------------------------
256 * Video codecs 291 * Video codecs
257 */ 292 */
@@ -333,7 +368,7 @@ static int uvc_video_decode_start(struct uvc_video_device *video,
333 368
334 /* Synchronize to the input stream by waiting for the FID bit to be 369 /* Synchronize to the input stream by waiting for the FID bit to be
335	 * toggled when the buffer state is not UVC_BUF_STATE_ACTIVE.	370	 * toggled when the buffer state is not UVC_BUF_STATE_ACTIVE.
336 * queue->last_fid is initialized to -1, so the first isochronous 371 * video->last_fid is initialized to -1, so the first isochronous
337 * frame will always be in sync. 372 * frame will always be in sync.
338 * 373 *
339 * If the device doesn't toggle the FID bit, invert video->last_fid 374 * If the device doesn't toggle the FID bit, invert video->last_fid
@@ -360,7 +395,7 @@ static int uvc_video_decode_start(struct uvc_video_device *video,
360 * last payload can be lost anyway). We thus must check if the FID has 395 * last payload can be lost anyway). We thus must check if the FID has
361 * been toggled. 396 * been toggled.
362 * 397 *
363 * queue->last_fid is initialized to -1, so the first isochronous 398 * video->last_fid is initialized to -1, so the first isochronous
364 * frame will never trigger an end of frame detection. 399 * frame will never trigger an end of frame detection.
365 * 400 *
366 * Empty buffers (bytesused == 0) don't trigger end of frame detection 401 * Empty buffers (bytesused == 0) don't trigger end of frame detection
@@ -418,6 +453,34 @@ static void uvc_video_decode_end(struct uvc_video_device *video,
418 } 453 }
419} 454}
420 455
456static int uvc_video_encode_header(struct uvc_video_device *video,
457 struct uvc_buffer *buf, __u8 *data, int len)
458{
459 data[0] = 2; /* Header length */
460 data[1] = UVC_STREAM_EOH | UVC_STREAM_EOF
461 | (video->last_fid & UVC_STREAM_FID);
462 return 2;
463}
464
465static int uvc_video_encode_data(struct uvc_video_device *video,
466 struct uvc_buffer *buf, __u8 *data, int len)
467{
468 struct uvc_video_queue *queue = &video->queue;
469 unsigned int nbytes;
470 void *mem;
471
472 /* Copy video data to the URB buffer. */
473 mem = queue->mem + buf->buf.m.offset + queue->buf_used;
474 nbytes = min((unsigned int)len, buf->buf.bytesused - queue->buf_used);
475 nbytes = min(video->bulk.max_payload_size - video->bulk.payload_size,
476 nbytes);
477 memcpy(data, mem, nbytes);
478
479 queue->buf_used += nbytes;
480
481 return nbytes;
482}
483
421/* ------------------------------------------------------------------------ 484/* ------------------------------------------------------------------------
422 * URB handling 485 * URB handling
423 */ 486 */
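uvc_video_encode_data() above copies the smaller of three limits per call: the space left in the URB, the bytes left in the frame buffer, and the room left in the current bulk payload. A tiny standalone model of that double min() clamp (names are illustrative):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/* How many bytes may be copied into this URB right now? */
static unsigned int bytes_to_copy(unsigned int urb_space,
                                  unsigned int frame_remaining,
                                  unsigned int payload_remaining)
{
        unsigned int n = min_u(urb_space, frame_remaining);

        return min_u(n, payload_remaining);
}

int main(void)
{
        /* 16 KiB URB, 5000 bytes of frame left, 3000 bytes left in payload. */
        printf("%u\n", bytes_to_copy(16384, 5000, 3000));  /* prints 3000 */
        return 0;
}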
@@ -477,7 +540,7 @@ static void uvc_video_decode_bulk(struct urb *urb,
477 /* If the URB is the first of its payload, decode and save the 540 /* If the URB is the first of its payload, decode and save the
478 * header. 541 * header.
479 */ 542 */
480 if (video->bulk.header_size == 0) { 543 if (video->bulk.header_size == 0 && !video->bulk.skip_payload) {
481 do { 544 do {
482 ret = uvc_video_decode_start(video, buf, mem, len); 545 ret = uvc_video_decode_start(video, buf, mem, len);
483 if (ret == -EAGAIN) 546 if (ret == -EAGAIN)
@@ -487,14 +550,13 @@ static void uvc_video_decode_bulk(struct urb *urb,
487		/* If an error occurred, skip the rest of the payload. */	550		/* If an error occurred, skip the rest of the payload. */
488 if (ret < 0 || buf == NULL) { 551 if (ret < 0 || buf == NULL) {
489 video->bulk.skip_payload = 1; 552 video->bulk.skip_payload = 1;
490 return; 553 } else {
491 } 554 memcpy(video->bulk.header, mem, ret);
555 video->bulk.header_size = ret;
492 556
493 video->bulk.header_size = ret; 557 mem += ret;
494 memcpy(video->bulk.header, mem, video->bulk.header_size); 558 len -= ret;
495 559 }
496 mem += ret;
497 len -= ret;
498 } 560 }
499 561
500 /* The buffer queue might have been cancelled while a bulk transfer 562 /* The buffer queue might have been cancelled while a bulk transfer
@@ -525,6 +587,48 @@ static void uvc_video_decode_bulk(struct urb *urb,
525 } 587 }
526} 588}
527 589
590static void uvc_video_encode_bulk(struct urb *urb,
591 struct uvc_video_device *video, struct uvc_buffer *buf)
592{
593 u8 *mem = urb->transfer_buffer;
594 int len = video->urb_size, ret;
595
596 if (buf == NULL) {
597 urb->transfer_buffer_length = 0;
598 return;
599 }
600
601 /* If the URB is the first of its payload, add the header. */
602 if (video->bulk.header_size == 0) {
603 ret = uvc_video_encode_header(video, buf, mem, len);
604 video->bulk.header_size = ret;
605 video->bulk.payload_size += ret;
606 mem += ret;
607 len -= ret;
608 }
609
610 /* Process video data. */
611 ret = uvc_video_encode_data(video, buf, mem, len);
612
613 video->bulk.payload_size += ret;
614 len -= ret;
615
616 if (buf->buf.bytesused == video->queue.buf_used ||
617 video->bulk.payload_size == video->bulk.max_payload_size) {
618 if (buf->buf.bytesused == video->queue.buf_used) {
619 video->queue.buf_used = 0;
620 buf->state = UVC_BUF_STATE_DONE;
621 uvc_queue_next_buffer(&video->queue, buf);
622 video->last_fid ^= UVC_STREAM_FID;
623 }
624
625 video->bulk.header_size = 0;
626 video->bulk.payload_size = 0;
627 }
628
629 urb->transfer_buffer_length = video->urb_size - len;
630}
631
528static void uvc_video_complete(struct urb *urb) 632static void uvc_video_complete(struct urb *urb)
529{ 633{
530 struct uvc_video_device *video = urb->context; 634 struct uvc_video_device *video = urb->context;
@@ -722,7 +826,15 @@ static int uvc_init_video_bulk(struct uvc_video_device *video,
722 if (uvc_alloc_urb_buffers(video, size) < 0) 826 if (uvc_alloc_urb_buffers(video, size) < 0)
723 return -ENOMEM; 827 return -ENOMEM;
724 828
725 pipe = usb_rcvbulkpipe(video->dev->udev, ep->desc.bEndpointAddress); 829 if (usb_endpoint_dir_in(&ep->desc))
830 pipe = usb_rcvbulkpipe(video->dev->udev,
831 ep->desc.bEndpointAddress);
832 else
833 pipe = usb_sndbulkpipe(video->dev->udev,
834 ep->desc.bEndpointAddress);
835
836 if (video->streaming->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
837 size = 0;
726 838
727 for (i = 0; i < UVC_URBS; ++i) { 839 for (i = 0; i < UVC_URBS; ++i) {
728 urb = usb_alloc_urb(0, gfp_flags); 840 urb = usb_alloc_urb(0, gfp_flags);
@@ -854,7 +966,7 @@ int uvc_video_resume(struct uvc_video_device *video)
854 966
855 video->frozen = 0; 967 video->frozen = 0;
856 968
857 if ((ret = uvc_set_video_ctrl(video, &video->streaming->ctrl, 0)) < 0) { 969 if ((ret = uvc_commit_video(video, &video->streaming->ctrl)) < 0) {
858 uvc_queue_enable(&video->queue, 0); 970 uvc_queue_enable(&video->queue, 0);
859 return ret; 971 return ret;
860 } 972 }
@@ -935,23 +1047,30 @@ int uvc_video_init(struct uvc_video_device *video)
935 break; 1047 break;
936 } 1048 }
937 1049
938 /* Commit the default settings. */
939 probe->bFormatIndex = format->index; 1050 probe->bFormatIndex = format->index;
940 probe->bFrameIndex = frame->bFrameIndex; 1051 probe->bFrameIndex = frame->bFrameIndex;
941 if ((ret = uvc_set_video_ctrl(video, probe, 0)) < 0)
942 return ret;
943 1052
944 video->streaming->cur_format = format; 1053 video->streaming->cur_format = format;
945 video->streaming->cur_frame = frame; 1054 video->streaming->cur_frame = frame;
946 atomic_set(&video->active, 0); 1055 atomic_set(&video->active, 0);
947 1056
948 /* Select the video decoding function */ 1057 /* Select the video decoding function */
949 if (video->dev->quirks & UVC_QUIRK_BUILTIN_ISIGHT) 1058 if (video->streaming->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
950 video->decode = uvc_video_decode_isight; 1059 if (video->dev->quirks & UVC_QUIRK_BUILTIN_ISIGHT)
951 else if (video->streaming->intf->num_altsetting > 1) 1060 video->decode = uvc_video_decode_isight;
952 video->decode = uvc_video_decode_isoc; 1061 else if (video->streaming->intf->num_altsetting > 1)
953 else 1062 video->decode = uvc_video_decode_isoc;
954 video->decode = uvc_video_decode_bulk; 1063 else
1064 video->decode = uvc_video_decode_bulk;
1065 } else {
1066 if (video->streaming->intf->num_altsetting == 1)
1067 video->decode = uvc_video_encode_bulk;
1068 else {
1069 uvc_printk(KERN_INFO, "Isochronous endpoints are not "
1070 "supported for video output devices.\n");
1071 return -EINVAL;
1072 }
1073 }
955 1074
956 return 0; 1075 return 0;
957} 1076}
@@ -971,7 +1090,8 @@ int uvc_video_enable(struct uvc_video_device *video, int enable)
971 return 0; 1090 return 0;
972 } 1091 }
973 1092
974 if (video->streaming->cur_format->flags & UVC_FMT_FLAG_COMPRESSED) 1093 if ((video->streaming->cur_format->flags & UVC_FMT_FLAG_COMPRESSED) ||
1094 uvc_no_drop_param)
975 video->queue.flags &= ~UVC_QUEUE_DROP_INCOMPLETE; 1095 video->queue.flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
976 else 1096 else
977 video->queue.flags |= UVC_QUEUE_DROP_INCOMPLETE; 1097 video->queue.flags |= UVC_QUEUE_DROP_INCOMPLETE;
@@ -979,6 +1099,10 @@ int uvc_video_enable(struct uvc_video_device *video, int enable)
979 if ((ret = uvc_queue_enable(&video->queue, 1)) < 0) 1099 if ((ret = uvc_queue_enable(&video->queue, 1)) < 0)
980 return ret; 1100 return ret;
981 1101
1102 /* Commit the streaming parameters. */
1103 if ((ret = uvc_commit_video(video, &video->streaming->ctrl)) < 0)
1104 return ret;
1105
982 return uvc_init_video(video, GFP_KERNEL); 1106 return uvc_init_video(video, GFP_KERNEL);
983} 1107}
984 1108
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 9a6bc1aafb16..896b791ece15 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -4,7 +4,6 @@
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/videodev2.h> 5#include <linux/videodev2.h>
6 6
7
8/* 7/*
9 * Dynamic controls 8 * Dynamic controls
10 */ 9 */
@@ -316,6 +315,7 @@ struct uvc_xu_control {
316#define UVC_QUIRK_BUILTIN_ISIGHT 0x00000008 315#define UVC_QUIRK_BUILTIN_ISIGHT 0x00000008
317#define UVC_QUIRK_STREAM_NO_FID 0x00000010 316#define UVC_QUIRK_STREAM_NO_FID 0x00000010
318#define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020 317#define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020
318#define UVC_QUIRK_PRUNE_CONTROLS 0x00000040
319 319
320/* Format flags */ 320/* Format flags */
321#define UVC_FMT_FLAG_COMPRESSED 0x00000001 321#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -383,6 +383,11 @@ struct uvc_control_mapping {
383 383
384 struct uvc_menu_info *menu_info; 384 struct uvc_menu_info *menu_info;
385 __u32 menu_count; 385 __u32 menu_count;
386
387 __s32 (*get) (struct uvc_control_mapping *mapping, __u8 query,
388 const __u8 *data);
389 void (*set) (struct uvc_control_mapping *mapping, __s32 value,
390 __u8 *data);
386}; 391};
387 392
388struct uvc_control { 393struct uvc_control {
@@ -523,6 +528,7 @@ struct uvc_streaming {
523 __u16 maxpsize; 528 __u16 maxpsize;
524 529
525 struct uvc_streaming_header header; 530 struct uvc_streaming_header header;
531 enum v4l2_buf_type type;
526 532
527 unsigned int nformats; 533 unsigned int nformats;
528 struct uvc_format *format; 534 struct uvc_format *format;
@@ -558,12 +564,15 @@ struct uvc_buffer {
558#define UVC_QUEUE_DROP_INCOMPLETE (1 << 2) 564#define UVC_QUEUE_DROP_INCOMPLETE (1 << 2)
559 565
560struct uvc_video_queue { 566struct uvc_video_queue {
567 enum v4l2_buf_type type;
568
561 void *mem; 569 void *mem;
562 unsigned int flags; 570 unsigned int flags;
563 __u32 sequence; 571 __u32 sequence;
564 572
565 unsigned int count; 573 unsigned int count;
566 unsigned int buf_size; 574 unsigned int buf_size;
575 unsigned int buf_used;
567 struct uvc_buffer buffer[UVC_MAX_VIDEO_BUFFERS]; 576 struct uvc_buffer buffer[UVC_MAX_VIDEO_BUFFERS];
568 struct mutex mutex; /* protects buffers and mainqueue */ 577 struct mutex mutex; /* protects buffers and mainqueue */
569 spinlock_t irqlock; /* protects irqqueue */ 578 spinlock_t irqlock; /* protects irqqueue */
@@ -578,8 +587,9 @@ struct uvc_video_device {
578 atomic_t active; 587 atomic_t active;
579 unsigned int frozen : 1; 588 unsigned int frozen : 1;
580 589
581 struct list_head iterms; 590 struct list_head iterms; /* Input terminals */
582 struct uvc_entity *oterm; 591 struct uvc_entity *oterm; /* Output terminal */
592 struct uvc_entity *sterm; /* USB streaming terminal */
583 struct uvc_entity *processing; 593 struct uvc_entity *processing;
584 struct uvc_entity *selector; 594 struct uvc_entity *selector;
585 struct list_head extensions; 595 struct list_head extensions;
@@ -617,6 +627,7 @@ enum uvc_device_state {
617struct uvc_device { 627struct uvc_device {
618 struct usb_device *udev; 628 struct usb_device *udev;
619 struct usb_interface *intf; 629 struct usb_interface *intf;
630 unsigned long warnings;
620 __u32 quirks; 631 __u32 quirks;
621 int intfnum; 632 int intfnum;
622 char name[32]; 633 char name[32];
@@ -679,6 +690,10 @@ struct uvc_driver {
679#define UVC_TRACE_SUSPEND (1 << 8) 690#define UVC_TRACE_SUSPEND (1 << 8)
680#define UVC_TRACE_STATUS (1 << 9) 691#define UVC_TRACE_STATUS (1 << 9)
681 692
693#define UVC_WARN_MINMAX 0
694#define UVC_WARN_PROBE_DEF 1
695
696extern unsigned int uvc_no_drop_param;
682extern unsigned int uvc_trace_param; 697extern unsigned int uvc_trace_param;
683 698
684#define uvc_trace(flag, msg...) \ 699#define uvc_trace(flag, msg...) \
@@ -687,6 +702,12 @@ extern unsigned int uvc_trace_param;
687 printk(KERN_DEBUG "uvcvideo: " msg); \ 702 printk(KERN_DEBUG "uvcvideo: " msg); \
688 } while (0) 703 } while (0)
689 704
705#define uvc_warn_once(dev, warn, msg...) \
706 do { \
707 if (!test_and_set_bit(warn, &dev->warnings)) \
708 printk(KERN_INFO "uvcvideo: " msg); \
709 } while (0)
710
690#define uvc_printk(level, msg...) \ 711#define uvc_printk(level, msg...) \
691 printk(level "uvcvideo: " msg) 712 printk(level "uvcvideo: " msg)
692 713
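The uvc_warn_once() macro added above prints a given warning only the first time it triggers, using one bit per warning ID in dev->warnings and test_and_set_bit() as the "already printed?" check. A standalone sketch of the same idea; it uses a plain, non-atomic bit test for brevity, whereas the driver needs the atomic bitop because completion handlers can race:

#include <stdio.h>

#define WARN_MINMAX    0
#define WARN_PROBE_DEF 1

static unsigned long warnings;  /* one bit per warning already shown */

/* Non-atomic stand-in for test_and_set_bit(): returns the old bit value. */
static int test_and_set_bit_sketch(int nr, unsigned long *addr)
{
        unsigned long mask = 1UL << nr;
        int old = (*addr & mask) != 0;

        *addr |= mask;
        return old;
}

static void warn_once(int id, const char *msg)
{
        if (!test_and_set_bit_sketch(id, &warnings))
                printf("warning: %s\n", msg);
}

int main(void)
{
        warn_once(WARN_MINMAX, "GET_MIN/MAX not compliant");    /* printed */
        warn_once(WARN_MINMAX, "GET_MIN/MAX not compliant");    /* suppressed */
        warn_once(WARN_PROBE_DEF, "GET_DEF not supported");     /* printed */
        return 0;
}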
@@ -709,7 +730,8 @@ extern struct uvc_driver uvc_driver;
709extern void uvc_delete(struct kref *kref); 730extern void uvc_delete(struct kref *kref);
710 731
711/* Video buffers queue management. */ 732/* Video buffers queue management. */
712extern void uvc_queue_init(struct uvc_video_queue *queue); 733extern void uvc_queue_init(struct uvc_video_queue *queue,
734 enum v4l2_buf_type type);
713extern int uvc_alloc_buffers(struct uvc_video_queue *queue, 735extern int uvc_alloc_buffers(struct uvc_video_queue *queue,
714 unsigned int nbuffers, unsigned int buflength); 736 unsigned int nbuffers, unsigned int buflength);
715extern int uvc_free_buffers(struct uvc_video_queue *queue); 737extern int uvc_free_buffers(struct uvc_video_queue *queue);
@@ -740,10 +762,10 @@ extern int uvc_video_resume(struct uvc_video_device *video);
740extern int uvc_video_enable(struct uvc_video_device *video, int enable); 762extern int uvc_video_enable(struct uvc_video_device *video, int enable);
741extern int uvc_probe_video(struct uvc_video_device *video, 763extern int uvc_probe_video(struct uvc_video_device *video,
742 struct uvc_streaming_control *probe); 764 struct uvc_streaming_control *probe);
765extern int uvc_commit_video(struct uvc_video_device *video,
766 struct uvc_streaming_control *ctrl);
743extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, 767extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
744 __u8 intfnum, __u8 cs, void *data, __u16 size); 768 __u8 intfnum, __u8 cs, void *data, __u16 size);
745extern int uvc_set_video_ctrl(struct uvc_video_device *video,
746 struct uvc_streaming_control *ctrl, int probe);
747 769
748/* Status */ 770/* Status */
749extern int uvc_status_init(struct uvc_device *dev); 771extern int uvc_status_init(struct uvc_device *dev);
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 846763d7349e..c676b0b0f708 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -28,7 +28,7 @@
28 * as published by the Free Software Foundation; either version 28 * as published by the Free Software Foundation; either version
29 * 2 of the License, or (at your option) any later version. 29 * 2 of the License, or (at your option) any later version.
30 * 30 *
31 * Author: Alan Cox, <alan@redhat.com> 31 * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
32 * 32 *
33 * Fixes: 33 * Fixes:
34 */ 34 */
@@ -58,6 +58,7 @@
58#include <asm/div64.h> 58#include <asm/div64.h>
59#define __OLD_VIDIOC_ /* To allow fixing old calls*/ 59#define __OLD_VIDIOC_ /* To allow fixing old calls*/
60#include <media/v4l2-common.h> 60#include <media/v4l2-common.h>
61#include <media/v4l2-device.h>
61#include <media/v4l2-chip-ident.h> 62#include <media/v4l2-chip-ident.h>
62 63
63#include <linux/videodev2.h> 64#include <linux/videodev2.h>
@@ -320,6 +321,19 @@ const char **v4l2_ctrl_get_menu(u32 id)
320 "Private packet, IVTV format", 321 "Private packet, IVTV format",
321 NULL 322 NULL
322 }; 323 };
324 static const char *camera_power_line_frequency[] = {
325 "Disabled",
326 "50 Hz",
327 "60 Hz",
328 NULL
329 };
330 static const char *camera_exposure_auto[] = {
331 "Auto Mode",
332 "Manual Mode",
333 "Shutter Priority Mode",
334 "Aperture Priority Mode",
335 NULL
336 };
323 337
324 switch (id) { 338 switch (id) {
325 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: 339 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
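The menu tables added above are NULL-terminated string arrays indexed by a menu control's value; v4l2_ctrl_get_menu() returns the table matching a control ID and callers walk it until NULL. A small sketch of walking such a table (the table contents are copied from the hunk, the lookup helper is illustrative):

#include <stdio.h>

static const char *camera_power_line_frequency[] = {
        "Disabled",
        "50 Hz",
        "60 Hz",
        NULL
};

/* Return the label for a menu index, or NULL if the index is out of range. */
static const char *menu_label(const char **table, unsigned int index)
{
        unsigned int i;

        for (i = 0; table[i] != NULL; i++)
                if (i == index)
                        return table[i];
        return NULL;
}

int main(void)
{
        printf("%s\n", menu_label(camera_power_line_frequency, 1)); /* 50 Hz */
        return 0;
}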
@@ -352,6 +366,10 @@ const char **v4l2_ctrl_get_menu(u32 id)
352 return mpeg_stream_type; 366 return mpeg_stream_type;
353 case V4L2_CID_MPEG_STREAM_VBI_FMT: 367 case V4L2_CID_MPEG_STREAM_VBI_FMT:
354 return mpeg_stream_vbi_fmt; 368 return mpeg_stream_vbi_fmt;
369 case V4L2_CID_POWER_LINE_FREQUENCY:
370 return camera_power_line_frequency;
371 case V4L2_CID_EXPOSURE_AUTO:
372 return camera_exposure_auto;
355 default: 373 default:
356 return NULL; 374 return NULL;
357 } 375 }
@@ -363,17 +381,37 @@ const char *v4l2_ctrl_get_name(u32 id)
363{ 381{
364 switch (id) { 382 switch (id) {
365 /* USER controls */ 383 /* USER controls */
366 case V4L2_CID_USER_CLASS: return "User Controls"; 384 case V4L2_CID_USER_CLASS: return "User Controls";
367 case V4L2_CID_AUDIO_VOLUME: return "Volume"; 385 case V4L2_CID_AUDIO_VOLUME: return "Volume";
368 case V4L2_CID_AUDIO_MUTE: return "Mute"; 386 case V4L2_CID_AUDIO_MUTE: return "Mute";
369 case V4L2_CID_AUDIO_BALANCE: return "Balance"; 387 case V4L2_CID_AUDIO_BALANCE: return "Balance";
370 case V4L2_CID_AUDIO_BASS: return "Bass"; 388 case V4L2_CID_AUDIO_BASS: return "Bass";
371 case V4L2_CID_AUDIO_TREBLE: return "Treble"; 389 case V4L2_CID_AUDIO_TREBLE: return "Treble";
372 case V4L2_CID_AUDIO_LOUDNESS: return "Loudness"; 390 case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
373 case V4L2_CID_BRIGHTNESS: return "Brightness"; 391 case V4L2_CID_BRIGHTNESS: return "Brightness";
374 case V4L2_CID_CONTRAST: return "Contrast"; 392 case V4L2_CID_CONTRAST: return "Contrast";
375 case V4L2_CID_SATURATION: return "Saturation"; 393 case V4L2_CID_SATURATION: return "Saturation";
376 case V4L2_CID_HUE: return "Hue"; 394 case V4L2_CID_HUE: return "Hue";
395 case V4L2_CID_BLACK_LEVEL: return "Black Level";
396 case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
397 case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
398 case V4L2_CID_RED_BALANCE: return "Red Balance";
399 case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
400 case V4L2_CID_GAMMA: return "Gamma";
401 case V4L2_CID_EXPOSURE: return "Exposure";
402 case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
403 case V4L2_CID_GAIN: return "Gain";
404 case V4L2_CID_HFLIP: return "Horizontal Flip";
405 case V4L2_CID_VFLIP: return "Vertical Flip";
406 case V4L2_CID_HCENTER: return "Horizontal Center";
407 case V4L2_CID_VCENTER: return "Vertical Center";
408 case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
409 case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
410 case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
411 case V4L2_CID_SHARPNESS: return "Sharpness";
412 case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
413 case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
414 case V4L2_CID_COLOR_KILLER: return "Color Killer";
377 415
378 /* MPEG controls */ 416 /* MPEG controls */
379 case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls"; 417 case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
@@ -410,6 +448,25 @@ const char *v4l2_ctrl_get_name(u32 id)
410 case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID"; 448 case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
411 case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format"; 449 case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
412 450
451 /* CAMERA controls */
452 case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
453 case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
454 case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
455 case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
456 case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
457 case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
458 case V4L2_CID_PAN_RESET: return "Pan, Reset";
459 case V4L2_CID_TILT_RESET: return "Tilt, Reset";
460 case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
461 case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
462 case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
463 case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
464 case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic";
465 case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
466 case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
467 case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
468 case V4L2_CID_PRIVACY: return "Privacy";
469
413 default: 470 default:
414 return NULL; 471 return NULL;
415 } 472 }
@@ -428,14 +485,22 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
428 switch (qctrl->id) { 485 switch (qctrl->id) {
429 case V4L2_CID_AUDIO_MUTE: 486 case V4L2_CID_AUDIO_MUTE:
430 case V4L2_CID_AUDIO_LOUDNESS: 487 case V4L2_CID_AUDIO_LOUDNESS:
488 case V4L2_CID_AUTO_WHITE_BALANCE:
489 case V4L2_CID_AUTOGAIN:
490 case V4L2_CID_HFLIP:
491 case V4L2_CID_VFLIP:
492 case V4L2_CID_HUE_AUTO:
431 case V4L2_CID_MPEG_AUDIO_MUTE: 493 case V4L2_CID_MPEG_AUDIO_MUTE:
432 case V4L2_CID_MPEG_VIDEO_MUTE: 494 case V4L2_CID_MPEG_VIDEO_MUTE:
433 case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: 495 case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
434 case V4L2_CID_MPEG_VIDEO_PULLDOWN: 496 case V4L2_CID_MPEG_VIDEO_PULLDOWN:
497 case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
498 case V4L2_CID_PRIVACY:
435 qctrl->type = V4L2_CTRL_TYPE_BOOLEAN; 499 qctrl->type = V4L2_CTRL_TYPE_BOOLEAN;
436 min = 0; 500 min = 0;
437 max = step = 1; 501 max = step = 1;
438 break; 502 break;
503 case V4L2_CID_POWER_LINE_FREQUENCY:
439 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: 504 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
440 case V4L2_CID_MPEG_AUDIO_ENCODING: 505 case V4L2_CID_MPEG_AUDIO_ENCODING:
441 case V4L2_CID_MPEG_AUDIO_L1_BITRATE: 506 case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
@@ -451,10 +516,12 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
451 case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: 516 case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
452 case V4L2_CID_MPEG_STREAM_TYPE: 517 case V4L2_CID_MPEG_STREAM_TYPE:
453 case V4L2_CID_MPEG_STREAM_VBI_FMT: 518 case V4L2_CID_MPEG_STREAM_VBI_FMT:
519 case V4L2_CID_EXPOSURE_AUTO:
454 qctrl->type = V4L2_CTRL_TYPE_MENU; 520 qctrl->type = V4L2_CTRL_TYPE_MENU;
455 step = 1; 521 step = 1;
456 break; 522 break;
457 case V4L2_CID_USER_CLASS: 523 case V4L2_CID_USER_CLASS:
524 case V4L2_CID_CAMERA_CLASS:
458 case V4L2_CID_MPEG_CLASS: 525 case V4L2_CID_MPEG_CLASS:
459 qctrl->type = V4L2_CTRL_TYPE_CTRL_CLASS; 526 qctrl->type = V4L2_CTRL_TYPE_CTRL_CLASS;
460 qctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; 527 qctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
@@ -801,4 +868,116 @@ int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver
801 return err != -ENOMEM ? 0 : err; 868 return err != -ENOMEM ? 0 : err;
802} 869}
803EXPORT_SYMBOL(v4l2_i2c_attach); 870EXPORT_SYMBOL(v4l2_i2c_attach);
871
872void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
873 const struct v4l2_subdev_ops *ops)
874{
875 v4l2_subdev_init(sd, ops);
876 /* the owner is the same as the i2c_client's driver owner */
877 sd->owner = client->driver->driver.owner;
878 /* i2c_client and v4l2_subdev point to one another */
879 v4l2_set_subdevdata(sd, client);
880 i2c_set_clientdata(client, sd);
881 /* initialize name */
882 snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
883 client->driver->driver.name, i2c_adapter_id(client->adapter),
884 client->addr);
885}
886EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
887
888
889
890/* Load an i2c sub-device. It assumes that i2c_get_adapdata(adapter)
891 returns the v4l2_device and that i2c_get_clientdata(client)
892 returns the v4l2_subdev. */
893struct v4l2_subdev *v4l2_i2c_new_subdev(struct i2c_adapter *adapter,
894 const char *module_name, const char *client_type, u8 addr)
895{
896 struct v4l2_device *dev = i2c_get_adapdata(adapter);
897 struct v4l2_subdev *sd = NULL;
898 struct i2c_client *client;
899 struct i2c_board_info info;
900
901 BUG_ON(!dev);
902#ifdef MODULE
903 if (module_name)
904 request_module(module_name);
905#endif
906	/* Set up the i2c board info with the device type and
907 the device address. */
908 memset(&info, 0, sizeof(info));
909 strlcpy(info.type, client_type, sizeof(info.type));
910 info.addr = addr;
911
912 /* Create the i2c client */
913 client = i2c_new_device(adapter, &info);
914 /* Note: it is possible in the future that
915 c->driver is NULL if the driver is still being loaded.
916 We need better support from the kernel so that we
917 can easily wait for the load to finish. */
918 if (client == NULL || client->driver == NULL)
919 return NULL;
920
921 /* Lock the module so we can safely get the v4l2_subdev pointer */
922 if (!try_module_get(client->driver->driver.owner))
923 return NULL;
924 sd = i2c_get_clientdata(client);
925
926 /* Register with the v4l2_device which increases the module's
927 use count as well. */
928 if (v4l2_device_register_subdev(dev, sd))
929 sd = NULL;
930 /* Decrease the module use count to match the first try_module_get. */
931 module_put(client->driver->driver.owner);
932 return sd;
933
934}
935EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
936
937/* Probe and load an i2c sub-device. It assumes that i2c_get_adapdata(adapter)
938 returns the v4l2_device and that i2c_get_clientdata(client)
939 returns the v4l2_subdev. */
940struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct i2c_adapter *adapter,
941 const char *module_name, const char *client_type,
942 const unsigned short *addrs)
943{
944 struct v4l2_device *dev = i2c_get_adapdata(adapter);
945 struct v4l2_subdev *sd = NULL;
946 struct i2c_client *client = NULL;
947 struct i2c_board_info info;
948
949 BUG_ON(!dev);
950#ifdef MODULE
951 if (module_name)
952 request_module(module_name);
953#endif
954	/* Set up the i2c board info with the device type and
955 the device address. */
956 memset(&info, 0, sizeof(info));
957 strlcpy(info.type, client_type, sizeof(info.type));
958
959 /* Probe and create the i2c client */
960 client = i2c_new_probed_device(adapter, &info, addrs);
961 /* Note: it is possible in the future that
962	   client->driver is NULL if the driver is still being loaded.
963 We need better support from the kernel so that we
964 can easily wait for the load to finish. */
965 if (client == NULL || client->driver == NULL)
966 return NULL;
967
968 /* Lock the module so we can safely get the v4l2_subdev pointer */
969 if (!try_module_get(client->driver->driver.owner))
970 return NULL;
971 sd = i2c_get_clientdata(client);
972
973 /* Register with the v4l2_device which increases the module's
974 use count as well. */
975 if (v4l2_device_register_subdev(dev, sd))
976 sd = NULL;
977 /* Decrease the module use count to match the first try_module_get. */
978 module_put(client->driver->driver.owner);
979 return sd;
980}
981EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev);
982
804#endif 983#endif
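v4l2_i2c_new_subdev() above assumes i2c_get_adapdata(adapter) returns the bridge driver's struct v4l2_device, so a bridge would typically call it from probe after registering that v4l2_device and storing it as adapter data. A hedged sketch of such a call site; struct my_bridge, the header choices, the "tuner" module/chip names and the 0x43 address are placeholders, not taken from this patch:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>

/* Hypothetical bridge-driver state; only the fields used here. */
struct my_bridge {
        struct v4l2_device v4l2_dev;
        struct i2c_adapter *adapter;
        struct v4l2_subdev *tuner;
};

/* Assumes v4l2_device_register() succeeded earlier and that
 * i2c_set_adapdata(my->adapter, &my->v4l2_dev) was done, so the helper
 * can find the v4l2_device from the adapter. */
static int my_bridge_attach_tuner(struct my_bridge *my)
{
        my->tuner = v4l2_i2c_new_subdev(my->adapter, "tuner", "tuner", 0x43);
        if (my->tuner == NULL)
                return -ENODEV;         /* driver missing or probe failed */
        return 0;
}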
diff --git a/drivers/media/video/compat_ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 0ea85a05e5c0..d0e1bd3ace6a 100644
--- a/drivers/media/video/compat_ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -7,12 +7,14 @@
7 * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs 7 * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
8 * Copyright (C) 2003 Pavel Machek (pavel@suse.cz) 8 * Copyright (C) 2003 Pavel Machek (pavel@suse.cz)
9 * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be) 9 * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be)
10 * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
10 * 11 *
11 * These routines maintain argument size conversion between 32bit and 64bit 12 * These routines maintain argument size conversion between 32bit and 64bit
12 * ioctls. 13 * ioctls.
13 */ 14 */
14 15
15#include <linux/compat.h> 16#include <linux/compat.h>
17#define __OLD_VIDIOC_ /* To allow fixing old calls*/
16#include <linux/videodev.h> 18#include <linux/videodev.h>
17#include <linux/videodev2.h> 19#include <linux/videodev2.h>
18#include <linux/module.h> 20#include <linux/module.h>
@@ -32,7 +34,7 @@ struct video_tuner32 {
32 34
33static int get_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up) 35static int get_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up)
34{ 36{
35 if(!access_ok(VERIFY_READ, up, sizeof(struct video_tuner32)) || 37 if (!access_ok(VERIFY_READ, up, sizeof(struct video_tuner32)) ||
36 get_user(kp->tuner, &up->tuner) || 38 get_user(kp->tuner, &up->tuner) ||
37 copy_from_user(kp->name, up->name, 32) || 39 copy_from_user(kp->name, up->name, 32) ||
38 get_user(kp->rangelow, &up->rangelow) || 40 get_user(kp->rangelow, &up->rangelow) ||
@@ -46,7 +48,7 @@ static int get_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user
46 48
47static int put_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up) 49static int put_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up)
48{ 50{
49 if(!access_ok(VERIFY_WRITE, up, sizeof(struct video_tuner32)) || 51 if (!access_ok(VERIFY_WRITE, up, sizeof(struct video_tuner32)) ||
50 put_user(kp->tuner, &up->tuner) || 52 put_user(kp->tuner, &up->tuner) ||
51 copy_to_user(up->name, kp->name, 32) || 53 copy_to_user(up->name, kp->name, 32) ||
52 put_user(kp->rangelow, &up->rangelow) || 54 put_user(kp->rangelow, &up->rangelow) ||
@@ -58,7 +60,6 @@ static int put_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user
58 return 0; 60 return 0;
59} 61}
60 62
61
62struct video_buffer32 { 63struct video_buffer32 {
63 compat_caddr_t base; 64 compat_caddr_t base;
64 compat_int_t height, width, depth, bytesperline; 65 compat_int_t height, width, depth, bytesperline;
@@ -88,7 +89,7 @@ static int put_video_buffer32(struct video_buffer *kp, struct video_buffer32 __u
88{ 89{
89 u32 tmp = (u32)((unsigned long)kp->base); 90 u32 tmp = (u32)((unsigned long)kp->base);
90 91
91 if(!access_ok(VERIFY_WRITE, up, sizeof(struct video_buffer32)) || 92 if (!access_ok(VERIFY_WRITE, up, sizeof(struct video_buffer32)) ||
92 put_user(tmp, &up->base) || 93 put_user(tmp, &up->base) ||
93 put_user(kp->height, &up->height) || 94 put_user(kp->height, &up->height) ||
94 put_user(kp->width, &up->width) || 95 put_user(kp->width, &up->width) ||
@@ -99,7 +100,7 @@ static int put_video_buffer32(struct video_buffer *kp, struct video_buffer32 __u
99} 100}
100 101
101struct video_clip32 { 102struct video_clip32 {
102 s32 x, y, width, height; /* Its really s32 in videodev.h */ 103 s32 x, y, width, height; /* It's really s32 in videodev.h */
103 compat_caddr_t next; 104 compat_caddr_t next;
104}; 105};
105 106
@@ -108,29 +109,76 @@ struct video_window32 {
108 compat_caddr_t clips; 109 compat_caddr_t clips;
109 compat_int_t clipcount; 110 compat_int_t clipcount;
110}; 111};
111#endif
112 112
113static int native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 113static int get_video_window32(struct video_window *kp, struct video_window32 __user *up)
114{ 114{
115 int ret = -ENOIOCTLCMD; 115 struct video_clip __user *uclips;
116 struct video_clip __user *kclips;
117 compat_caddr_t p;
118 int nclips;
116 119
117 if (file->f_op->unlocked_ioctl) 120 if (!access_ok(VERIFY_READ, up, sizeof(struct video_window32)))
118 ret = file->f_op->unlocked_ioctl(file, cmd, arg); 121 return -EFAULT;
119 else if (file->f_op->ioctl) { 122
120 lock_kernel(); 123 if (get_user(nclips, &up->clipcount))
121 ret = file->f_op->ioctl(file->f_path.dentry->d_inode, file, cmd, arg); 124 return -EFAULT;
122 unlock_kernel(); 125
126 if (!access_ok(VERIFY_READ, up, sizeof(struct video_window32)) ||
127 get_user(kp->x, &up->x) ||
128 get_user(kp->y, &up->y) ||
129 get_user(kp->width, &up->width) ||
130 get_user(kp->height, &up->height) ||
131 get_user(kp->chromakey, &up->chromakey) ||
132 get_user(kp->flags, &up->flags) ||
133 get_user(kp->clipcount, &up->clipcount))
134 return -EFAULT;
135
136 nclips = kp->clipcount;
137 kp->clips = NULL;
138
139 if (nclips == 0)
140 return 0;
141 if (get_user(p, &up->clips))
142 return -EFAULT;
143 uclips = compat_ptr(p);
144
145 /* If nclips < 0, then it is a clipping bitmap of size
146 VIDEO_CLIPMAP_SIZE */
147 if (nclips < 0) {
148 if (!access_ok(VERIFY_READ, uclips, VIDEO_CLIPMAP_SIZE))
149 return -EFAULT;
150 kp->clips = compat_alloc_user_space(VIDEO_CLIPMAP_SIZE);
151 if (copy_in_user(kp->clips, uclips, VIDEO_CLIPMAP_SIZE))
152 return -EFAULT;
153 return 0;
123 } 154 }
124 155
125 return ret; 156 /* Otherwise it is an array of video_clip structs. */
126} 157 if (!access_ok(VERIFY_READ, uclips, nclips * sizeof(struct video_clip)))
158 return -EFAULT;
127 159
160 kp->clips = compat_alloc_user_space(nclips * sizeof(struct video_clip));
161 kclips = kp->clips;
162 while (nclips--) {
163 int err;
164
165 err = copy_in_user(&kclips->x, &uclips->x, sizeof(kclips->x));
166 err |= copy_in_user(&kclips->y, &uclips->y, sizeof(kclips->y));
167 err |= copy_in_user(&kclips->width, &uclips->width, sizeof(kclips->width));
168 err |= copy_in_user(&kclips->height, &uclips->height, sizeof(kclips->height));
169 kclips->next = NULL;
170 if (err)
171 return -EFAULT;
172 kclips++;
173 uclips++;
174 }
175 return 0;
176}
128 177
129#ifdef CONFIG_VIDEO_V4L1_COMPAT
130/* You get back everything except the clips... */ 178/* You get back everything except the clips... */
131static int put_video_window32(struct video_window *kp, struct video_window32 __user *up) 179static int put_video_window32(struct video_window *kp, struct video_window32 __user *up)
132{ 180{
133 if(!access_ok(VERIFY_WRITE, up, sizeof(struct video_window32)) || 181 if (!access_ok(VERIFY_WRITE, up, sizeof(struct video_window32)) ||
134 put_user(kp->x, &up->x) || 182 put_user(kp->x, &up->x) ||
135 put_user(kp->y, &up->y) || 183 put_user(kp->y, &up->y) ||
136 put_user(kp->width, &up->width) || 184 put_user(kp->width, &up->width) ||
@@ -141,16 +189,61 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u
141 return -EFAULT; 189 return -EFAULT;
142 return 0; 190 return 0;
143} 191}
192
193struct video_code32 {
194 char loadwhat[16]; /* name or tag of file being passed */
195 compat_int_t datasize;
196 unsigned char *data;
197};
198
199static int get_microcode32(struct video_code *kp, struct video_code32 __user *up)
200{
201 if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) ||
202 copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) ||
203 get_user(kp->datasize, &up->datasize) ||
204 copy_from_user(kp->data, up->data, up->datasize))
205 return -EFAULT;
206 return 0;
207}
208
209#define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32)
210#define VIDIOCSTUNER32 _IOW('v', 5, struct video_tuner32)
211#define VIDIOCGWIN32 _IOR('v', 9, struct video_window32)
212#define VIDIOCSWIN32 _IOW('v', 10, struct video_window32)
213#define VIDIOCGFBUF32 _IOR('v', 11, struct video_buffer32)
214#define VIDIOCSFBUF32 _IOW('v', 12, struct video_buffer32)
215#define VIDIOCGFREQ32 _IOR('v', 14, u32)
216#define VIDIOCSFREQ32 _IOW('v', 15, u32)
217#define VIDIOCSMICROCODE32 _IOW('v', 27, struct video_code32)
218
219#define VIDIOCCAPTURE32 _IOW('v', 8, s32)
220#define VIDIOCSYNC32 _IOW('v', 18, s32)
221#define VIDIOCSWRITEMODE32 _IOW('v', 25, s32)
222
144#endif 223#endif
145 224
146struct v4l2_clip32 225static int native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
147{ 226{
227 int ret = -ENOIOCTLCMD;
228
229 if (file->f_op->unlocked_ioctl)
230 ret = file->f_op->unlocked_ioctl(file, cmd, arg);
231 else if (file->f_op->ioctl) {
232 lock_kernel();
233 ret = file->f_op->ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
234 unlock_kernel();
235 }
236
237 return ret;
238}
239
240
241struct v4l2_clip32 {
148 struct v4l2_rect c; 242 struct v4l2_rect c;
149 compat_caddr_t next; 243 compat_caddr_t next;
150}; 244};
151 245
152struct v4l2_window32 246struct v4l2_window32 {
153{
154 struct v4l2_rect w; 247 struct v4l2_rect w;
155 enum v4l2_field field; 248 enum v4l2_field field;
156 __u32 chromakey; 249 __u32 chromakey;
@@ -231,15 +324,28 @@ static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vb
231 return 0; 324 return 0;
232} 325}
233 326
234struct v4l2_format32 327static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
328{
329 if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
330 return -EFAULT;
331 return 0;
332}
333
334static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
235{ 335{
336 if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
337 return -EFAULT;
338 return 0;
339}
340
341struct v4l2_format32 {
236 enum v4l2_buf_type type; 342 enum v4l2_buf_type type;
237 union 343 union {
238 { 344 struct v4l2_pix_format pix;
239 struct v4l2_pix_format pix; // V4L2_BUF_TYPE_VIDEO_CAPTURE 345 struct v4l2_window32 win;
240 struct v4l2_window32 win; // V4L2_BUF_TYPE_VIDEO_OVERLAY 346 struct v4l2_vbi_format vbi;
241 struct v4l2_vbi_format vbi; // V4L2_BUF_TYPE_VBI_CAPTURE 347 struct v4l2_sliced_vbi_format sliced;
242 __u8 raw_data[200]; // user-defined 348 __u8 raw_data[200]; /* user-defined */
243 } fmt; 349 } fmt;
244}; 350};
245 351
@@ -250,52 +356,62 @@ static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
250 return -EFAULT; 356 return -EFAULT;
251 switch (kp->type) { 357 switch (kp->type) {
252 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 358 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
359 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
253 return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); 360 return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
254 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 361 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
362 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
255 return get_v4l2_window32(&kp->fmt.win, &up->fmt.win); 363 return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
256 case V4L2_BUF_TYPE_VBI_CAPTURE: 364 case V4L2_BUF_TYPE_VBI_CAPTURE:
365 case V4L2_BUF_TYPE_VBI_OUTPUT:
257 return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); 366 return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
367 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
368 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
369 return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
370 case V4L2_BUF_TYPE_PRIVATE:
371 if (copy_from_user(kp, up, sizeof(kp->fmt.raw_data)))
372 return -EFAULT;
373 return 0;
374 case 0:
375 return -EINVAL;
258 default: 376 default:
259 printk("compat_ioctl : unexpected VIDIOC_FMT type %d\n", 377 printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
260 kp->type); 378 kp->type);
261 return -ENXIO; 379 return -EINVAL;
262 } 380 }
263} 381}
264 382
265static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 383static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
266{ 384{
267 if(!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) || 385 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
268 put_user(kp->type, &up->type)) 386 put_user(kp->type, &up->type))
269 return -EFAULT; 387 return -EFAULT;
270 switch (kp->type) { 388 switch (kp->type) {
271 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 389 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
390 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
272 return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); 391 return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
273 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 392 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
393 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
274 return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); 394 return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
275 case V4L2_BUF_TYPE_VBI_CAPTURE: 395 case V4L2_BUF_TYPE_VBI_CAPTURE:
396 case V4L2_BUF_TYPE_VBI_OUTPUT:
276 return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); 397 return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
398 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
399 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
400 return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
401 case V4L2_BUF_TYPE_PRIVATE:
402 if (copy_to_user(up, kp, sizeof(up->fmt.raw_data)))
403 return -EFAULT;
404 return 0;
405 case 0:
406 return -EINVAL;
277 default: 407 default:
278 return -ENXIO; 408 printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
409 kp->type);
410 return -EINVAL;
279 } 411 }
280} 412}
281 413
282static inline int get_v4l2_standard(struct v4l2_standard *kp, struct v4l2_standard __user *up) 414struct v4l2_standard32 {
283{
284 if (copy_from_user(kp, up, sizeof(struct v4l2_standard)))
285 return -EFAULT;
286 return 0;
287
288}
289
290static inline int put_v4l2_standard(struct v4l2_standard *kp, struct v4l2_standard __user *up)
291{
292 if (copy_to_user(up, kp, sizeof(struct v4l2_standard)))
293 return -EFAULT;
294 return 0;
295}
296
297struct v4l2_standard32
298{
299 __u32 index; 415 __u32 index;
300 __u32 id[2]; /* __u64 would get the alignment wrong */ 416 __u32 id[2]; /* __u64 would get the alignment wrong */
301 __u8 name[24]; 417 __u8 name[24];
@@ -315,7 +431,7 @@ static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32
315 431
316static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) 432static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
317{ 433{
318 if(!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || 434 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
319 put_user(kp->index, &up->index) || 435 put_user(kp->index, &up->index) ||
320 copy_to_user(up->id, &kp->id, sizeof(__u64)) || 436 copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
321 copy_to_user(up->name, kp->name, 24) || 437 copy_to_user(up->name, kp->name, 24) ||
@@ -326,23 +442,7 @@ static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32
326 return 0; 442 return 0;
327} 443}
328 444
329static inline int get_v4l2_tuner(struct v4l2_tuner *kp, struct v4l2_tuner __user *up) 445struct v4l2_buffer32 {
330{
331 if (copy_from_user(kp, up, sizeof(struct v4l2_tuner)))
332 return -EFAULT;
333 return 0;
334
335}
336
337static inline int put_v4l2_tuner(struct v4l2_tuner *kp, struct v4l2_tuner __user *up)
338{
339 if (copy_to_user(up, kp, sizeof(struct v4l2_tuner)))
340 return -EFAULT;
341 return 0;
342}
343
344struct v4l2_buffer32
345{
346 __u32 index; 446 __u32 index;
347 enum v4l2_buf_type type; 447 enum v4l2_buf_type type;
348 __u32 bytesused; 448 __u32 bytesused;
@@ -373,7 +473,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
373 get_user(kp->memory, &up->memory) || 473 get_user(kp->memory, &up->memory) ||
374 get_user(kp->input, &up->input)) 474 get_user(kp->input, &up->input))
375 return -EFAULT; 475 return -EFAULT;
376 switch(kp->memory) { 476 switch (kp->memory) {
377 case V4L2_MEMORY_MMAP: 477 case V4L2_MEMORY_MMAP:
378 break; 478 break;
379 case V4L2_MEMORY_USERPTR: 479 case V4L2_MEMORY_USERPTR:
@@ -388,7 +488,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
388 } 488 }
389 break; 489 break;
390 case V4L2_MEMORY_OVERLAY: 490 case V4L2_MEMORY_OVERLAY:
391 if(get_user(kp->m.offset, &up->m.offset)) 491 if (get_user(kp->m.offset, &up->m.offset))
392 return -EFAULT; 492 return -EFAULT;
393 break; 493 break;
394 } 494 }
@@ -404,7 +504,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
404 put_user(kp->memory, &up->memory) || 504 put_user(kp->memory, &up->memory) ||
405 put_user(kp->input, &up->input)) 505 put_user(kp->input, &up->input))
406 return -EFAULT; 506 return -EFAULT;
407 switch(kp->memory) { 507 switch (kp->memory) {
408 case V4L2_MEMORY_MMAP: 508 case V4L2_MEMORY_MMAP:
409 if (put_user(kp->length, &up->length) || 509 if (put_user(kp->length, &up->length) ||
410 put_user(kp->m.offset, &up->m.offset)) 510 put_user(kp->m.offset, &up->m.offset))
@@ -431,8 +531,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
431 return 0; 531 return 0;
432} 532}
433 533
434struct v4l2_framebuffer32 534struct v4l2_framebuffer32 {
435{
436 __u32 capability; 535 __u32 capability;
437 __u32 flags; 536 __u32 flags;
438 compat_caddr_t base; 537 compat_caddr_t base;
@@ -457,7 +556,7 @@ static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
457{ 556{
458 u32 tmp = (u32)((unsigned long)kp->base); 557 u32 tmp = (u32)((unsigned long)kp->base);
459 558
460 if(!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) || 559 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
461 put_user(tmp, &up->base) || 560 put_user(tmp, &up->base) ||
462 put_user(kp->capability, &up->capability) || 561 put_user(kp->capability, &up->capability) ||
463 put_user(kp->flags, &up->flags)) 562 put_user(kp->flags, &up->flags))
@@ -466,150 +565,145 @@ static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
466 return 0; 565 return 0;
467} 566}
468 567
469static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input __user *up) 568struct v4l2_input32 {
569 __u32 index; /* Which input */
570 __u8 name[32]; /* Label */
571 __u32 type; /* Type of input */
572 __u32 audioset; /* Associated audios (bitfield) */
573 __u32 tuner; /* Associated tuner */
574 v4l2_std_id std;
575 __u32 status;
576 __u32 reserved[4];
577} __attribute__ ((packed));
578
579/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
580 Otherwise it is identical to the 32-bit version. */
581static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
470{ 582{
471 if (copy_from_user(kp, up, sizeof(struct v4l2_input) - 4)) 583 if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
472 return -EFAULT; 584 return -EFAULT;
473 return 0; 585 return 0;
474} 586}
475 587
476static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input __user *up) 588static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
477{ 589{
478 if (copy_to_user(up, kp, sizeof(struct v4l2_input) - 4)) 590 if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
479 return -EFAULT; 591 return -EFAULT;
480 return 0; 592 return 0;
481} 593}
482 594
483static inline int get_v4l2_input(struct v4l2_input *kp, struct v4l2_input __user *up) 595struct v4l2_ext_controls32 {
484{ 596 __u32 ctrl_class;
485 if (copy_from_user(kp, up, sizeof(struct v4l2_input))) 597 __u32 count;
486 return -EFAULT; 598 __u32 error_idx;
487 return 0; 599 __u32 reserved[2];
488} 600 compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
489
490static inline int put_v4l2_input(struct v4l2_input *kp, struct v4l2_input __user *up)
491{
492 if (copy_to_user(up, kp, sizeof(struct v4l2_input)))
493 return -EFAULT;
494 return 0;
495}
496
497#ifdef CONFIG_VIDEO_V4L1_COMPAT
498struct video_code32
499{
500 char loadwhat[16]; /* name or tag of file being passed */
501 compat_int_t datasize;
502 unsigned char *data;
503}; 601};
504 602
505static inline int microcode32(struct video_code *kp, struct video_code32 __user *up) 603static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
506{ 604{
507 if(!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) || 605 struct v4l2_ext_control __user *ucontrols;
508 copy_from_user(kp->loadwhat, up->loadwhat, sizeof (up->loadwhat)) || 606 struct v4l2_ext_control __user *kcontrols;
509 get_user(kp->datasize, &up->datasize) || 607 int n;
510 copy_from_user(kp->data, up->data, up->datasize)) 608 compat_caddr_t p;
609
610 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
611 get_user(kp->ctrl_class, &up->ctrl_class) ||
612 get_user(kp->count, &up->count) ||
613 get_user(kp->error_idx, &up->error_idx) ||
614 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
511 return -EFAULT; 615 return -EFAULT;
616 n = kp->count;
617 if (n == 0) {
618 kp->controls = NULL;
619 return 0;
620 }
621 if (get_user(p, &up->controls))
622 return -EFAULT;
623 ucontrols = compat_ptr(p);
624 if (!access_ok(VERIFY_READ, ucontrols, n * sizeof(struct v4l2_ext_control)))
625 return -EFAULT;
626 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
627 kp->controls = kcontrols;
628 while (--n >= 0) {
629 if (copy_in_user(&kcontrols->id, &ucontrols->id, sizeof(__u32)))
630 return -EFAULT;
631 if (copy_in_user(&kcontrols->reserved2, &ucontrols->reserved2, sizeof(ucontrols->reserved2)))
632 return -EFAULT;
633 /* Note: if the void * part of the union ever becomes relevant
634 then we need to know the type of the control in order to do
635 the right thing here. Luckily, that is not yet an issue. */
636 if (copy_in_user(&kcontrols->value, &ucontrols->value, sizeof(ucontrols->value)))
637 return -EFAULT;
638 ucontrols++;
639 kcontrols++;
640 }
512 return 0; 641 return 0;
513} 642}
514 643
515#define VIDIOCGTUNER32 _IOWR('v',4, struct video_tuner32) 644static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
516#define VIDIOCSTUNER32 _IOW('v',5, struct video_tuner32)
517#define VIDIOCGWIN32 _IOR('v',9, struct video_window32)
518#define VIDIOCSWIN32 _IOW('v',10, struct video_window32)
519#define VIDIOCGFBUF32 _IOR('v',11, struct video_buffer32)
520#define VIDIOCSFBUF32 _IOW('v',12, struct video_buffer32)
521#define VIDIOCGFREQ32 _IOR('v',14, u32)
522#define VIDIOCSFREQ32 _IOW('v',15, u32)
523#define VIDIOCSMICROCODE32 _IOW('v',27, struct video_code32)
524
525#endif
526
527/* VIDIOC_ENUMINPUT32 is VIDIOC_ENUMINPUT minus 4 bytes of padding alignment */
528#define VIDIOC_ENUMINPUT32 VIDIOC_ENUMINPUT - _IOC(0, 0, 0, 4)
529#define VIDIOC_G_FMT32 _IOWR ('V', 4, struct v4l2_format32)
530#define VIDIOC_S_FMT32 _IOWR ('V', 5, struct v4l2_format32)
531#define VIDIOC_QUERYBUF32 _IOWR ('V', 9, struct v4l2_buffer32)
532#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32)
533#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32)
534/* VIDIOC_OVERLAY is now _IOW, but was _IOWR */
535#define VIDIOC_OVERLAY32 _IOWR ('V', 14, compat_int_t)
536#define VIDIOC_QBUF32 _IOWR ('V', 15, struct v4l2_buffer32)
537#define VIDIOC_DQBUF32 _IOWR ('V', 17, struct v4l2_buffer32)
538#define VIDIOC_STREAMON32 _IOW ('V', 18, compat_int_t)
539#define VIDIOC_STREAMOFF32 _IOW ('V', 19, compat_int_t)
540#define VIDIOC_ENUMSTD32 _IOWR ('V', 25, struct v4l2_standard32)
541/* VIDIOC_S_CTRL is now _IOWR, but was _IOW */
542#define VIDIOC_S_CTRL32 _IOW ('V', 28, struct v4l2_control)
543#define VIDIOC_G_INPUT32 _IOR ('V', 38, compat_int_t)
544#define VIDIOC_S_INPUT32 _IOWR ('V', 39, compat_int_t)
545#define VIDIOC_TRY_FMT32 _IOWR ('V', 64, struct v4l2_format32)
546
547#ifdef CONFIG_VIDEO_V4L1_COMPAT
548enum {
549 MaxClips = (~0U-sizeof(struct video_window))/sizeof(struct video_clip)
550};
551
552static int do_set_window(struct file *file, unsigned int cmd, unsigned long arg)
553{ 645{
554 struct video_window32 __user *up = compat_ptr(arg); 646 struct v4l2_ext_control __user *ucontrols;
555 struct video_window __user *vw; 647 struct v4l2_ext_control __user *kcontrols = kp->controls;
556 struct video_clip __user *p; 648 int n = kp->count;
557 int nclips; 649 compat_caddr_t p;
558 u32 n; 650
559 651 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
560 if (!access_ok(VERIFY_READ, up, sizeof(struct video_window32))) 652 put_user(kp->ctrl_class, &up->ctrl_class) ||
561 return -EFAULT; 653 put_user(kp->count, &up->count) ||
654 put_user(kp->error_idx, &up->error_idx) ||
655 copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
656 return -EFAULT;
657 if (!kp->count)
658 return 0;
562 659
563 if (get_user(nclips, &up->clipcount)) 660 if (get_user(p, &up->controls))
564 return -EFAULT; 661 return -EFAULT;
565 662 ucontrols = compat_ptr(p);
566 /* Peculiar interface... */ 663 if (!access_ok(VERIFY_WRITE, ucontrols, n * sizeof(struct v4l2_ext_control)))
567 if (nclips < 0)
568 nclips = VIDEO_CLIPMAP_SIZE;
569
570 if (nclips > MaxClips)
571 return -ENOMEM;
572
573 vw = compat_alloc_user_space(sizeof(struct video_window) +
574 nclips * sizeof(struct video_clip));
575
576 p = nclips ? (struct video_clip __user *)(vw + 1) : NULL;
577
578 if (get_user(n, &up->x) || put_user(n, &vw->x) ||
579 get_user(n, &up->y) || put_user(n, &vw->y) ||
580 get_user(n, &up->width) || put_user(n, &vw->width) ||
581 get_user(n, &up->height) || put_user(n, &vw->height) ||
582 get_user(n, &up->chromakey) || put_user(n, &vw->chromakey) ||
583 get_user(n, &up->flags) || put_user(n, &vw->flags) ||
584 get_user(n, &up->clipcount) || put_user(n, &vw->clipcount) ||
585 get_user(n, &up->clips) || put_user(p, &vw->clips))
586 return -EFAULT; 664 return -EFAULT;
587 665
588 if (nclips) { 666 while (--n >= 0) {
589 struct video_clip32 __user *u = compat_ptr(n); 667 if (copy_in_user(&ucontrols->id, &kcontrols->id, sizeof(__u32)))
590 int i; 668 return -EFAULT;
591 if (!u) 669 if (copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2,
592 return -EINVAL; 670 sizeof(ucontrols->reserved2)))
593 for (i = 0; i < nclips; i++, u++, p++) { 671 return -EFAULT;
594 s32 v; 672 /* Note: if the void * part of the union ever becomes relevant
595 if (!access_ok(VERIFY_READ, u, sizeof(struct video_clip32)) || 673 then we need to know the type of the control in order to do
596 !access_ok(VERIFY_WRITE, p, sizeof(struct video_clip32)) || 674 the right thing here. Luckily, that is not yet an issue. */
597 get_user(v, &u->x) || 675 if (copy_in_user(&ucontrols->value, &kcontrols->value, sizeof(ucontrols->value)))
598 put_user(v, &p->x) || 676 return -EFAULT;
599 get_user(v, &u->y) || 677 ucontrols++;
600 put_user(v, &p->y) || 678 kcontrols++;
601 get_user(v, &u->width) ||
602 put_user(v, &p->width) ||
603 get_user(v, &u->height) ||
604 put_user(v, &p->height) ||
605 put_user(NULL, &p->next))
606 return -EFAULT;
607 }
608 } 679 }
609 680 return 0;
610 return native_ioctl(file, VIDIOCSWIN, (unsigned long)vw);
611} 681}
682
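v4l2_ext_controls differs from the flat structs handled so far because it carries a pointer to an array of controls, so the helpers above rebuild that array in a scratch area obtained from compat_alloc_user_space() and shuttle each element with copy_in_user(), leaving the native handler a pointer it can treat as ordinary user memory. A condensed sketch of that pattern on a hypothetical request struct — struct req/req32, struct item and get_req32() are made-up names, and the access_ok() checks done above are trimmed for brevity.

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct item {                           /* element layout identical on both ABIs */
        __u32 id;
        __u32 value;
};

struct req {                            /* native layout seen by the 64-bit handler */
        __u32 count;
        struct item __user *items;
};

struct req32 {                          /* 32-bit layout: the pointer is 32 bits wide */
        __u32 count;
        compat_caddr_t items;
};

static int get_req32(struct req *kp, struct req32 __user *up)
{
        struct item __user *uitems, *kitems;
        compat_caddr_t p;
        int n;

        if (get_user(kp->count, &up->count) || get_user(p, &up->items))
                return -EFAULT;
        n = kp->count;
        if (n == 0) {
                kp->items = NULL;
                return 0;
        }
        uitems = compat_ptr(p);
        /* Scratch space on the user stack, sized for the native element array. */
        kitems = compat_alloc_user_space(n * sizeof(struct item));
        kp->items = kitems;
        while (--n >= 0) {
                /* A single bulk copy would do for this toy layout; the loop
                   mirrors the per-element copies above, which must go field
                   by field because of the embedded union. */
                if (copy_in_user(kitems, uitems, sizeof(struct item)))
                        return -EFAULT;
                uitems++;
                kitems++;
        }
        return 0;
}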
683#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
684#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
685#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32)
686#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32)
687#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32)
688#define VIDIOC_QBUF32 _IOWR('V', 15, struct v4l2_buffer32)
689#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
690#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
691#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
692#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
693#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
694#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
695#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
696
697#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32)
698#ifdef __OLD_VIDIOC_
699#define VIDIOC_OVERLAY32_OLD _IOWR('V', 14, s32)
612#endif 700#endif
701#define VIDIOC_STREAMON32 _IOW ('V', 18, s32)
702#define VIDIOC_STREAMOFF32 _IOW ('V', 19, s32)
703#define VIDIOC_G_INPUT32 _IOR ('V', 38, s32)
704#define VIDIOC_S_INPUT32 _IOWR('V', 39, s32)
705#define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32)
706#define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32)
613 707
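The block of *32 command numbers above exists because _IOC() folds sizeof(arg) into the ioctl number, so the same request gets a different command value whenever the argument struct shrinks under the 32-bit ABI — VIDIOC_ENUMINPUT being the classic case, where only 4 bytes of tail padding disappear. A small user-space illustration of the size encoding; struct v4l2_input_native is a hand-written stand-in for the native layout, and the VIDIOC names themselves come from videodev2.h.

/* cc -m64 iocsize.c && ./a.out ; cc -m32 iocsize.c && ./a.out */
#include <stdio.h>
#include <linux/ioctl.h>
#include <linux/types.h>

struct v4l2_input_native {              /* same field list as struct v4l2_input */
        __u32 index;
        __u8  name[32];
        __u32 type;
        __u32 audioset;
        __u32 tuner;
        __u64 std;
        __u32 status;
        __u32 reserved[4];
};

int main(void)
{
        unsigned int cmd = _IOWR('V', 26, struct v4l2_input_native);

        /* -m64 prints 80, -m32 prints 76: that 4-byte difference is exactly
           why VIDIOC_ENUMINPUT32 is defined against struct v4l2_input32. */
        printf("cmd=%#x encodes a %u byte argument\n", cmd, (unsigned)_IOC_SIZE(cmd));
        return 0;
}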
614static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 708static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
615{ 709{
@@ -624,53 +718,60 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
624 struct v4l2_format v2f; 718 struct v4l2_format v2f;
625 struct v4l2_buffer v2b; 719 struct v4l2_buffer v2b;
626 struct v4l2_framebuffer v2fb; 720 struct v4l2_framebuffer v2fb;
627 struct v4l2_standard v2s;
628 struct v4l2_input v2i; 721 struct v4l2_input v2i;
629 struct v4l2_tuner v2t; 722 struct v4l2_standard v2s;
723 struct v4l2_ext_controls v2ecs;
630 unsigned long vx; 724 unsigned long vx;
725 int vi;
631 } karg; 726 } karg;
632 void __user *up = compat_ptr(arg); 727 void __user *up = compat_ptr(arg);
633 int compatible_arg = 1; 728 int compatible_arg = 1;
634 int err = 0; 729 int err = 0;
635 int realcmd = cmd;
636 730
637 /* First, convert the command. */ 731 /* First, convert the command. */
638 switch(cmd) { 732 switch (cmd) {
639#ifdef CONFIG_VIDEO_V4L1_COMPAT 733#ifdef CONFIG_VIDEO_V4L1_COMPAT
640 case VIDIOCGTUNER32: realcmd = cmd = VIDIOCGTUNER; break; 734 case VIDIOCGTUNER32: cmd = VIDIOCGTUNER; break;
641 case VIDIOCSTUNER32: realcmd = cmd = VIDIOCSTUNER; break; 735 case VIDIOCSTUNER32: cmd = VIDIOCSTUNER; break;
642 case VIDIOCGWIN32: realcmd = cmd = VIDIOCGWIN; break; 736 case VIDIOCGWIN32: cmd = VIDIOCGWIN; break;
643 case VIDIOCGFBUF32: realcmd = cmd = VIDIOCGFBUF; break; 737 case VIDIOCSWIN32: cmd = VIDIOCSWIN; break;
644 case VIDIOCSFBUF32: realcmd = cmd = VIDIOCSFBUF; break; 738 case VIDIOCGFBUF32: cmd = VIDIOCGFBUF; break;
645 case VIDIOCGFREQ32: realcmd = cmd = VIDIOCGFREQ; break; 739 case VIDIOCSFBUF32: cmd = VIDIOCSFBUF; break;
646 case VIDIOCSFREQ32: realcmd = cmd = VIDIOCSFREQ; break; 740 case VIDIOCGFREQ32: cmd = VIDIOCGFREQ; break;
647 case VIDIOCSMICROCODE32: realcmd = cmd = VIDIOCSMICROCODE; break; 741 case VIDIOCSFREQ32: cmd = VIDIOCSFREQ; break;
742 case VIDIOCSMICROCODE32: cmd = VIDIOCSMICROCODE; break;
648#endif 743#endif
649 case VIDIOC_G_FMT32: realcmd = cmd = VIDIOC_G_FMT; break; 744 case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
650 case VIDIOC_S_FMT32: realcmd = cmd = VIDIOC_S_FMT; break; 745 case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
651 case VIDIOC_QUERYBUF32: realcmd = cmd = VIDIOC_QUERYBUF; break; 746 case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
652 case VIDIOC_QBUF32: realcmd = cmd = VIDIOC_QBUF; break; 747 case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
653 case VIDIOC_DQBUF32: realcmd = cmd = VIDIOC_DQBUF; break; 748 case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
654 case VIDIOC_STREAMON32: realcmd = cmd = VIDIOC_STREAMON; break; 749 case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
655 case VIDIOC_STREAMOFF32: realcmd = cmd = VIDIOC_STREAMOFF; break; 750 case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
656 case VIDIOC_G_FBUF32: realcmd = cmd = VIDIOC_G_FBUF; break; 751 case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break;
657 case VIDIOC_S_FBUF32: realcmd = cmd = VIDIOC_S_FBUF; break; 752 case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break;
658 case VIDIOC_OVERLAY32: realcmd = cmd = VIDIOC_OVERLAY; break; 753 case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
659 case VIDIOC_ENUMSTD32: realcmd = VIDIOC_ENUMSTD; break; 754 case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
660 case VIDIOC_ENUMINPUT32: realcmd = VIDIOC_ENUMINPUT; break; 755 case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
661 case VIDIOC_S_CTRL32: realcmd = cmd = VIDIOC_S_CTRL; break; 756 case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
662 case VIDIOC_G_INPUT32: realcmd = cmd = VIDIOC_G_INPUT; break; 757 case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
663 case VIDIOC_S_INPUT32: realcmd = cmd = VIDIOC_S_INPUT; break; 758#ifdef __OLD_VIDIOC_
664 case VIDIOC_TRY_FMT32: realcmd = cmd = VIDIOC_TRY_FMT; break; 759 case VIDIOC_OVERLAY32_OLD: cmd = VIDIOC_OVERLAY; break;
665 }; 760#endif
666 761 case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
667 switch(cmd) { 762 case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
763 case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
764 case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
765 case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
766 case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
767 }
768
769 switch (cmd) {
668#ifdef CONFIG_VIDEO_V4L1_COMPAT 770#ifdef CONFIG_VIDEO_V4L1_COMPAT
669 case VIDIOCSTUNER: 771 case VIDIOCSTUNER:
670 case VIDIOCGTUNER: 772 case VIDIOCGTUNER:
671 err = get_video_tuner32(&karg.vt, up); 773 err = get_video_tuner32(&karg.vt, up);
672 compatible_arg = 0; 774 compatible_arg = 0;
673
674 break; 775 break;
675 776
676 case VIDIOCSFBUF: 777 case VIDIOCSFBUF:
@@ -678,19 +779,42 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
678 compatible_arg = 0; 779 compatible_arg = 0;
679 break; 780 break;
680 781
782 case VIDIOCSWIN:
783 err = get_video_window32(&karg.vw, up);
784 compatible_arg = 0;
785 break;
786
787 case VIDIOCGWIN:
788 case VIDIOCGFBUF:
789 case VIDIOCGFREQ:
790 compatible_arg = 0;
791 break;
792
793 case VIDIOCSMICROCODE:
794 err = get_microcode32(&karg.vc, up);
795 compatible_arg = 0;
796 break;
681 797
682 case VIDIOCSFREQ: 798 case VIDIOCSFREQ:
799 err = get_user(karg.vx, (u32 __user *)up);
800 compatible_arg = 0;
801 break;
802
803 case VIDIOCCAPTURE:
804 case VIDIOCSYNC:
805 case VIDIOCSWRITEMODE:
683#endif 806#endif
684 case VIDIOC_S_INPUT:
685 case VIDIOC_OVERLAY: 807 case VIDIOC_OVERLAY:
686 case VIDIOC_STREAMON: 808 case VIDIOC_STREAMON:
687 case VIDIOC_STREAMOFF: 809 case VIDIOC_STREAMOFF:
688 err = get_user(karg.vx, (u32 __user *)up); 810 case VIDIOC_S_INPUT:
689 compatible_arg = 1; 811 case VIDIOC_S_OUTPUT:
812 err = get_user(karg.vi, (s32 __user *)up);
813 compatible_arg = 0;
690 break; 814 break;
691 815
692 case VIDIOC_S_FBUF: 816 case VIDIOC_G_INPUT:
693 err = get_v4l2_framebuffer32(&karg.v2fb, up); 817 case VIDIOC_G_OUTPUT:
694 compatible_arg = 0; 818 compatible_arg = 0;
695 break; 819 break;
696 820
@@ -708,122 +832,108 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
708 compatible_arg = 0; 832 compatible_arg = 0;
709 break; 833 break;
710 834
711 case VIDIOC_ENUMSTD: 835 case VIDIOC_S_FBUF:
712 err = get_v4l2_standard(&karg.v2s, up); 836 err = get_v4l2_framebuffer32(&karg.v2fb, up);
713 compatible_arg = 0; 837 compatible_arg = 0;
714 break; 838 break;
715 839
716 case VIDIOC_ENUMSTD32: 840 case VIDIOC_G_FBUF:
717 err = get_v4l2_standard32(&karg.v2s, up);
718 compatible_arg = 0; 841 compatible_arg = 0;
719 break; 842 break;
720 843
721 case VIDIOC_ENUMINPUT: 844 case VIDIOC_ENUMSTD:
722 err = get_v4l2_input(&karg.v2i, up); 845 err = get_v4l2_standard32(&karg.v2s, up);
723 compatible_arg = 0; 846 compatible_arg = 0;
724 break; 847 break;
725 848
726 case VIDIOC_ENUMINPUT32: 849 case VIDIOC_ENUMINPUT:
727 err = get_v4l2_input32(&karg.v2i, up); 850 err = get_v4l2_input32(&karg.v2i, up);
728 compatible_arg = 0; 851 compatible_arg = 0;
729 break; 852 break;
730 853
731 case VIDIOC_G_TUNER: 854 case VIDIOC_G_EXT_CTRLS:
732 case VIDIOC_S_TUNER: 855 case VIDIOC_S_EXT_CTRLS:
733 err = get_v4l2_tuner(&karg.v2t, up); 856 case VIDIOC_TRY_EXT_CTRLS:
857 err = get_v4l2_ext_controls32(&karg.v2ecs, up);
734 compatible_arg = 0; 858 compatible_arg = 0;
735 break; 859 break;
860 }
861 if (err)
862 return err;
736 863
737#ifdef CONFIG_VIDEO_V4L1_COMPAT 864 if (compatible_arg)
738 case VIDIOCGWIN: 865 err = native_ioctl(file, cmd, (unsigned long)up);
739 case VIDIOCGFBUF:
740 case VIDIOCGFREQ:
741#endif
742 case VIDIOC_G_FBUF:
743 case VIDIOC_G_INPUT:
744 compatible_arg = 0;
745 break;
746#ifdef CONFIG_VIDEO_V4L1_COMPAT
747 case VIDIOCSMICROCODE:
748 err = microcode32(&karg.vc, up);
749 compatible_arg = 0;
750 break;
751#endif
752 };
753 if(err)
754 goto out;
755
756 if(compatible_arg)
757 err = native_ioctl(file, realcmd, (unsigned long)up);
758 else { 866 else {
759 mm_segment_t old_fs = get_fs(); 867 mm_segment_t old_fs = get_fs();
760 868
761 set_fs(KERNEL_DS); 869 set_fs(KERNEL_DS);
762 err = native_ioctl(file, realcmd, (unsigned long) &karg); 870 err = native_ioctl(file, cmd, (unsigned long)&karg);
763 set_fs(old_fs); 871 set_fs(old_fs);
764 } 872 }
765 if(err == 0) { 873
766 switch(cmd) { 874 /* Special case: even after an error we need to put the
875 results back for these ioctls since the error_idx will
876 contain information on which control failed. */
877 switch (cmd) {
878 case VIDIOC_G_EXT_CTRLS:
879 case VIDIOC_S_EXT_CTRLS:
880 case VIDIOC_TRY_EXT_CTRLS:
881 if (put_v4l2_ext_controls32(&karg.v2ecs, up))
882 err = -EFAULT;
883 break;
884 }
885 if (err)
886 return err;
887
888 switch (cmd) {
767#ifdef CONFIG_VIDEO_V4L1_COMPAT 889#ifdef CONFIG_VIDEO_V4L1_COMPAT
768 case VIDIOCGTUNER: 890 case VIDIOCGTUNER:
769 err = put_video_tuner32(&karg.vt, up); 891 err = put_video_tuner32(&karg.vt, up);
770 break; 892 break;
771 893
772 case VIDIOCGWIN: 894 case VIDIOCGWIN:
773 err = put_video_window32(&karg.vw, up); 895 err = put_video_window32(&karg.vw, up);
774 break; 896 break;
775 897
776 case VIDIOCGFBUF: 898 case VIDIOCGFBUF:
777 err = put_video_buffer32(&karg.vb, up); 899 err = put_video_buffer32(&karg.vb, up);
778 break; 900 break;
779 901
902 case VIDIOCGFREQ:
903 err = put_user(((u32)karg.vx), (u32 __user *)up);
904 break;
780#endif 905#endif
781 case VIDIOC_G_FBUF: 906 case VIDIOC_S_INPUT:
782 err = put_v4l2_framebuffer32(&karg.v2fb, up); 907 case VIDIOC_S_OUTPUT:
783 break; 908 case VIDIOC_G_INPUT:
784 909 case VIDIOC_G_OUTPUT:
785 case VIDIOC_G_FMT: 910 err = put_user(((s32)karg.vi), (s32 __user *)up);
786 case VIDIOC_S_FMT: 911 break;
787 case VIDIOC_TRY_FMT:
788 err = put_v4l2_format32(&karg.v2f, up);
789 break;
790
791 case VIDIOC_QUERYBUF:
792 case VIDIOC_QBUF:
793 case VIDIOC_DQBUF:
794 err = put_v4l2_buffer32(&karg.v2b, up);
795 break;
796
797 case VIDIOC_ENUMSTD:
798 err = put_v4l2_standard(&karg.v2s, up);
799 break;
800
801 case VIDIOC_ENUMSTD32:
802 err = put_v4l2_standard32(&karg.v2s, up);
803 break;
804
805 case VIDIOC_G_TUNER:
806 case VIDIOC_S_TUNER:
807 err = put_v4l2_tuner(&karg.v2t, up);
808 break;
809
810 case VIDIOC_ENUMINPUT:
811 err = put_v4l2_input(&karg.v2i, up);
812 break;
813
814 case VIDIOC_ENUMINPUT32:
815 err = put_v4l2_input32(&karg.v2i, up);
816 break;
817 912
818#ifdef CONFIG_VIDEO_V4L1_COMPAT 913 case VIDIOC_G_FBUF:
819 case VIDIOCGFREQ: 914 err = put_v4l2_framebuffer32(&karg.v2fb, up);
820#endif 915 break;
821 case VIDIOC_G_INPUT: 916
822 err = put_user(((u32)karg.vx), (u32 __user *)up); 917 case VIDIOC_G_FMT:
823 break; 918 case VIDIOC_S_FMT:
824 }; 919 case VIDIOC_TRY_FMT:
920 err = put_v4l2_format32(&karg.v2f, up);
921 break;
922
923 case VIDIOC_QUERYBUF:
924 case VIDIOC_QBUF:
925 case VIDIOC_DQBUF:
926 err = put_v4l2_buffer32(&karg.v2b, up);
927 break;
928
929 case VIDIOC_ENUMSTD:
930 err = put_v4l2_standard32(&karg.v2s, up);
931 break;
932
933 case VIDIOC_ENUMINPUT:
934 err = put_v4l2_input32(&karg.v2i, up);
935 break;
825 } 936 }
826out:
827 return err; 937 return err;
828} 938}
829 939
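Once do_video_ioctl() above has translated the argument into karg, the native handler still expects a user pointer, which is why the non-compatible path temporarily raises the address limit with set_fs(KERNEL_DS) before calling back in. The trick in isolation — call_with_kernel_arg() and the handler parameter are illustrative, with native_ioctl() above playing the handler role.

#include <linux/fs.h>
#include <linux/uaccess.h>

static long call_with_kernel_arg(long (*handler)(struct file *, unsigned int,
                                                 unsigned long),
                                 struct file *file, unsigned int cmd, void *karg)
{
        mm_segment_t old_fs = get_fs();
        long err;

        set_fs(KERNEL_DS);              /* make copy_{from,to}_user accept &karg */
        err = handler(file, cmd, (unsigned long)karg);
        set_fs(old_fs);                 /* always restore the previous limit */
        return err;
}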
@@ -836,26 +946,48 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
836 946
837 switch (cmd) { 947 switch (cmd) {
838#ifdef CONFIG_VIDEO_V4L1_COMPAT 948#ifdef CONFIG_VIDEO_V4L1_COMPAT
839 case VIDIOCSWIN32: 949 case VIDIOCGCAP:
840 ret = do_set_window(file, cmd, arg); 950 case VIDIOCGCHAN:
841 break; 951 case VIDIOCSCHAN:
842 case VIDIOCGTUNER32: 952 case VIDIOCGTUNER32:
843 case VIDIOCSTUNER32: 953 case VIDIOCSTUNER32:
954 case VIDIOCGPICT:
955 case VIDIOCSPICT:
956 case VIDIOCCAPTURE32:
844 case VIDIOCGWIN32: 957 case VIDIOCGWIN32:
958 case VIDIOCSWIN32:
845 case VIDIOCGFBUF32: 959 case VIDIOCGFBUF32:
846 case VIDIOCSFBUF32: 960 case VIDIOCSFBUF32:
961 case VIDIOCKEY:
847 case VIDIOCGFREQ32: 962 case VIDIOCGFREQ32:
848 case VIDIOCSFREQ32: 963 case VIDIOCSFREQ32:
849 case VIDIOCGAUDIO: 964 case VIDIOCGAUDIO:
850 case VIDIOCSAUDIO: 965 case VIDIOCSAUDIO:
966 case VIDIOCSYNC32:
967 case VIDIOCMCAPTURE:
968 case VIDIOCGMBUF:
969 case VIDIOCGUNIT:
970 case VIDIOCGCAPTURE:
971 case VIDIOCSCAPTURE:
972 case VIDIOCSPLAYMODE:
973 case VIDIOCSWRITEMODE32:
974 case VIDIOCGPLAYINFO:
975 case VIDIOCSMICROCODE32:
851 case VIDIOCGVBIFMT: 976 case VIDIOCGVBIFMT:
852 case VIDIOCSVBIFMT: 977 case VIDIOCSVBIFMT:
853#endif 978#endif
979#ifdef __OLD_VIDIOC_
980 case VIDIOC_OVERLAY32_OLD:
981 case VIDIOC_S_PARM_OLD:
982 case VIDIOC_S_CTRL_OLD:
983 case VIDIOC_G_AUDIO_OLD:
984 case VIDIOC_G_AUDOUT_OLD:
985 case VIDIOC_CROPCAP_OLD:
986#endif
854 case VIDIOC_QUERYCAP: 987 case VIDIOC_QUERYCAP:
988 case VIDIOC_RESERVED:
855 case VIDIOC_ENUM_FMT: 989 case VIDIOC_ENUM_FMT:
856 case VIDIOC_G_FMT32: 990 case VIDIOC_G_FMT32:
857 case VIDIOC_CROPCAP:
858 case VIDIOC_S_CROP:
859 case VIDIOC_S_FMT32: 991 case VIDIOC_S_FMT32:
860 case VIDIOC_REQBUFS: 992 case VIDIOC_REQBUFS:
861 case VIDIOC_QUERYBUF32: 993 case VIDIOC_QUERYBUF32:
@@ -870,43 +1002,56 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
870 case VIDIOC_S_PARM: 1002 case VIDIOC_S_PARM:
871 case VIDIOC_G_STD: 1003 case VIDIOC_G_STD:
872 case VIDIOC_S_STD: 1004 case VIDIOC_S_STD:
873 case VIDIOC_G_TUNER:
874 case VIDIOC_S_TUNER:
875 case VIDIOC_ENUMSTD:
876 case VIDIOC_ENUMSTD32: 1005 case VIDIOC_ENUMSTD32:
877 case VIDIOC_ENUMINPUT:
878 case VIDIOC_ENUMINPUT32: 1006 case VIDIOC_ENUMINPUT32:
879 case VIDIOC_G_CTRL: 1007 case VIDIOC_G_CTRL:
880 case VIDIOC_S_CTRL: 1008 case VIDIOC_S_CTRL:
881 case VIDIOC_S_CTRL32: 1009 case VIDIOC_G_TUNER:
882 case VIDIOC_S_FREQUENCY: 1010 case VIDIOC_S_TUNER:
883 case VIDIOC_G_FREQUENCY: 1011 case VIDIOC_G_AUDIO:
1012 case VIDIOC_S_AUDIO:
884 case VIDIOC_QUERYCTRL: 1013 case VIDIOC_QUERYCTRL:
1014 case VIDIOC_QUERYMENU:
885 case VIDIOC_G_INPUT32: 1015 case VIDIOC_G_INPUT32:
886 case VIDIOC_S_INPUT32: 1016 case VIDIOC_S_INPUT32:
1017 case VIDIOC_G_OUTPUT32:
1018 case VIDIOC_S_OUTPUT32:
1019 case VIDIOC_ENUMOUTPUT:
1020 case VIDIOC_G_AUDOUT:
1021 case VIDIOC_S_AUDOUT:
1022 case VIDIOC_G_MODULATOR:
1023 case VIDIOC_S_MODULATOR:
1024 case VIDIOC_S_FREQUENCY:
1025 case VIDIOC_G_FREQUENCY:
1026 case VIDIOC_CROPCAP:
1027 case VIDIOC_G_CROP:
1028 case VIDIOC_S_CROP:
1029 case VIDIOC_G_JPEGCOMP:
1030 case VIDIOC_S_JPEGCOMP:
1031 case VIDIOC_QUERYSTD:
887 case VIDIOC_TRY_FMT32: 1032 case VIDIOC_TRY_FMT32:
888 case VIDIOC_S_HW_FREQ_SEEK: 1033 case VIDIOC_ENUMAUDIO:
1034 case VIDIOC_ENUMAUDOUT:
1035 case VIDIOC_G_PRIORITY:
1036 case VIDIOC_S_PRIORITY:
1037 case VIDIOC_G_SLICED_VBI_CAP:
1038 case VIDIOC_LOG_STATUS:
1039 case VIDIOC_G_EXT_CTRLS32:
1040 case VIDIOC_S_EXT_CTRLS32:
1041 case VIDIOC_TRY_EXT_CTRLS32:
889 case VIDIOC_ENUM_FRAMESIZES: 1042 case VIDIOC_ENUM_FRAMESIZES:
890 case VIDIOC_ENUM_FRAMEINTERVALS: 1043 case VIDIOC_ENUM_FRAMEINTERVALS:
1044 case VIDIOC_G_ENC_INDEX:
1045 case VIDIOC_ENCODER_CMD:
1046 case VIDIOC_TRY_ENCODER_CMD:
1047 case VIDIOC_DBG_S_REGISTER:
1048 case VIDIOC_DBG_G_REGISTER:
1049 case VIDIOC_G_CHIP_IDENT:
1050 case VIDIOC_S_HW_FREQ_SEEK:
891 ret = do_video_ioctl(file, cmd, arg); 1051 ret = do_video_ioctl(file, cmd, arg);
892 break; 1052 break;
893 1053
894#ifdef CONFIG_VIDEO_V4L1_COMPAT 1054#ifdef CONFIG_VIDEO_V4L1_COMPAT
895 /* Little v, the video4linux ioctls (conflict?) */
896 case VIDIOCGCAP:
897 case VIDIOCGCHAN:
898 case VIDIOCSCHAN:
899 case VIDIOCGPICT:
900 case VIDIOCSPICT:
901 case VIDIOCCAPTURE:
902 case VIDIOCKEY:
903 case VIDIOCSYNC:
904 case VIDIOCMCAPTURE:
905 case VIDIOCGMBUF:
906 case VIDIOCGUNIT:
907 case VIDIOCGCAPTURE:
908 case VIDIOCSCAPTURE:
909
910 /* BTTV specific... */ 1055 /* BTTV specific... */
911 case _IOW('v', BASE_VIDIOCPRIVATE+0, char [256]): 1056 case _IOW('v', BASE_VIDIOCPRIVATE+0, char [256]):
912 case _IOR('v', BASE_VIDIOCPRIVATE+1, char [256]): 1057 case _IOR('v', BASE_VIDIOCPRIVATE+1, char [256]):
@@ -921,6 +1066,8 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
921#endif 1066#endif
922 default: 1067 default:
923 v4l_print_ioctl("compat_ioctl32", cmd); 1068 v4l_print_ioctl("compat_ioctl32", cmd);
1069 printk(KERN_CONT "\n");
1070 break;
924 } 1071 }
925 return ret; 1072 return ret;
926} 1073}
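With the expanded table above, v4l_compat_ioctl32() acts as a whitelist: every command it recognizes is funneled through do_video_ioctl(), anything else is merely logged. Drivers of this era hook it up through their 32-bit ioctl entry point, roughly as sketched here; mydrv_compat_ioctl is illustrative, and the prototype matches the function above but would normally come from the V4L2 headers rather than a local extern.

#include <linux/fs.h>

#ifdef CONFIG_COMPAT
extern long v4l_compat_ioctl32(struct file *file, unsigned int cmd,
                               unsigned long arg);

/* Placed in the driver's file_operations as .compat_ioctl so that 32-bit
   userspace ioctls reach the translation layer above. */
static long mydrv_compat_ioctl(struct file *file, unsigned int cmd,
                               unsigned long arg)
{
        return v4l_compat_ioctl32(file, cmd, arg);
}
#endif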
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index ccd6566a515e..7ad6711ee327 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -9,7 +9,7 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * Authors: Alan Cox, <alan@redhat.com> (version 1) 12 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
13 * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2) 13 * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
14 * 14 *
15 * Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com> 15 * Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com>
@@ -30,6 +30,7 @@
30#include <asm/system.h> 30#include <asm/system.h>
31 31
32#include <media/v4l2-common.h> 32#include <media/v4l2-common.h>
33#include <media/v4l2-device.h>
33 34
34#define VIDEO_NUM_DEVICES 256 35#define VIDEO_NUM_DEVICES 256
35#define VIDEO_NAME "video4linux" 36#define VIDEO_NAME "video4linux"
@@ -41,17 +42,17 @@
41static ssize_t show_index(struct device *cd, 42static ssize_t show_index(struct device *cd,
42 struct device_attribute *attr, char *buf) 43 struct device_attribute *attr, char *buf)
43{ 44{
44 struct video_device *vfd = container_of(cd, struct video_device, dev); 45 struct video_device *vdev = to_video_device(cd);
45 46
46 return sprintf(buf, "%i\n", vfd->index); 47 return sprintf(buf, "%i\n", vdev->index);
47} 48}
48 49
49static ssize_t show_name(struct device *cd, 50static ssize_t show_name(struct device *cd,
50 struct device_attribute *attr, char *buf) 51 struct device_attribute *attr, char *buf)
51{ 52{
52 struct video_device *vfd = container_of(cd, struct video_device, dev); 53 struct video_device *vdev = to_video_device(cd);
53 54
54 return sprintf(buf, "%.*s\n", (int)sizeof(vfd->name), vfd->name); 55 return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
55} 56}
56 57
57static struct device_attribute video_device_attrs[] = { 58static struct device_attribute video_device_attrs[] = {
@@ -73,64 +74,64 @@ struct video_device *video_device_alloc(void)
73} 74}
74EXPORT_SYMBOL(video_device_alloc); 75EXPORT_SYMBOL(video_device_alloc);
75 76
76void video_device_release(struct video_device *vfd) 77void video_device_release(struct video_device *vdev)
77{ 78{
78 kfree(vfd); 79 kfree(vdev);
79} 80}
80EXPORT_SYMBOL(video_device_release); 81EXPORT_SYMBOL(video_device_release);
81 82
82void video_device_release_empty(struct video_device *vfd) 83void video_device_release_empty(struct video_device *vdev)
83{ 84{
84 /* Do nothing */ 85 /* Do nothing */
85 /* Only valid when the video_device struct is a static. */ 86 /* Only valid when the video_device struct is a static. */
86} 87}
87EXPORT_SYMBOL(video_device_release_empty); 88EXPORT_SYMBOL(video_device_release_empty);
88 89
89/* Called when the last user of the character device is gone. */ 90static inline void video_get(struct video_device *vdev)
90static void v4l2_chardev_release(struct kobject *kobj)
91{ 91{
92 struct video_device *vfd = container_of(kobj, struct video_device, cdev.kobj); 92 get_device(&vdev->dev);
93}
94
95static inline void video_put(struct video_device *vdev)
96{
97 put_device(&vdev->dev);
98}
99
100/* Called when the last user of the video device exits. */
101static void v4l2_device_release(struct device *cd)
102{
103 struct video_device *vdev = to_video_device(cd);
93 104
94 mutex_lock(&videodev_lock); 105 mutex_lock(&videodev_lock);
95 if (video_device[vfd->minor] != vfd) { 106 if (video_device[vdev->minor] != vdev) {
96 mutex_unlock(&videodev_lock); 107 mutex_unlock(&videodev_lock);
97 BUG(); 108 /* should not happen */
109 WARN_ON(1);
98 return; 110 return;
99 } 111 }
100 112
101 /* Free up this device for reuse */ 113 /* Free up this device for reuse */
102 video_device[vfd->minor] = NULL; 114 video_device[vdev->minor] = NULL;
103 clear_bit(vfd->num, video_nums[vfd->vfl_type]);
104 mutex_unlock(&videodev_lock);
105 115
106 /* Release the character device */ 116 /* Delete the cdev on this minor as well */
107 vfd->cdev_release(kobj); 117 cdev_del(vdev->cdev);
108 /* Release video_device and perform other 118 /* Just in case some driver tries to access this from
109 cleanups as needed. */ 119 the release() callback. */
110 if (vfd->release) 120 vdev->cdev = NULL;
111 vfd->release(vfd);
112}
113 121
114/* The new kobj_type for the character device */ 122 /* Mark minor as free */
115static struct kobj_type v4l2_ktype_cdev_default = { 123 clear_bit(vdev->num, video_nums[vdev->vfl_type]);
116 .release = v4l2_chardev_release,
117};
118 124
119static void video_release(struct device *cd) 125 mutex_unlock(&videodev_lock);
120{
121 struct video_device *vfd = container_of(cd, struct video_device, dev);
122 126
123 /* It's now safe to delete the char device. 127 /* Release video_device and perform other
124 This will either trigger the v4l2_chardev_release immediately (if 128 cleanups as needed. */
125 the refcount goes to 0) or later when the last user of the 129 vdev->release(vdev);
126 character device closes it. */
127 cdev_del(&vfd->cdev);
128} 130}
129 131
130static struct class video_class = { 132static struct class video_class = {
131 .name = VIDEO_NAME, 133 .name = VIDEO_NAME,
132 .dev_attrs = video_device_attrs, 134 .dev_attrs = video_device_attrs,
133 .dev_release = video_release,
134}; 135};
135 136
136struct video_device *video_devdata(struct file *file) 137struct video_device *video_devdata(struct file *file)
@@ -139,13 +140,163 @@ struct video_device *video_devdata(struct file *file)
139} 140}
140EXPORT_SYMBOL(video_devdata); 141EXPORT_SYMBOL(video_devdata);
141 142
143static ssize_t v4l2_read(struct file *filp, char __user *buf,
144 size_t sz, loff_t *off)
145{
146 struct video_device *vdev = video_devdata(filp);
147
148 if (!vdev->fops->read)
149 return -EINVAL;
150 if (video_is_unregistered(vdev))
151 return -EIO;
152 return vdev->fops->read(filp, buf, sz, off);
153}
154
155static ssize_t v4l2_write(struct file *filp, const char __user *buf,
156 size_t sz, loff_t *off)
157{
158 struct video_device *vdev = video_devdata(filp);
159
160 if (!vdev->fops->write)
161 return -EINVAL;
162 if (video_is_unregistered(vdev))
163 return -EIO;
164 return vdev->fops->write(filp, buf, sz, off);
165}
166
167static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
168{
169 struct video_device *vdev = video_devdata(filp);
170
171 if (!vdev->fops->poll || video_is_unregistered(vdev))
172 return DEFAULT_POLLMASK;
173 return vdev->fops->poll(filp, poll);
174}
175
176static int v4l2_ioctl(struct inode *inode, struct file *filp,
177 unsigned int cmd, unsigned long arg)
178{
179 struct video_device *vdev = video_devdata(filp);
180
181 if (!vdev->fops->ioctl)
182 return -ENOTTY;
183 /* Allow ioctl to continue even if the device was unregistered.
184 Things like dequeueing buffers might still be useful. */
185 return vdev->fops->ioctl(inode, filp, cmd, arg);
186}
187
188static long v4l2_unlocked_ioctl(struct file *filp,
189 unsigned int cmd, unsigned long arg)
190{
191 struct video_device *vdev = video_devdata(filp);
192
193 if (!vdev->fops->unlocked_ioctl)
194 return -ENOTTY;
195 /* Allow ioctl to continue even if the device was unregistered.
196 Things like dequeueing buffers might still be useful. */
197 return vdev->fops->unlocked_ioctl(filp, cmd, arg);
198}
199
200#ifdef CONFIG_COMPAT
201static long v4l2_compat_ioctl(struct file *filp,
202 unsigned int cmd, unsigned long arg)
203{
204 struct video_device *vdev = video_devdata(filp);
205
206 if (!vdev->fops->compat_ioctl)
207 return -ENOIOCTLCMD;
208 /* Allow ioctl to continue even if the device was unregistered.
209 Things like dequeueing buffers might still be useful. */
210 return vdev->fops->compat_ioctl(filp, cmd, arg);
211}
212#endif
213
214static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
215{
216 struct video_device *vdev = video_devdata(filp);
217
218 if (!vdev->fops->mmap ||
219 video_is_unregistered(vdev))
220 return -ENODEV;
221 return vdev->fops->mmap(filp, vm);
222}
223
224/* Override for the open function */
225static int v4l2_open(struct inode *inode, struct file *filp)
226{
227 struct video_device *vdev;
228 int ret;
229
230 /* Check if the video device is available */
231 mutex_lock(&videodev_lock);
232 vdev = video_devdata(filp);
233 /* return ENODEV if the video device has been removed
234 already or if it is not registered anymore. */
235 if (vdev == NULL || video_is_unregistered(vdev)) {
236 mutex_unlock(&videodev_lock);
237 return -ENODEV;
238 }
239 /* and increase the device refcount */
240 video_get(vdev);
241 mutex_unlock(&videodev_lock);
242 ret = vdev->fops->open(inode, filp);
243 /* decrease the refcount in case of an error */
244 if (ret)
245 video_put(vdev);
246 return ret;
247}
248
249/* Override for the release function */
250static int v4l2_release(struct inode *inode, struct file *filp)
251{
252 struct video_device *vdev = video_devdata(filp);
253 int ret = vdev->fops->release(inode, filp);
254
255 /* decrease the refcount unconditionally since the release()
256 return value is ignored. */
257 video_put(vdev);
258 return ret;
259}
260
261static const struct file_operations v4l2_unlocked_fops = {
262 .owner = THIS_MODULE,
263 .read = v4l2_read,
264 .write = v4l2_write,
265 .open = v4l2_open,
266 .mmap = v4l2_mmap,
267 .unlocked_ioctl = v4l2_unlocked_ioctl,
268#ifdef CONFIG_COMPAT
269 .compat_ioctl = v4l2_compat_ioctl,
270#endif
271 .release = v4l2_release,
272 .poll = v4l2_poll,
273 .llseek = no_llseek,
274};
275
276static const struct file_operations v4l2_fops = {
277 .owner = THIS_MODULE,
278 .read = v4l2_read,
279 .write = v4l2_write,
280 .open = v4l2_open,
281 .mmap = v4l2_mmap,
282 .ioctl = v4l2_ioctl,
283#ifdef CONFIG_COMPAT
284 .compat_ioctl = v4l2_compat_ioctl,
285#endif
286 .release = v4l2_release,
287 .poll = v4l2_poll,
288 .llseek = no_llseek,
289};
290
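The two fops tables above are deliberately thin: every entry looks up the video_device behind the file, refuses I/O once the device is flagged unregistered, and forwards to the driver's own fops, while open/release pin and unpin the embedded struct device so v4l2_device_release() cannot run under an open file. The reference-counting half of that pattern in isolation; struct toy_device and the toy_* helpers are illustrative, while get_device()/put_device() are the standard driver-model calls.

#include <linux/device.h>

struct toy_device {
        struct device dev;              /* embedded, like video_device::dev above */
        /* per-device driver state ... */
};

static inline void toy_get(struct toy_device *tdev)
{
        get_device(&tdev->dev);         /* pin for the lifetime of an open file */
}

static inline void toy_put(struct toy_device *tdev)
{
        put_device(&tdev->dev);         /* last put invokes dev.release() */
}

static int toy_open(struct toy_device *tdev)
{
        int ret;

        toy_get(tdev);                  /* taken before the driver's open runs */
        ret = 0;                        /* driver open would go here */
        if (ret)
                toy_put(tdev);          /* drop again if the driver refused */
        return ret;
}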
142/** 291/**
143 * get_index - assign stream number based on parent device 292 * get_index - assign stream number based on parent device
144 * @vdev: video_device to assign index number to, vdev->dev should be assigned 293 * @vdev: video_device to assign index number to, vdev->parent should be assigned
145 * @num: -1 if auto assign, requested number otherwise 294 * @num: -1 if auto assign, requested number otherwise
146 * 295 *
296 * Note that when this is called the new device has not yet been registered
297 * in the video_device array.
147 * 298 *
148 * returns -ENFILE if num is already in use, a free index number if 299 * Returns -ENFILE if num is already in use, a free index number if
149 * successful. 300 * successful.
150 */ 301 */
151static int get_index(struct video_device *vdev, int num) 302static int get_index(struct video_device *vdev, int num)
@@ -162,9 +313,12 @@ static int get_index(struct video_device *vdev, int num)
162 return -EINVAL; 313 return -EINVAL;
163 } 314 }
164 315
316 /* Some drivers do not set the parent. In that case always return 0. */
317 if (vdev->parent == NULL)
318 return 0;
319
165 for (i = 0; i < VIDEO_NUM_DEVICES; i++) { 320 for (i = 0; i < VIDEO_NUM_DEVICES; i++) {
166 if (video_device[i] != NULL && 321 if (video_device[i] != NULL &&
167 video_device[i] != vdev &&
168 video_device[i]->parent == vdev->parent) { 322 video_device[i]->parent == vdev->parent) {
169 used |= 1 << video_device[i]->index; 323 used |= 1 << video_device[i]->index;
170 } 324 }
@@ -180,17 +334,15 @@ static int get_index(struct video_device *vdev, int num)
180 return i > max_index ? -ENFILE : i; 334 return i > max_index ? -ENFILE : i;
181} 335}
182 336
183static const struct file_operations video_fops; 337int video_register_device(struct video_device *vdev, int type, int nr)
184
185int video_register_device(struct video_device *vfd, int type, int nr)
186{ 338{
187 return video_register_device_index(vfd, type, nr, -1); 339 return video_register_device_index(vdev, type, nr, -1);
188} 340}
189EXPORT_SYMBOL(video_register_device); 341EXPORT_SYMBOL(video_register_device);
190 342
191/** 343/**
192 * video_register_device_index - register video4linux devices 344 * video_register_device_index - register video4linux devices
193 * @vfd: video device structure we want to register 345 * @vdev: video device structure we want to register
194 * @type: type of device to register 346 * @type: type of device to register
195 * @nr: which device number (0 == /dev/video0, 1 == /dev/video1, ... 347 * @nr: which device number (0 == /dev/video0, 1 == /dev/video1, ...
196 * -1 == first free) 348 * -1 == first free)
@@ -214,8 +366,7 @@ EXPORT_SYMBOL(video_register_device);
214 * 366 *
215 * %VFL_TYPE_RADIO - A radio card 367 * %VFL_TYPE_RADIO - A radio card
216 */ 368 */
217 369int video_register_device_index(struct video_device *vdev, int type, int nr,
218int video_register_device_index(struct video_device *vfd, int type, int nr,
219 int index) 370 int index)
220{ 371{
221 int i = 0; 372 int i = 0;
@@ -223,14 +374,19 @@ int video_register_device_index(struct video_device *vfd, int type, int nr,
223 int minor_offset = 0; 374 int minor_offset = 0;
224 int minor_cnt = VIDEO_NUM_DEVICES; 375 int minor_cnt = VIDEO_NUM_DEVICES;
225 const char *name_base; 376 const char *name_base;
226 void *priv = video_get_drvdata(vfd); 377 void *priv = video_get_drvdata(vdev);
227 378
228 /* the release callback MUST be present */ 379 /* A minor value of -1 marks this video device as never
229 BUG_ON(!vfd->release); 380 having been registered */
381 if (vdev)
382 vdev->minor = -1;
230 383
231 if (vfd == NULL) 384 /* the release callback MUST be present */
385 WARN_ON(!vdev || !vdev->release);
386 if (!vdev || !vdev->release)
232 return -EINVAL; 387 return -EINVAL;
233 388
389 /* Part 1: check device type */
234 switch (type) { 390 switch (type) {
235 case VFL_TYPE_GRABBER: 391 case VFL_TYPE_GRABBER:
236 name_base = "video"; 392 name_base = "video";
@@ -250,8 +406,12 @@ int video_register_device_index(struct video_device *vfd, int type, int nr,
250 return -EINVAL; 406 return -EINVAL;
251 } 407 }
252 408
253 vfd->vfl_type = type; 409 vdev->vfl_type = type;
410 vdev->cdev = NULL;
411 if (vdev->v4l2_dev)
412 vdev->parent = vdev->v4l2_dev->dev;
254 413
414 /* Part 2: find a free minor, kernel number and device index. */
255#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES 415#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
256 /* Keep the ranges for the first four types for historical 416 /* Keep the ranges for the first four types for historical
257 * reasons. 417 * reasons.
@@ -282,10 +442,7 @@ int video_register_device_index(struct video_device *vfd, int type, int nr,
282 } 442 }
283#endif 443#endif
284 444
285 /* Initialize the character device */ 445 /* Pick a minor number */
286 cdev_init(&vfd->cdev, vfd->fops);
287 vfd->cdev.owner = vfd->fops->owner;
288 /* pick a minor number */
289 mutex_lock(&videodev_lock); 446 mutex_lock(&videodev_lock);
290 nr = find_next_zero_bit(video_nums[type], minor_cnt, nr == -1 ? 0 : nr); 447 nr = find_next_zero_bit(video_nums[type], minor_cnt, nr == -1 ? 0 : nr);
291 if (nr == minor_cnt) 448 if (nr == minor_cnt)
@@ -309,72 +466,92 @@ int video_register_device_index(struct video_device *vfd, int type, int nr,
309 return -ENFILE; 466 return -ENFILE;
310 } 467 }
311#endif 468#endif
312 vfd->minor = i + minor_offset; 469 vdev->minor = i + minor_offset;
313 vfd->num = nr; 470 vdev->num = nr;
314 set_bit(nr, video_nums[type]); 471 set_bit(nr, video_nums[type]);
315 BUG_ON(video_device[vfd->minor]); 472 /* Should not happen since we thought this minor was free */
316 video_device[vfd->minor] = vfd; 473 WARN_ON(video_device[vdev->minor] != NULL);
317 474 ret = vdev->index = get_index(vdev, index);
318 ret = get_index(vfd, index);
319 vfd->index = ret;
320
321 mutex_unlock(&videodev_lock); 475 mutex_unlock(&videodev_lock);
322 476
323 if (ret < 0) { 477 if (ret < 0) {
324 printk(KERN_ERR "%s: get_index failed\n", __func__); 478 printk(KERN_ERR "%s: get_index failed\n", __func__);
325 goto fail_minor; 479 goto cleanup;
326 } 480 }
327 481
328 ret = cdev_add(&vfd->cdev, MKDEV(VIDEO_MAJOR, vfd->minor), 1); 482 /* Part 3: Initialize the character device */
483 vdev->cdev = cdev_alloc();
484 if (vdev->cdev == NULL) {
485 ret = -ENOMEM;
486 goto cleanup;
487 }
488 if (vdev->fops->unlocked_ioctl)
489 vdev->cdev->ops = &v4l2_unlocked_fops;
490 else
491 vdev->cdev->ops = &v4l2_fops;
492 vdev->cdev->owner = vdev->fops->owner;
493 ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1);
329 if (ret < 0) { 494 if (ret < 0) {
330 printk(KERN_ERR "%s: cdev_add failed\n", __func__); 495 printk(KERN_ERR "%s: cdev_add failed\n", __func__);
331 goto fail_minor; 496 kfree(vdev->cdev);
497 vdev->cdev = NULL;
498 goto cleanup;
332 } 499 }
333 /* sysfs class */ 500
334 memset(&vfd->dev, 0, sizeof(vfd->dev)); 501 /* Part 4: register the device with sysfs */
502 memset(&vdev->dev, 0, sizeof(vdev->dev));
335 /* The memset above cleared the device's drvdata, so 503 /* The memset above cleared the device's drvdata, so
336 put back the copy we made earlier. */ 504 put back the copy we made earlier. */
337 video_set_drvdata(vfd, priv); 505 video_set_drvdata(vdev, priv);
338 vfd->dev.class = &video_class; 506 vdev->dev.class = &video_class;
339 vfd->dev.devt = MKDEV(VIDEO_MAJOR, vfd->minor); 507 vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
340 if (vfd->parent) 508 if (vdev->parent)
341 vfd->dev.parent = vfd->parent; 509 vdev->dev.parent = vdev->parent;
342 sprintf(vfd->dev.bus_id, "%s%d", name_base, nr); 510 dev_set_name(&vdev->dev, "%s%d", name_base, nr);
343 ret = device_register(&vfd->dev); 511 ret = device_register(&vdev->dev);
344 if (ret < 0) { 512 if (ret < 0) {
345 printk(KERN_ERR "%s: device_register failed\n", __func__); 513 printk(KERN_ERR "%s: device_register failed\n", __func__);
346 goto del_cdev; 514 goto cleanup;
347 } 515 }
348 /* Remember the cdev's release function */ 516 /* Register the release callback that will be called when the last
349 vfd->cdev_release = vfd->cdev.kobj.ktype->release; 517 reference to the device goes away. */
350 /* Install our own */ 518 vdev->dev.release = v4l2_device_release;
351 vfd->cdev.kobj.ktype = &v4l2_ktype_cdev_default;
352 return 0;
353 519
354del_cdev: 520 /* Part 5: Activate this minor. The char device can now be used. */
355 cdev_del(&vfd->cdev); 521 mutex_lock(&videodev_lock);
522 video_device[vdev->minor] = vdev;
523 mutex_unlock(&videodev_lock);
524 return 0;
356 525
357fail_minor: 526cleanup:
358 mutex_lock(&videodev_lock); 527 mutex_lock(&videodev_lock);
359 video_device[vfd->minor] = NULL; 528 if (vdev->cdev)
360 clear_bit(vfd->num, video_nums[type]); 529 cdev_del(vdev->cdev);
530 clear_bit(vdev->num, video_nums[type]);
361 mutex_unlock(&videodev_lock); 531 mutex_unlock(&videodev_lock);
362 vfd->minor = -1; 532 /* Mark this video device as never having been registered. */
533 vdev->minor = -1;
363 return ret; 534 return ret;
364} 535}
365EXPORT_SYMBOL(video_register_device_index); 536EXPORT_SYMBOL(video_register_device_index);
366 537
367/** 538/**
368 * video_unregister_device - unregister a video4linux device 539 * video_unregister_device - unregister a video4linux device
369 * @vfd: the device to unregister 540 * @vdev: the device to unregister
370 * 541 *
371 * This unregisters the passed device and deassigns the minor 542 * This unregisters the passed device. Future open calls will
372 * number. Future open calls will be met with errors. 543 * be met with errors.
373 */ 544 */
374 545void video_unregister_device(struct video_device *vdev)
375void video_unregister_device(struct video_device *vfd)
376{ 546{
377 device_unregister(&vfd->dev); 547 /* Check if vdev was ever registered at all */
548 if (!vdev || vdev->minor < 0)
549 return;
550
551 mutex_lock(&videodev_lock);
552 set_bit(V4L2_FL_UNREGISTERED, &vdev->flags);
553 mutex_unlock(&videodev_lock);
554 device_unregister(&vdev->dev);
378} 555}
379EXPORT_SYMBOL(video_unregister_device); 556EXPORT_SYMBOL(video_unregister_device);
380 557
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
new file mode 100644
index 000000000000..9eefde031597
--- /dev/null
+++ b/drivers/media/video/v4l2-device.c
@@ -0,0 +1,86 @@
1/*
2 V4L2 device support.
3
4 Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/types.h>
22#include <linux/ioctl.h>
23#include <linux/i2c.h>
24#include <linux/videodev2.h>
25#include <media/v4l2-device.h>
26
27int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
28{
29 if (dev == NULL || v4l2_dev == NULL)
30 return -EINVAL;
31 /* Warn if we apparently re-register a device */
32 WARN_ON(dev_get_drvdata(dev));
33 INIT_LIST_HEAD(&v4l2_dev->subdevs);
34 spin_lock_init(&v4l2_dev->lock);
35 v4l2_dev->dev = dev;
36 snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s",
37 dev->driver->name, dev->bus_id);
38 dev_set_drvdata(dev, v4l2_dev);
39 return 0;
40}
41EXPORT_SYMBOL_GPL(v4l2_device_register);
42
43void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
44{
45 struct v4l2_subdev *sd, *next;
46
47 if (v4l2_dev == NULL || v4l2_dev->dev == NULL)
48 return;
49 dev_set_drvdata(v4l2_dev->dev, NULL);
50 /* unregister subdevs */
51 list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list)
52 v4l2_device_unregister_subdev(sd);
53
54 v4l2_dev->dev = NULL;
55}
56EXPORT_SYMBOL_GPL(v4l2_device_unregister);
57
58int v4l2_device_register_subdev(struct v4l2_device *dev, struct v4l2_subdev *sd)
59{
60 /* Check for valid input */
61 if (dev == NULL || sd == NULL || !sd->name[0])
62 return -EINVAL;
63 /* Warn if we apparently re-register a subdev */
64 WARN_ON(sd->dev);
65 if (!try_module_get(sd->owner))
66 return -ENODEV;
67 sd->dev = dev;
68 spin_lock(&dev->lock);
69 list_add_tail(&sd->list, &dev->subdevs);
70 spin_unlock(&dev->lock);
71 return 0;
72}
73EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
74
75void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
76{
77 /* return if it isn't registered */
78 if (sd == NULL || sd->dev == NULL)
79 return;
80 spin_lock(&sd->dev->lock);
81 list_del(&sd->list);
82 spin_unlock(&sd->dev->lock);
83 sd->dev = NULL;
84 module_put(sd->owner);
85}
86EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
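The new v4l2-device.c gives bridge drivers a small anchor object: register it against the parent struct device in probe(), hang sub-devices off it, and unregister it in remove(). A sketch of that call sequence for a hypothetical PCI bridge; the mydrv_* names and the PCI glue are illustrative, while the v4l2_device_* calls are the ones defined above.

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>

struct mydrv_state {
        struct v4l2_device v4l2_dev;
        /* bridge-specific state ... */
};

static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct mydrv_state *state;
        int ret;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;
        /* Names the instance "<driver> <bus_id>" and stores it in drvdata,
           exactly as v4l2_device_register() above does. */
        ret = v4l2_device_register(&pdev->dev, &state->v4l2_dev);
        if (ret) {
                kfree(state);
                return ret;
        }
        /* sub-devices would now be attached with
           v4l2_device_register_subdev(&state->v4l2_dev, sd); */
        return 0;
}

static void mydrv_remove(struct pci_dev *pdev)
{
        struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
        struct mydrv_state *state =
                container_of(v4l2_dev, struct mydrv_state, v4l2_dev);

        v4l2_device_unregister(v4l2_dev);       /* also detaches any subdevs */
        kfree(state);
}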
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 710e1a40c422..b063381f4b3b 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -8,7 +8,7 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * Authors: Alan Cox, <alan@redhat.com> (version 1) 11 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
12 * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2) 12 * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
13 */ 13 */
14 14
@@ -393,10 +393,8 @@ video_fix_command(unsigned int cmd)
393 * Obsolete usercopy function - Should be removed soon 393 * Obsolete usercopy function - Should be removed soon
394 */ 394 */
395int 395int
396video_usercopy(struct inode *inode, struct file *file, 396video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
397 unsigned int cmd, unsigned long arg, 397 v4l2_kioctl func)
398 int (*func)(struct inode *inode, struct file *file,
399 unsigned int cmd, void *arg))
400{ 398{
401 char sbuf[128]; 399 char sbuf[128];
402 void *mbuf = NULL; 400 void *mbuf = NULL;
@@ -458,7 +456,7 @@ video_usercopy(struct inode *inode, struct file *file,
458 } 456 }
459 457
460 /* call driver */ 458 /* call driver */
461 err = func(inode, file, cmd, parg); 459 err = func(file, cmd, parg);
462 if (err == -ENOIOCTLCMD) 460 if (err == -ENOIOCTLCMD)
463 err = -EINVAL; 461 err = -EINVAL;
464 if (is_ext_ctrl) { 462 if (is_ext_ctrl) {
@@ -1481,9 +1479,15 @@ static int __video_do_ioctl(struct file *file,
1481 case VIDIOC_G_CROP: 1479 case VIDIOC_G_CROP:
1482 { 1480 {
1483 struct v4l2_crop *p = arg; 1481 struct v4l2_crop *p = arg;
1482 __u32 type;
1484 1483
1485 if (!ops->vidioc_g_crop) 1484 if (!ops->vidioc_g_crop)
1486 break; 1485 break;
1486
1487 type = p->type;
1488 memset(p, 0, sizeof(*p));
1489 p->type = type;
1490
1487 dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names)); 1491 dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
1488 ret = ops->vidioc_g_crop(file, fh, p); 1492 ret = ops->vidioc_g_crop(file, fh, p);
1489 if (!ret) 1493 if (!ret)
@@ -1504,10 +1508,16 @@ static int __video_do_ioctl(struct file *file,
1504 case VIDIOC_CROPCAP: 1508 case VIDIOC_CROPCAP:
1505 { 1509 {
1506 struct v4l2_cropcap *p = arg; 1510 struct v4l2_cropcap *p = arg;
1511 __u32 type;
1507 1512
1508 /*FIXME: Should also show v4l2_fract pixelaspect */ 1513 /*FIXME: Should also show v4l2_fract pixelaspect */
1509 if (!ops->vidioc_cropcap) 1514 if (!ops->vidioc_cropcap)
1510 break; 1515 break;
1516
1517 type = p->type;
1518 memset(p, 0, sizeof(*p));
1519 p->type = type;
1520
1511 dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names)); 1521 dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
1512 ret = ops->vidioc_cropcap(file, fh, p); 1522 ret = ops->vidioc_cropcap(file, fh, p);
1513 if (!ret) { 1523 if (!ret) {
@@ -1522,6 +1532,9 @@ static int __video_do_ioctl(struct file *file,
1522 1532
1523 if (!ops->vidioc_g_jpegcomp) 1533 if (!ops->vidioc_g_jpegcomp)
1524 break; 1534 break;
1535
1536 memset(p, 0, sizeof(*p));
1537
1525 ret = ops->vidioc_g_jpegcomp(file, fh, p); 1538 ret = ops->vidioc_g_jpegcomp(file, fh, p);
1526 if (!ret) 1539 if (!ret)
1527 dbgarg(cmd, "quality=%d, APPn=%d, " 1540 dbgarg(cmd, "quality=%d, APPn=%d, "
@@ -1749,6 +1762,77 @@ static int __video_do_ioctl(struct file *file,
1749 ret = ops->vidioc_s_hw_freq_seek(file, fh, p); 1762 ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
1750 break; 1763 break;
1751 } 1764 }
1765 case VIDIOC_ENUM_FRAMESIZES:
1766 {
1767 struct v4l2_frmsizeenum *p = arg;
1768
1769 if (!ops->vidioc_enum_framesizes)
1770 break;
1771
1772 memset(p, 0, sizeof(*p));
1773
1774 ret = ops->vidioc_enum_framesizes(file, fh, p);
1775 dbgarg(cmd,
1776 "index=%d, pixelformat=%d, type=%d ",
1777 p->index, p->pixel_format, p->type);
1778 switch (p->type) {
1779 case V4L2_FRMSIZE_TYPE_DISCRETE:
1780 dbgarg2("width = %d, height=%d\n",
1781 p->discrete.width, p->discrete.height);
1782 break;
1783 case V4L2_FRMSIZE_TYPE_STEPWISE:
1784 dbgarg2("min %dx%d, max %dx%d, step %dx%d\n",
1785 p->stepwise.min_width, p->stepwise.min_height,
1786 p->stepwise.step_width, p->stepwise.step_height,
1787 p->stepwise.max_width, p->stepwise.max_height);
1788 break;
1789 case V4L2_FRMSIZE_TYPE_CONTINUOUS:
1790 dbgarg2("continuous\n");
1791 break;
1792 default:
1793 dbgarg2("- Unknown type!\n");
1794 }
1795
1796 break;
1797 }
1798 case VIDIOC_ENUM_FRAMEINTERVALS:
1799 {
1800 struct v4l2_frmivalenum *p = arg;
1801
1802 if (!ops->vidioc_enum_frameintervals)
1803 break;
1804
1805 memset(p, 0, sizeof(*p));
1806
1807 ret = ops->vidioc_enum_frameintervals(file, fh, p);
1808 dbgarg(cmd,
1809 "index=%d, pixelformat=%d, width=%d, height=%d, type=%d ",
1810 p->index, p->pixel_format,
1811 p->width, p->height, p->type);
1812 switch (p->type) {
1813 case V4L2_FRMIVAL_TYPE_DISCRETE:
1814 dbgarg2("fps=%d/%d\n",
1815 p->discrete.numerator,
1816 p->discrete.denominator);
1817 break;
1818 case V4L2_FRMIVAL_TYPE_STEPWISE:
1819 dbgarg2("min=%d/%d, max=%d/%d, step=%d/%d\n",
1820 p->stepwise.min.numerator,
1821 p->stepwise.min.denominator,
1822 p->stepwise.max.numerator,
1823 p->stepwise.max.denominator,
1824 p->stepwise.step.numerator,
1825 p->stepwise.step.denominator);
1826 break;
1827 case V4L2_FRMIVAL_TYPE_CONTINUOUS:
1828 dbgarg2("continuous\n");
1829 break;
1830 default:
1831 dbgarg2("- Unknown type!\n");
1832 }
1833 break;
1834 }
1835
1752 default: 1836 default:
1753 { 1837 {
1754 if (!ops->vidioc_default) 1838 if (!ops->vidioc_default)
@@ -1768,7 +1852,7 @@ static int __video_do_ioctl(struct file *file,
1768 return ret; 1852 return ret;
1769} 1853}
1770 1854
1771int __video_ioctl2(struct file *file, 1855long __video_ioctl2(struct file *file,
1772 unsigned int cmd, unsigned long arg) 1856 unsigned int cmd, unsigned long arg)
1773{ 1857{
1774 char sbuf[128]; 1858 char sbuf[128];
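Two themes run through the v4l2-ioctl.c hunks above: ioctls that return data (G_CROP, CROPCAP, G_JPEGCOMP and the new frame-size/interval enumerations) now clear the argument struct before handing it to the driver so stale bytes never leak back to user space, and __video_ioctl2() switches to a long return value. The clearing idiom on its own, applied to a hypothetical reply struct; struct reply and sanitize_reply() are illustrative.

#include <linux/string.h>
#include <linux/types.h>

struct reply {
        __u32 type;                     /* input: which target the caller asks about */
        __u32 width, height;            /* output, filled in by the driver */
        __u32 reserved[4];
};

static void sanitize_reply(struct reply *p)
{
        __u32 type = p->type;           /* the only caller-supplied field */

        memset(p, 0, sizeof(*p));
        p->type = type;                 /* driver sees a clean struct plus the selector */
}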
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
new file mode 100644
index 000000000000..e3612f29d0df
--- /dev/null
+++ b/drivers/media/video/v4l2-subdev.c
@@ -0,0 +1,110 @@
1/*
2 V4L2 sub-device support.
3
4 Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/types.h>
22#include <linux/ioctl.h>
23#include <linux/i2c.h>
24#include <linux/videodev2.h>
25#include <media/v4l2-subdev.h>
26
27int v4l2_subdev_command(struct v4l2_subdev *sd, unsigned cmd, void *arg)
28{
29 switch (cmd) {
30 case VIDIOC_QUERYCTRL:
31 return v4l2_subdev_call(sd, core, querymenu, arg);
32 case VIDIOC_G_CTRL:
33 return v4l2_subdev_call(sd, core, g_ctrl, arg);
34 case VIDIOC_S_CTRL:
35 return v4l2_subdev_call(sd, core, s_ctrl, arg);
36 case VIDIOC_QUERYMENU:
37 return v4l2_subdev_call(sd, core, queryctrl, arg);
38 case VIDIOC_LOG_STATUS:
39 return v4l2_subdev_call(sd, core, log_status);
40 case VIDIOC_G_CHIP_IDENT:
41 return v4l2_subdev_call(sd, core, g_chip_ident, arg);
42 case VIDIOC_INT_S_STANDBY:
43 return v4l2_subdev_call(sd, core, s_standby, arg ? (*(u32 *)arg) : 0);
44 case VIDIOC_INT_RESET:
45 return v4l2_subdev_call(sd, core, reset, arg ? (*(u32 *)arg) : 0);
46 case VIDIOC_INT_S_GPIO:
47 return v4l2_subdev_call(sd, core, s_gpio, arg ? (*(u32 *)arg) : 0);
48 case VIDIOC_INT_INIT:
49 return v4l2_subdev_call(sd, core, init, arg ? (*(u32 *)arg) : 0);
50#ifdef CONFIG_VIDEO_ADV_DEBUG
51 case VIDIOC_DBG_G_REGISTER:
52 return v4l2_subdev_call(sd, core, g_register, arg);
53 case VIDIOC_DBG_S_REGISTER:
54 return v4l2_subdev_call(sd, core, s_register, arg);
55#endif
56
57 case VIDIOC_INT_S_TUNER_MODE:
58 return v4l2_subdev_call(sd, tuner, s_mode, *(enum v4l2_tuner_type *)arg);
59 case AUDC_SET_RADIO:
60 return v4l2_subdev_call(sd, tuner, s_radio);
61 case VIDIOC_S_TUNER:
62 return v4l2_subdev_call(sd, tuner, s_tuner, arg);
63 case VIDIOC_G_TUNER:
64 return v4l2_subdev_call(sd, tuner, g_tuner, arg);
65 case VIDIOC_S_STD:
66 return v4l2_subdev_call(sd, tuner, s_std, *(v4l2_std_id *)arg);
67 case VIDIOC_S_FREQUENCY:
68 return v4l2_subdev_call(sd, tuner, s_frequency, arg);
69 case VIDIOC_G_FREQUENCY:
70 return v4l2_subdev_call(sd, tuner, g_frequency, arg);
71 case TUNER_SET_TYPE_ADDR:
72 return v4l2_subdev_call(sd, tuner, s_type_addr, arg);
73 case TUNER_SET_CONFIG:
74 return v4l2_subdev_call(sd, tuner, s_config, arg);
75
76 case VIDIOC_INT_AUDIO_CLOCK_FREQ:
77 return v4l2_subdev_call(sd, audio, s_clock_freq, *(u32 *)arg);
78 case VIDIOC_INT_S_AUDIO_ROUTING:
79 return v4l2_subdev_call(sd, audio, s_routing, arg);
80 case VIDIOC_INT_I2S_CLOCK_FREQ:
81 return v4l2_subdev_call(sd, audio, s_i2s_clock_freq, *(u32 *)arg);
82
83 case VIDIOC_INT_S_VIDEO_ROUTING:
84 return v4l2_subdev_call(sd, video, s_routing, arg);
85 case VIDIOC_INT_S_CRYSTAL_FREQ:
86 return v4l2_subdev_call(sd, video, s_crystal_freq, arg);
87 case VIDIOC_INT_DECODE_VBI_LINE:
88 return v4l2_subdev_call(sd, video, decode_vbi_line, arg);
89 case VIDIOC_INT_S_VBI_DATA:
90 return v4l2_subdev_call(sd, video, s_vbi_data, arg);
91 case VIDIOC_INT_G_VBI_DATA:
92 return v4l2_subdev_call(sd, video, g_vbi_data, arg);
93 case VIDIOC_G_SLICED_VBI_CAP:
94 return v4l2_subdev_call(sd, video, g_sliced_vbi_cap, arg);
95 case VIDIOC_S_FMT:
96 return v4l2_subdev_call(sd, video, s_fmt, arg);
97 case VIDIOC_G_FMT:
98 return v4l2_subdev_call(sd, video, g_fmt, arg);
99 case VIDIOC_INT_S_STD_OUTPUT:
100 return v4l2_subdev_call(sd, video, s_std_output, *(v4l2_std_id *)arg);
101 case VIDIOC_STREAMON:
102 return v4l2_subdev_call(sd, video, s_stream, 1);
103 case VIDIOC_STREAMOFF:
104 return v4l2_subdev_call(sd, video, s_stream, 0);
105
106 default:
107 return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
108 }
109}
110EXPORT_SYMBOL_GPL(v4l2_subdev_command);
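v4l2_subdev_command() above is a bridge from the old numeric i2c commands to the new typed sub-device operations, and it leans on v4l2_subdev_call(), which, as used here, checks the ops group and the individual op before calling and fails softly when either is absent. The guarded-dispatch shape it relies on, reduced to a toy; all toy_* types and names are illustrative, not the real v4l2 definitions.

#include <linux/errno.h>

struct toy_tuner_ops {
        int (*s_std)(void *priv, unsigned long norm);
};

struct toy_ops {
        const struct toy_tuner_ops *tuner;
};

struct toy_subdev {
        const struct toy_ops *ops;
        void *priv;
};

static int toy_call_s_std(struct toy_subdev *sd, unsigned long norm)
{
        if (!sd)
                return -ENODEV;
        if (!sd->ops->tuner || !sd->ops->tuner->s_std)
                return -ENOIOCTLCMD;    /* op not implemented: soft failure */
        return sd->ops->tuner->s_std(sd->priv, norm);
}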
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 1efc5f3462c6..a72a361daade 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -4237,8 +4237,7 @@ error:
4237 return ret; 4237 return ret;
4238} 4238}
4239 4239
4240static int vino_do_ioctl(struct inode *inode, struct file *file, 4240static int vino_do_ioctl(struct file *file, unsigned int cmd, void *arg)
4241 unsigned int cmd, void *arg)
4242{ 4241{
4243 struct vino_channel_settings *vcs = video_drvdata(file); 4242 struct vino_channel_settings *vcs = video_drvdata(file);
4244 4243
@@ -4353,7 +4352,7 @@ static int vino_ioctl(struct inode *inode, struct file *file,
4353 if (mutex_lock_interruptible(&vcs->mutex)) 4352 if (mutex_lock_interruptible(&vcs->mutex))
4354 return -EINTR; 4353 return -EINTR;
4355 4354
4356 ret = video_usercopy(inode, file, cmd, arg, vino_do_ioctl); 4355 ret = video_usercopy(file, cmd, arg, vino_do_ioctl);
4357 4356
4358 mutex_unlock(&vcs->mutex); 4357 mutex_unlock(&vcs->mutex);
4359 4358
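The vino hunks above track the video_usercopy() prototype losing its inode argument; a minimal sketch of the new-style callback pair, with foo_* as placeholder names:

/* Sketch only: video_usercopy() does the user-space copies and hands
 * the per-command handler a kernel pointer plus (file, cmd). */
static int foo_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
	return 0;	/* dispatch on cmd here */
}

static int foo_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	return video_usercopy(file, cmd, arg, foo_do_ioctl);
}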
diff --git a/drivers/media/video/vp27smpx.c b/drivers/media/video/vp27smpx.c
index 577956c5410b..f72b859486ad 100644
--- a/drivers/media/video/vp27smpx.c
+++ b/drivers/media/video/vp27smpx.c
@@ -28,7 +28,7 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-id.h> 29#include <linux/i2c-id.h>
30#include <linux/videodev2.h> 30#include <linux/videodev2.h>
31#include <media/v4l2-common.h> 31#include <media/v4l2-device.h>
32#include <media/v4l2-chip-ident.h> 32#include <media/v4l2-chip-ident.h>
33#include <media/v4l2-i2c-drv.h> 33#include <media/v4l2-i2c-drv.h>
34 34
@@ -40,13 +40,20 @@ MODULE_LICENSE("GPL");
40/* ----------------------------------------------------------------------- */ 40/* ----------------------------------------------------------------------- */
41 41
42struct vp27smpx_state { 42struct vp27smpx_state {
43 struct v4l2_subdev sd;
43 int radio; 44 int radio;
44 u32 audmode; 45 u32 audmode;
45}; 46};
46 47
47static void vp27smpx_set_audmode(struct i2c_client *client, u32 audmode) 48static inline struct vp27smpx_state *to_state(struct v4l2_subdev *sd)
48{ 49{
49 struct vp27smpx_state *state = i2c_get_clientdata(client); 50 return container_of(sd, struct vp27smpx_state, sd);
51}
52
53static void vp27smpx_set_audmode(struct v4l2_subdev *sd, u32 audmode)
54{
55 struct vp27smpx_state *state = to_state(sd);
56 struct i2c_client *client = v4l2_get_subdevdata(sd);
50 u8 data[3] = { 0x00, 0x00, 0x04 }; 57 u8 data[3] = { 0x00, 0x00, 0x04 };
51 58
52 switch (audmode) { 59 switch (audmode) {
@@ -63,55 +70,89 @@ static void vp27smpx_set_audmode(struct i2c_client *client, u32 audmode)
63 } 70 }
64 71
65 if (i2c_master_send(client, data, sizeof(data)) != sizeof(data)) 72 if (i2c_master_send(client, data, sizeof(data)) != sizeof(data))
66 v4l_err(client, "%s: I/O error setting audmode\n", 73 v4l2_err(sd, "I/O error setting audmode\n");
67 client->name);
68 else 74 else
69 state->audmode = audmode; 75 state->audmode = audmode;
70} 76}
71 77
72static int vp27smpx_command(struct i2c_client *client, unsigned cmd, void *arg) 78static int vp27smpx_s_radio(struct v4l2_subdev *sd)
73{ 79{
74 struct vp27smpx_state *state = i2c_get_clientdata(client); 80 struct vp27smpx_state *state = to_state(sd);
75 struct v4l2_tuner *vt = arg;
76 81
77 switch (cmd) { 82 state->radio = 1;
78 case AUDC_SET_RADIO: 83 return 0;
79 state->radio = 1; 84}
80 break;
81 85
82 case VIDIOC_S_STD: 86static int vp27smpx_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
83 state->radio = 0; 87{
84 break; 88 struct vp27smpx_state *state = to_state(sd);
85 89
86 case VIDIOC_S_TUNER: 90 state->radio = 0;
87 if (!state->radio) 91 return 0;
88 vp27smpx_set_audmode(client, vt->audmode); 92}
89 break;
90 93
91 case VIDIOC_G_TUNER: 94static int vp27smpx_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
92 if (state->radio) 95{
93 break; 96 struct vp27smpx_state *state = to_state(sd);
94 vt->audmode = state->audmode;
95 vt->capability = V4L2_TUNER_CAP_STEREO |
96 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
97 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
98 break;
99 97
100 case VIDIOC_G_CHIP_IDENT: 98 if (!state->radio)
101 return v4l2_chip_ident_i2c_client(client, arg, 99 vp27smpx_set_audmode(sd, vt->audmode);
102 V4L2_IDENT_VP27SMPX, 0); 100 return 0;
101}
103 102
104 case VIDIOC_LOG_STATUS: 103static int vp27smpx_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
105 v4l_info(client, "Audio Mode: %u%s\n", state->audmode, 104{
106 state->radio ? " (Radio)" : ""); 105 struct vp27smpx_state *state = to_state(sd);
107 break; 106
107 if (state->radio)
108 return 0;
109 vt->audmode = state->audmode;
110 vt->capability = V4L2_TUNER_CAP_STEREO |
111 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
112 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
113 return 0;
114}
108 115
109 default: 116static int vp27smpx_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
110 return -EINVAL; 117{
111 } 118 struct i2c_client *client = v4l2_get_subdevdata(sd);
119
120 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_VP27SMPX, 0);
121}
122
123static int vp27smpx_log_status(struct v4l2_subdev *sd)
124{
125 struct vp27smpx_state *state = to_state(sd);
126
127 v4l2_info(sd, "Audio Mode: %u%s\n", state->audmode,
128 state->radio ? " (Radio)" : "");
112 return 0; 129 return 0;
113} 130}
114 131
132static int vp27smpx_command(struct i2c_client *client, unsigned cmd, void *arg)
133{
134 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
135}
136
137/* ----------------------------------------------------------------------- */
138
139static const struct v4l2_subdev_core_ops vp27smpx_core_ops = {
140 .log_status = vp27smpx_log_status,
141 .g_chip_ident = vp27smpx_g_chip_ident,
142};
143
144static const struct v4l2_subdev_tuner_ops vp27smpx_tuner_ops = {
145 .s_radio = vp27smpx_s_radio,
146 .s_std = vp27smpx_s_std,
147 .s_tuner = vp27smpx_s_tuner,
148 .g_tuner = vp27smpx_g_tuner,
149};
150
151static const struct v4l2_subdev_ops vp27smpx_ops = {
152 .core = &vp27smpx_core_ops,
153 .tuner = &vp27smpx_tuner_ops,
154};
155
115/* ----------------------------------------------------------------------- */ 156/* ----------------------------------------------------------------------- */
116 157
117/* i2c implementation */ 158/* i2c implementation */
@@ -125,6 +166,7 @@ static int vp27smpx_probe(struct i2c_client *client,
125 const struct i2c_device_id *id) 166 const struct i2c_device_id *id)
126{ 167{
127 struct vp27smpx_state *state; 168 struct vp27smpx_state *state;
169 struct v4l2_subdev *sd;
128 170
129 /* Check if the adapter supports the needed features */ 171 /* Check if the adapter supports the needed features */
130 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 172 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -136,17 +178,21 @@ static int vp27smpx_probe(struct i2c_client *client,
136 state = kzalloc(sizeof(struct vp27smpx_state), GFP_KERNEL); 178 state = kzalloc(sizeof(struct vp27smpx_state), GFP_KERNEL);
137 if (state == NULL) 179 if (state == NULL)
138 return -ENOMEM; 180 return -ENOMEM;
181 sd = &state->sd;
182 v4l2_i2c_subdev_init(sd, client, &vp27smpx_ops);
139 state->audmode = V4L2_TUNER_MODE_STEREO; 183 state->audmode = V4L2_TUNER_MODE_STEREO;
140 i2c_set_clientdata(client, state);
141 184
142 /* initialize vp27smpx */ 185 /* initialize vp27smpx */
143 vp27smpx_set_audmode(client, state->audmode); 186 vp27smpx_set_audmode(sd, state->audmode);
144 return 0; 187 return 0;
145} 188}
146 189
147static int vp27smpx_remove(struct i2c_client *client) 190static int vp27smpx_remove(struct i2c_client *client)
148{ 191{
149 kfree(i2c_get_clientdata(client)); 192 struct v4l2_subdev *sd = i2c_get_clientdata(client);
193
194 v4l2_device_unregister_subdev(sd);
195 kfree(to_state(sd));
150 return 0; 196 return 0;
151} 197}
152 198
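The vp27smpx conversion above (and the wm8739/wm8775 ones below) follows one pattern: embed the subdev in the driver state, recover the state with container_of(), and bind client, subdev and ops in probe. A compact sketch with placeholder names:

/* Sketch only, placeholder names. */
struct foo_state {
	struct v4l2_subdev sd;
	u32 some_setting;
};

static inline struct foo_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct foo_state, sd);
}

/* in probe, after allocating state:
 *	v4l2_i2c_subdev_init(&state->sd, client, &foo_ops);
 */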
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index b2dbe48a92bb..56c570c267ea 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -727,8 +727,7 @@ static int w9966_wReg_i2c(struct w9966_dev* cam, int reg, int data)
727 * Video4linux interfacing 727 * Video4linux interfacing
728 */ 728 */
729 729
730static int w9966_v4l_do_ioctl(struct inode *inode, struct file *file, 730static int w9966_v4l_do_ioctl(struct file *file, unsigned int cmd, void *arg)
731 unsigned int cmd, void *arg)
732{ 731{
733 struct w9966_dev *cam = video_drvdata(file); 732 struct w9966_dev *cam = video_drvdata(file);
734 733
@@ -881,7 +880,7 @@ static int w9966_v4l_do_ioctl(struct inode *inode, struct file *file,
881static int w9966_v4l_ioctl(struct inode *inode, struct file *file, 880static int w9966_v4l_ioctl(struct inode *inode, struct file *file,
882 unsigned int cmd, unsigned long arg) 881 unsigned int cmd, unsigned long arg)
883{ 882{
884 return video_usercopy(inode, file, cmd, arg, w9966_v4l_do_ioctl); 883 return video_usercopy(file, cmd, arg, w9966_v4l_do_ioctl);
885} 884}
886 885
887// Capture data 886// Capture data
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index 54ac3fe26ec2..12a31e7a5f6d 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -28,7 +28,7 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-id.h> 29#include <linux/i2c-id.h>
30#include <linux/videodev2.h> 30#include <linux/videodev2.h>
31#include <media/v4l2-common.h> 31#include <media/v4l2-device.h>
32#include <media/v4l2-chip-ident.h> 32#include <media/v4l2-chip-ident.h>
33#include <media/v4l2-i2c-drv.h> 33#include <media/v4l2-i2c-drv.h>
34 34
@@ -52,6 +52,7 @@ enum {
52}; 52};
53 53
54struct wm8739_state { 54struct wm8739_state {
55 struct v4l2_subdev sd;
55 u32 clock_freq; 56 u32 clock_freq;
56 u8 muted; 57 u8 muted;
57 u16 volume; 58 u16 volume;
@@ -60,43 +61,49 @@ struct wm8739_state {
60 u8 vol_r; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */ 61 u8 vol_r; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
61}; 62};
62 63
64static inline struct wm8739_state *to_state(struct v4l2_subdev *sd)
65{
66 return container_of(sd, struct wm8739_state, sd);
67}
68
63/* ------------------------------------------------------------------------ */ 69/* ------------------------------------------------------------------------ */
64 70
65static int wm8739_write(struct i2c_client *client, int reg, u16 val) 71static int wm8739_write(struct v4l2_subdev *sd, int reg, u16 val)
66{ 72{
73 struct i2c_client *client = v4l2_get_subdevdata(sd);
67 int i; 74 int i;
68 75
69 if (reg < 0 || reg >= TOT_REGS) { 76 if (reg < 0 || reg >= TOT_REGS) {
70 v4l_err(client, "Invalid register R%d\n", reg); 77 v4l2_err(sd, "Invalid register R%d\n", reg);
71 return -1; 78 return -1;
72 } 79 }
73 80
74 v4l_dbg(1, debug, client, "write: %02x %02x\n", reg, val); 81 v4l2_dbg(1, debug, sd, "write: %02x %02x\n", reg, val);
75 82
76 for (i = 0; i < 3; i++) 83 for (i = 0; i < 3; i++)
77 if (i2c_smbus_write_byte_data(client, 84 if (i2c_smbus_write_byte_data(client,
78 (reg << 1) | (val >> 8), val & 0xff) == 0) 85 (reg << 1) | (val >> 8), val & 0xff) == 0)
79 return 0; 86 return 0;
80 v4l_err(client, "I2C: cannot write %03x to register R%d\n", val, reg); 87 v4l2_err(sd, "I2C: cannot write %03x to register R%d\n", val, reg);
81 return -1; 88 return -1;
82} 89}
83 90
84/* write regs to set audio volume etc */ 91/* write regs to set audio volume etc */
85static void wm8739_set_audio(struct i2c_client *client) 92static void wm8739_set_audio(struct v4l2_subdev *sd)
86{ 93{
87 struct wm8739_state *state = i2c_get_clientdata(client); 94 struct wm8739_state *state = to_state(sd);
88 u16 mute = state->muted ? 0x80 : 0; 95 u16 mute = state->muted ? 0x80 : 0;
89 96
90 /* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB 97 /* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB
91 * Default setting: 0x17 = 0 dB 98 * Default setting: 0x17 = 0 dB
92 */ 99 */
93 wm8739_write(client, R0, (state->vol_l & 0x1f) | mute); 100 wm8739_write(sd, R0, (state->vol_l & 0x1f) | mute);
94 wm8739_write(client, R1, (state->vol_r & 0x1f) | mute); 101 wm8739_write(sd, R1, (state->vol_r & 0x1f) | mute);
95} 102}
96 103
97static int wm8739_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl) 104static int wm8739_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
98{ 105{
99 struct wm8739_state *state = i2c_get_clientdata(client); 106 struct wm8739_state *state = to_state(sd);
100 107
101 switch (ctrl->id) { 108 switch (ctrl->id) {
102 case V4L2_CID_AUDIO_MUTE: 109 case V4L2_CID_AUDIO_MUTE:
@@ -117,9 +124,9 @@ static int wm8739_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
117 return 0; 124 return 0;
118} 125}
119 126
120static int wm8739_set_ctrl(struct i2c_client *client, struct v4l2_control *ctrl) 127static int wm8739_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
121{ 128{
122 struct wm8739_state *state = i2c_get_clientdata(client); 129 struct wm8739_state *state = to_state(sd);
123 unsigned int work_l, work_r; 130 unsigned int work_l, work_r;
124 131
125 switch (ctrl->id) { 132 switch (ctrl->id) {
@@ -147,7 +154,7 @@ static int wm8739_set_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
147 state->vol_r = (long)work_r * 31 / 65535; 154 state->vol_r = (long)work_r * 31 / 65535;
148 155
149 /* set audio volume etc. */ 156 /* set audio volume etc. */
150 wm8739_set_audio(client); 157 wm8739_set_audio(sd);
151 return 0; 158 return 0;
152} 159}
153 160
@@ -186,77 +193,89 @@ static struct v4l2_queryctrl wm8739_qctrl[] = {
186 193
187/* ------------------------------------------------------------------------ */ 194/* ------------------------------------------------------------------------ */
188 195
189static int wm8739_command(struct i2c_client *client, unsigned cmd, void *arg) 196static int wm8739_s_clock_freq(struct v4l2_subdev *sd, u32 audiofreq)
190{ 197{
191 struct wm8739_state *state = i2c_get_clientdata(client); 198 struct wm8739_state *state = to_state(sd);
192 199
193 switch (cmd) { 200 state->clock_freq = audiofreq;
194 case VIDIOC_INT_AUDIO_CLOCK_FREQ: 201 /* de-activate */
195 { 202 wm8739_write(sd, R9, 0x000);
196 u32 audiofreq = *(u32 *)arg; 203 switch (audiofreq) {
197 204 case 44100:
198 state->clock_freq = audiofreq; 205 /* 256fps, fs=44.1k */
199 /* de-activate */ 206 wm8739_write(sd, R8, 0x020);
200 wm8739_write(client, R9, 0x000); 207 break;
201 switch (audiofreq) { 208 case 48000:
202 case 44100: 209 /* 256fps, fs=48k */
203 /* 256fps, fs=44.1k */ 210 wm8739_write(sd, R8, 0x000);
204 wm8739_write(client, R8, 0x020); 211 break;
205 break; 212 case 32000:
206 case 48000: 213 /* 256fps, fs=32k */
207 /* 256fps, fs=48k */ 214 wm8739_write(sd, R8, 0x018);
208 wm8739_write(client, R8, 0x000); 215 break;
209 break; 216 default:
210 case 32000:
211 /* 256fps, fs=32k */
212 wm8739_write(client, R8, 0x018);
213 break;
214 default:
215 break;
216 }
217 /* activate */
218 wm8739_write(client, R9, 0x001);
219 break; 217 break;
220 } 218 }
219 /* activate */
220 wm8739_write(sd, R9, 0x001);
221 return 0;
222}
221 223
222 case VIDIOC_G_CTRL: 224static int wm8739_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
223 return wm8739_get_ctrl(client, arg); 225{
224 226 int i;
225 case VIDIOC_S_CTRL:
226 return wm8739_set_ctrl(client, arg);
227 227
228 case VIDIOC_QUERYCTRL: 228 for (i = 0; i < ARRAY_SIZE(wm8739_qctrl); i++)
229 { 229 if (qc->id && qc->id == wm8739_qctrl[i].id) {
230 struct v4l2_queryctrl *qc = arg; 230 memcpy(qc, &wm8739_qctrl[i], sizeof(*qc));
231 int i; 231 return 0;
232 232 }
233 for (i = 0; i < ARRAY_SIZE(wm8739_qctrl); i++) 233 return -EINVAL;
234 if (qc->id && qc->id == wm8739_qctrl[i].id) { 234}
235 memcpy(qc, &wm8739_qctrl[i], sizeof(*qc));
236 return 0;
237 }
238 return -EINVAL;
239 }
240 235
241 case VIDIOC_G_CHIP_IDENT: 236static int wm8739_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
242 return v4l2_chip_ident_i2c_client(client, 237{
243 arg, V4L2_IDENT_WM8739, 0); 238 struct i2c_client *client = v4l2_get_subdevdata(sd);
244 239
245 case VIDIOC_LOG_STATUS: 240 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_WM8739, 0);
246 v4l_info(client, "Frequency: %u Hz\n", state->clock_freq); 241}
247 v4l_info(client, "Volume L: %02x%s\n", state->vol_l & 0x1f,
248 state->muted ? " (muted)" : "");
249 v4l_info(client, "Volume R: %02x%s\n", state->vol_r & 0x1f,
250 state->muted ? " (muted)" : "");
251 break;
252 242
253 default: 243static int wm8739_log_status(struct v4l2_subdev *sd)
254 return -EINVAL; 244{
255 } 245 struct wm8739_state *state = to_state(sd);
256 246
247 v4l2_info(sd, "Frequency: %u Hz\n", state->clock_freq);
248 v4l2_info(sd, "Volume L: %02x%s\n", state->vol_l & 0x1f,
249 state->muted ? " (muted)" : "");
250 v4l2_info(sd, "Volume R: %02x%s\n", state->vol_r & 0x1f,
251 state->muted ? " (muted)" : "");
257 return 0; 252 return 0;
258} 253}
259 254
255static int wm8739_command(struct i2c_client *client, unsigned cmd, void *arg)
256{
257 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
258}
259
260/* ----------------------------------------------------------------------- */
261
262static const struct v4l2_subdev_core_ops wm8739_core_ops = {
263 .log_status = wm8739_log_status,
264 .g_chip_ident = wm8739_g_chip_ident,
265 .queryctrl = wm8739_queryctrl,
266 .g_ctrl = wm8739_g_ctrl,
267 .s_ctrl = wm8739_s_ctrl,
268};
269
270static const struct v4l2_subdev_audio_ops wm8739_audio_ops = {
271 .s_clock_freq = wm8739_s_clock_freq,
272};
273
274static const struct v4l2_subdev_ops wm8739_ops = {
275 .core = &wm8739_core_ops,
276 .audio = &wm8739_audio_ops,
277};
278
260/* ------------------------------------------------------------------------ */ 279/* ------------------------------------------------------------------------ */
261 280
262/* i2c implementation */ 281/* i2c implementation */
@@ -265,6 +284,7 @@ static int wm8739_probe(struct i2c_client *client,
265 const struct i2c_device_id *id) 284 const struct i2c_device_id *id)
266{ 285{
267 struct wm8739_state *state; 286 struct wm8739_state *state;
287 struct v4l2_subdev *sd;
268 288
269 /* Check if the adapter supports the needed features */ 289 /* Check if the adapter supports the needed features */
270 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 290 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -276,6 +296,8 @@ static int wm8739_probe(struct i2c_client *client,
276 state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL); 296 state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL);
277 if (state == NULL) 297 if (state == NULL)
278 return -ENOMEM; 298 return -ENOMEM;
299 sd = &state->sd;
300 v4l2_i2c_subdev_init(sd, client, &wm8739_ops);
279 state->vol_l = 0x17; /* 0dB */ 301 state->vol_l = 0x17; /* 0dB */
280 state->vol_r = 0x17; /* 0dB */ 302 state->vol_r = 0x17; /* 0dB */
281 state->muted = 0; 303 state->muted = 0;
@@ -283,31 +305,33 @@ static int wm8739_probe(struct i2c_client *client,
283 /* normalize (12dB(31) to -34.5dB(0) [0dB(23)] -> 65535 to 0) */ 305 /* normalize (12dB(31) to -34.5dB(0) [0dB(23)] -> 65535 to 0) */
284 state->volume = ((long)state->vol_l + 1) * 65535 / 31; 306 state->volume = ((long)state->vol_l + 1) * 65535 / 31;
285 state->clock_freq = 48000; 307 state->clock_freq = 48000;
286 i2c_set_clientdata(client, state);
287 308
288 /* Initialize wm8739 */ 309 /* Initialize wm8739 */
289 310
290 /* reset */ 311 /* reset */
291 wm8739_write(client, R15, 0x00); 312 wm8739_write(sd, R15, 0x00);
 292 /* filter setting, high pass, offset clear */ 313 /* filter setting, high pass, offset clear */
293 wm8739_write(client, R5, 0x000); 314 wm8739_write(sd, R5, 0x000);
294 /* ADC, OSC, Power Off mode Disable */ 315 /* ADC, OSC, Power Off mode Disable */
295 wm8739_write(client, R6, 0x000); 316 wm8739_write(sd, R6, 0x000);
296 /* Digital Audio interface format: 317 /* Digital Audio interface format:
297 Enable Master mode, 24 bit, MSB first/left justified */ 318 Enable Master mode, 24 bit, MSB first/left justified */
298 wm8739_write(client, R7, 0x049); 319 wm8739_write(sd, R7, 0x049);
299 /* sampling control: normal, 256fs, 48KHz sampling rate */ 320 /* sampling control: normal, 256fs, 48KHz sampling rate */
300 wm8739_write(client, R8, 0x000); 321 wm8739_write(sd, R8, 0x000);
301 /* activate */ 322 /* activate */
302 wm8739_write(client, R9, 0x001); 323 wm8739_write(sd, R9, 0x001);
303 /* set volume/mute */ 324 /* set volume/mute */
304 wm8739_set_audio(client); 325 wm8739_set_audio(sd);
305 return 0; 326 return 0;
306} 327}
307 328
308static int wm8739_remove(struct i2c_client *client) 329static int wm8739_remove(struct i2c_client *client)
309{ 330{
310 kfree(i2c_get_clientdata(client)); 331 struct v4l2_subdev *sd = i2c_get_clientdata(client);
332
333 v4l2_device_unregister_subdev(sd);
334 kfree(to_state(sd));
311 return 0; 335 return 0;
312} 336}
313 337
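With the ops tables registered, a caller reaches an individual op through v4l2_subdev_call(); a minimal sketch (the 48000 Hz value is arbitrary, the helper name is made up):

/* Sketch only: v4l2_subdev_call() returns -ENODEV for a NULL subdev
 * and -ENOIOCTLCMD when the op is not implemented. */
static int foo_set_audio_clock(struct v4l2_subdev *sd)
{
	return v4l2_subdev_call(sd, audio, s_clock_freq, 48000);
}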
diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
index 48df661d4fc3..d0220b0ec0bc 100644
--- a/drivers/media/video/wm8775.c
+++ b/drivers/media/video/wm8775.c
@@ -32,7 +32,7 @@
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/i2c-id.h> 33#include <linux/i2c-id.h>
34#include <linux/videodev2.h> 34#include <linux/videodev2.h>
35#include <media/v4l2-common.h> 35#include <media/v4l2-device.h>
36#include <media/v4l2-chip-ident.h> 36#include <media/v4l2-chip-ident.h>
37#include <media/v4l2-i2c-drv-legacy.h> 37#include <media/v4l2-i2c-drv-legacy.h>
38 38
@@ -54,16 +54,23 @@ enum {
54}; 54};
55 55
56struct wm8775_state { 56struct wm8775_state {
57 struct v4l2_subdev sd;
57 u8 input; /* Last selected input (0-0xf) */ 58 u8 input; /* Last selected input (0-0xf) */
58 u8 muted; 59 u8 muted;
59}; 60};
60 61
61static int wm8775_write(struct i2c_client *client, int reg, u16 val) 62static inline struct wm8775_state *to_state(struct v4l2_subdev *sd)
62{ 63{
64 return container_of(sd, struct wm8775_state, sd);
65}
66
67static int wm8775_write(struct v4l2_subdev *sd, int reg, u16 val)
68{
69 struct i2c_client *client = v4l2_get_subdevdata(sd);
63 int i; 70 int i;
64 71
65 if (reg < 0 || reg >= TOT_REGS) { 72 if (reg < 0 || reg >= TOT_REGS) {
66 v4l_err(client, "Invalid register R%d\n", reg); 73 v4l2_err(sd, "Invalid register R%d\n", reg);
67 return -1; 74 return -1;
68 } 75 }
69 76
@@ -71,84 +78,117 @@ static int wm8775_write(struct i2c_client *client, int reg, u16 val)
71 if (i2c_smbus_write_byte_data(client, 78 if (i2c_smbus_write_byte_data(client,
72 (reg << 1) | (val >> 8), val & 0xff) == 0) 79 (reg << 1) | (val >> 8), val & 0xff) == 0)
73 return 0; 80 return 0;
74 v4l_err(client, "I2C: cannot write %03x to register R%d\n", val, reg); 81 v4l2_err(sd, "I2C: cannot write %03x to register R%d\n", val, reg);
75 return -1; 82 return -1;
76} 83}
77 84
78static int wm8775_command(struct i2c_client *client, unsigned cmd, void *arg) 85static int wm8775_s_routing(struct v4l2_subdev *sd, const struct v4l2_routing *route)
79{ 86{
80 struct wm8775_state *state = i2c_get_clientdata(client); 87 struct wm8775_state *state = to_state(sd);
81 struct v4l2_routing *route = arg; 88
82 struct v4l2_control *ctrl = arg; 89 /* There are 4 inputs and one output. Zero or more inputs
83 90 are multiplexed together to the output. Hence there are
84 switch (cmd) { 91 16 combinations.
85 case VIDIOC_INT_G_AUDIO_ROUTING: 92 If only one input is active (the normal case) then the
86 route->input = state->input; 93 input values 1, 2, 4 or 8 should be used. */
87 route->output = 0; 94 if (route->input > 15) {
88 break; 95 v4l2_err(sd, "Invalid input %d.\n", route->input);
89
90 case VIDIOC_INT_S_AUDIO_ROUTING:
91 /* There are 4 inputs and one output. Zero or more inputs
92 are multiplexed together to the output. Hence there are
93 16 combinations.
94 If only one input is active (the normal case) then the
95 input values 1, 2, 4 or 8 should be used. */
96 if (route->input > 15) {
97 v4l_err(client, "Invalid input %d.\n", route->input);
98 return -EINVAL;
99 }
100 state->input = route->input;
101 if (state->muted)
102 break;
103 wm8775_write(client, R21, 0x0c0);
104 wm8775_write(client, R14, 0x1d4);
105 wm8775_write(client, R15, 0x1d4);
106 wm8775_write(client, R21, 0x100 + state->input);
107 break;
108
109 case VIDIOC_G_CTRL:
110 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
111 return -EINVAL;
112 ctrl->value = state->muted;
113 break;
114
115 case VIDIOC_S_CTRL:
116 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
117 return -EINVAL;
118 state->muted = ctrl->value;
119 wm8775_write(client, R21, 0x0c0);
120 wm8775_write(client, R14, 0x1d4);
121 wm8775_write(client, R15, 0x1d4);
122 if (!state->muted)
123 wm8775_write(client, R21, 0x100 + state->input);
124 break;
125
126 case VIDIOC_G_CHIP_IDENT:
127 return v4l2_chip_ident_i2c_client(client,
128 arg, V4L2_IDENT_WM8775, 0);
129
130 case VIDIOC_LOG_STATUS:
131 v4l_info(client, "Input: %d%s\n", state->input,
132 state->muted ? " (muted)" : "");
133 break;
134
135 case VIDIOC_S_FREQUENCY:
136 /* If I remove this, then it can happen that I have no
137 sound the first time I tune from static to a valid channel.
138 It's difficult to reproduce and is almost certainly related
139 to the zero cross detect circuit. */
140 wm8775_write(client, R21, 0x0c0);
141 wm8775_write(client, R14, 0x1d4);
142 wm8775_write(client, R15, 0x1d4);
143 wm8775_write(client, R21, 0x100 + state->input);
144 break;
145
146 default:
147 return -EINVAL; 96 return -EINVAL;
148 } 97 }
98 state->input = route->input;
99 if (state->muted)
100 return 0;
101 wm8775_write(sd, R21, 0x0c0);
102 wm8775_write(sd, R14, 0x1d4);
103 wm8775_write(sd, R15, 0x1d4);
104 wm8775_write(sd, R21, 0x100 + state->input);
105 return 0;
106}
107
108static int wm8775_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
109{
110 struct wm8775_state *state = to_state(sd);
111
112 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
113 return -EINVAL;
114 ctrl->value = state->muted;
115 return 0;
116}
117
118static int wm8775_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
119{
120 struct wm8775_state *state = to_state(sd);
121
122 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
123 return -EINVAL;
124 state->muted = ctrl->value;
125 wm8775_write(sd, R21, 0x0c0);
126 wm8775_write(sd, R14, 0x1d4);
127 wm8775_write(sd, R15, 0x1d4);
128 if (!state->muted)
129 wm8775_write(sd, R21, 0x100 + state->input);
130 return 0;
131}
132
133static int wm8775_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_chip_ident *chip)
134{
135 struct i2c_client *client = v4l2_get_subdevdata(sd);
136
137 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_WM8775, 0);
138}
139
140static int wm8775_log_status(struct v4l2_subdev *sd)
141{
142 struct wm8775_state *state = to_state(sd);
143
144 v4l2_info(sd, "Input: %d%s\n", state->input,
145 state->muted ? " (muted)" : "");
149 return 0; 146 return 0;
150} 147}
151 148
149static int wm8775_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
150{
151 struct wm8775_state *state = to_state(sd);
152
153 /* If I remove this, then it can happen that I have no
154 sound the first time I tune from static to a valid channel.
155 It's difficult to reproduce and is almost certainly related
156 to the zero cross detect circuit. */
157 wm8775_write(sd, R21, 0x0c0);
158 wm8775_write(sd, R14, 0x1d4);
159 wm8775_write(sd, R15, 0x1d4);
160 wm8775_write(sd, R21, 0x100 + state->input);
161 return 0;
162}
163
164static int wm8775_command(struct i2c_client *client, unsigned cmd, void *arg)
165{
166 return v4l2_subdev_command(i2c_get_clientdata(client), cmd, arg);
167}
168
169/* ----------------------------------------------------------------------- */
170
171static const struct v4l2_subdev_core_ops wm8775_core_ops = {
172 .log_status = wm8775_log_status,
173 .g_chip_ident = wm8775_g_chip_ident,
174 .g_ctrl = wm8775_g_ctrl,
175 .s_ctrl = wm8775_s_ctrl,
176};
177
178static const struct v4l2_subdev_tuner_ops wm8775_tuner_ops = {
179 .s_frequency = wm8775_s_frequency,
180};
181
182static const struct v4l2_subdev_audio_ops wm8775_audio_ops = {
183 .s_routing = wm8775_s_routing,
184};
185
186static const struct v4l2_subdev_ops wm8775_ops = {
187 .core = &wm8775_core_ops,
188 .tuner = &wm8775_tuner_ops,
189 .audio = &wm8775_audio_ops,
190};
191
152/* ----------------------------------------------------------------------- */ 192/* ----------------------------------------------------------------------- */
153 193
154/* i2c implementation */ 194/* i2c implementation */
@@ -162,56 +202,61 @@ static int wm8775_probe(struct i2c_client *client,
162 const struct i2c_device_id *id) 202 const struct i2c_device_id *id)
163{ 203{
164 struct wm8775_state *state; 204 struct wm8775_state *state;
205 struct v4l2_subdev *sd;
165 206
166 /* Check if the adapter supports the needed features */ 207 /* Check if the adapter supports the needed features */
167 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 208 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
168 return -EIO; 209 return -EIO;
169 210
170 v4l_info(client, "chip found @ 0x%x (%s)\n", 211 v4l_info(client, "chip found @ 0x%02x (%s)\n",
171 client->addr << 1, client->adapter->name); 212 client->addr << 1, client->adapter->name);
172 213
173 state = kmalloc(sizeof(struct wm8775_state), GFP_KERNEL); 214 state = kmalloc(sizeof(struct wm8775_state), GFP_KERNEL);
174 if (state == NULL) 215 if (state == NULL)
175 return -ENOMEM; 216 return -ENOMEM;
217 sd = &state->sd;
218 v4l2_i2c_subdev_init(sd, client, &wm8775_ops);
176 state->input = 2; 219 state->input = 2;
177 state->muted = 0; 220 state->muted = 0;
178 i2c_set_clientdata(client, state);
179 221
180 /* Initialize wm8775 */ 222 /* Initialize wm8775 */
181 223
182 /* RESET */ 224 /* RESET */
183 wm8775_write(client, R23, 0x000); 225 wm8775_write(sd, R23, 0x000);
184 /* Disable zero cross detect timeout */ 226 /* Disable zero cross detect timeout */
185 wm8775_write(client, R7, 0x000); 227 wm8775_write(sd, R7, 0x000);
186 /* Left justified, 24-bit mode */ 228 /* Left justified, 24-bit mode */
187 wm8775_write(client, R11, 0x021); 229 wm8775_write(sd, R11, 0x021);
188 /* Master mode, clock ratio 256fs */ 230 /* Master mode, clock ratio 256fs */
189 wm8775_write(client, R12, 0x102); 231 wm8775_write(sd, R12, 0x102);
190 /* Powered up */ 232 /* Powered up */
191 wm8775_write(client, R13, 0x000); 233 wm8775_write(sd, R13, 0x000);
192 /* ADC gain +2.5dB, enable zero cross */ 234 /* ADC gain +2.5dB, enable zero cross */
193 wm8775_write(client, R14, 0x1d4); 235 wm8775_write(sd, R14, 0x1d4);
194 /* ADC gain +2.5dB, enable zero cross */ 236 /* ADC gain +2.5dB, enable zero cross */
195 wm8775_write(client, R15, 0x1d4); 237 wm8775_write(sd, R15, 0x1d4);
196 /* ALC Stereo, ALC target level -1dB FS max gain +8dB */ 238 /* ALC Stereo, ALC target level -1dB FS max gain +8dB */
197 wm8775_write(client, R16, 0x1bf); 239 wm8775_write(sd, R16, 0x1bf);
198 /* Enable gain control, use zero cross detection, 240 /* Enable gain control, use zero cross detection,
199 ALC hold time 42.6 ms */ 241 ALC hold time 42.6 ms */
200 wm8775_write(client, R17, 0x185); 242 wm8775_write(sd, R17, 0x185);
201 /* ALC gain ramp up delay 34 s, ALC gain ramp down delay 33 ms */ 243 /* ALC gain ramp up delay 34 s, ALC gain ramp down delay 33 ms */
202 wm8775_write(client, R18, 0x0a2); 244 wm8775_write(sd, R18, 0x0a2);
203 /* Enable noise gate, threshold -72dBfs */ 245 /* Enable noise gate, threshold -72dBfs */
204 wm8775_write(client, R19, 0x005); 246 wm8775_write(sd, R19, 0x005);
205 /* Transient window 4ms, lower PGA gain limit -1dB */ 247 /* Transient window 4ms, lower PGA gain limit -1dB */
206 wm8775_write(client, R20, 0x07a); 248 wm8775_write(sd, R20, 0x07a);
207 /* LRBOTH = 1, use input 2. */ 249 /* LRBOTH = 1, use input 2. */
208 wm8775_write(client, R21, 0x102); 250 wm8775_write(sd, R21, 0x102);
209 return 0; 251 return 0;
210} 252}
211 253
212static int wm8775_remove(struct i2c_client *client) 254static int wm8775_remove(struct i2c_client *client)
213{ 255{
214 kfree(i2c_get_clientdata(client)); 256 struct v4l2_subdev *sd = i2c_get_clientdata(client);
257
258 v4l2_device_unregister_subdev(sd);
259 kfree(to_state(sd));
215 return 0; 260 return 0;
216} 261}
217 262
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 9fc581707638..9d00e6056491 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -1020,7 +1020,7 @@ zc0301_vidioc_querycap(struct zc0301_device* cam, void __user * arg)
1020 1020
1021 strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card)); 1021 strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card));
1022 if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0) 1022 if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0)
1023 strlcpy(cap.bus_info, cam->usbdev->dev.bus_id, 1023 strlcpy(cap.bus_info, dev_name(&cam->usbdev->dev),
1024 sizeof(cap.bus_info)); 1024 sizeof(cap.bus_info));
1025 1025
1026 if (copy_to_user(arg, &cap, sizeof(cap))) 1026 if (copy_to_user(arg, &cap, sizeof(cap)))
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index fa5f2f8f518a..05f39195372e 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -153,12 +153,6 @@ MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
153MODULE_AUTHOR("Serguei Miridonov"); 153MODULE_AUTHOR("Serguei Miridonov");
154MODULE_LICENSE("GPL"); 154MODULE_LICENSE("GPL");
155 155
156static struct pci_device_id zr36067_pci_tbl[] = {
157 {PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36057,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
159 {0}
160};
161MODULE_DEVICE_TABLE(pci, zr36067_pci_tbl);
162 156
163int zoran_num; /* number of Buzs in use */ 157int zoran_num; /* number of Buzs in use */
164struct zoran *zoran[BUZ_MAX]; 158struct zoran *zoran[BUZ_MAX];
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index db11ab9e60da..00b97d97aeaa 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1940,11 +1940,7 @@ zoran_set_input (struct zoran *zr,
1940 * ioctl routine 1940 * ioctl routine
1941 */ 1941 */
1942 1942
1943static int 1943static int zoran_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1944zoran_do_ioctl (struct inode *inode,
1945 struct file *file,
1946 unsigned int cmd,
1947 void *arg)
1948{ 1944{
1949 struct zoran_fh *fh = file->private_data; 1945 struct zoran_fh *fh = file->private_data;
1950 struct zoran *zr = fh->zr; 1946 struct zoran *zr = fh->zr;
@@ -4201,7 +4197,7 @@ zoran_ioctl (struct inode *inode,
4201 unsigned int cmd, 4197 unsigned int cmd,
4202 unsigned long arg) 4198 unsigned long arg)
4203{ 4199{
4204 return video_usercopy(inode, file, cmd, arg, zoran_do_ioctl); 4200 return video_usercopy(file, cmd, arg, zoran_do_ioctl);
4205} 4201}
4206 4202
4207static unsigned int 4203static unsigned int
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index d6a0074b9dc3..c4e8b9aa3827 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -952,7 +952,6 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
952/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 952/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
953/** 953/**
954 * mpt_free_msg_frame - Place MPT request frame back on FreeQ. 954 * mpt_free_msg_frame - Place MPT request frame back on FreeQ.
955 * @handle: Handle of registered MPT protocol driver
956 * @ioc: Pointer to MPT adapter structure 955 * @ioc: Pointer to MPT adapter structure
957 * @mf: Pointer to MPT request frame 956 * @mf: Pointer to MPT request frame
958 * 957 *
@@ -4563,7 +4562,7 @@ WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
4563 failcnt++; 4562 failcnt++;
4564 hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); 4563 hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
4565 /* don't overflow our IOC hs_reply[] buffer! */ 4564 /* don't overflow our IOC hs_reply[] buffer! */
4566 if (u16cnt < sizeof(ioc->hs_reply) / sizeof(ioc->hs_reply[0])) 4565 if (u16cnt < ARRAY_SIZE(ioc->hs_reply))
4567 hs_reply[u16cnt] = hword; 4566 hs_reply[u16cnt] = hword;
4568 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 4567 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
4569 } 4568 }
@@ -5422,7 +5421,6 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
5422/** 5421/**
5423 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes 5422 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
 5424 * @ioc: Pointer to an Adapter Structure 5423 * @ioc: Pointer to an Adapter Structure
5425 * @portnum: IOC port number
5426 * 5424 *
5427 * Return: 5425 * Return:
5428 * 0 on success 5426 * 0 on success
@@ -6939,7 +6937,6 @@ mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
6939/** 6937/**
6940 * mpt_spi_log_info - Log information returned from SCSI Parallel IOC. 6938 * mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
6941 * @ioc: Pointer to MPT_ADAPTER structure 6939 * @ioc: Pointer to MPT_ADAPTER structure
6942 * @mr: Pointer to MPT reply frame
6943 * @log_info: U32 LogInfo word from the IOC 6940 * @log_info: U32 LogInfo word from the IOC
6944 * 6941 *
6945 * Refer to lsi/sp_log.h. 6942 * Refer to lsi/sp_log.h.
@@ -7176,7 +7173,7 @@ union loginfo_type {
7176 7173
7177 sas_loginfo.loginfo = log_info; 7174 sas_loginfo.loginfo = log_info;
7178 if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) && 7175 if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) &&
7179 (sas_loginfo.dw.originator < sizeof(originator_str)/sizeof(char*))) 7176 (sas_loginfo.dw.originator < ARRAY_SIZE(originator_str)))
7180 return; 7177 return;
7181 7178
7182 originator_desc = originator_str[sas_loginfo.dw.originator]; 7179 originator_desc = originator_str[sas_loginfo.dw.originator];
@@ -7185,21 +7182,21 @@ union loginfo_type {
7185 7182
7186 case 0: /* IOP */ 7183 case 0: /* IOP */
7187 if (sas_loginfo.dw.code < 7184 if (sas_loginfo.dw.code <
7188 sizeof(iop_code_str)/sizeof(char*)) 7185 ARRAY_SIZE(iop_code_str))
7189 code_desc = iop_code_str[sas_loginfo.dw.code]; 7186 code_desc = iop_code_str[sas_loginfo.dw.code];
7190 break; 7187 break;
7191 case 1: /* PL */ 7188 case 1: /* PL */
7192 if (sas_loginfo.dw.code < 7189 if (sas_loginfo.dw.code <
7193 sizeof(pl_code_str)/sizeof(char*)) 7190 ARRAY_SIZE(pl_code_str))
7194 code_desc = pl_code_str[sas_loginfo.dw.code]; 7191 code_desc = pl_code_str[sas_loginfo.dw.code];
7195 break; 7192 break;
7196 case 2: /* IR */ 7193 case 2: /* IR */
7197 if (sas_loginfo.dw.code >= 7194 if (sas_loginfo.dw.code >=
7198 sizeof(ir_code_str)/sizeof(char*)) 7195 ARRAY_SIZE(ir_code_str))
7199 break; 7196 break;
7200 code_desc = ir_code_str[sas_loginfo.dw.code]; 7197 code_desc = ir_code_str[sas_loginfo.dw.code];
7201 if (sas_loginfo.dw.subcode >= 7198 if (sas_loginfo.dw.subcode >=
7202 sizeof(raid_sub_code_str)/sizeof(char*)) 7199 ARRAY_SIZE(raid_sub_code_str))
7203 break; 7200 break;
7204 if (sas_loginfo.dw.code == 0) 7201 if (sas_loginfo.dw.code == 0)
7205 sub_code_desc = 7202 sub_code_desc =
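The mptbase hunks above swap the open-coded sizeof(a)/sizeof(a[0]) for ARRAY_SIZE(); a tiny sketch of the idiom (table and helper are illustrative, not taken from the driver):

/* Sketch only: ARRAY_SIZE() from linux/kernel.h is defined so that
 * using it on a plain pointer fails to build, unlike the open-coded
 * division. */
static const char *code_str[] = { "IOP", "PL", "IR" };

static const char *code_desc(unsigned int code)
{
	return code < ARRAY_SIZE(code_str) ? code_str[code] : "unknown";
}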
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index e4c0db4dc7b1..9e485459f63b 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -474,9 +474,9 @@ static __init int asic3_gpio_probe(struct platform_device *pdev,
474 u16 dir_reg[ASIC3_NUM_GPIO_BANKS]; 474 u16 dir_reg[ASIC3_NUM_GPIO_BANKS];
475 int i; 475 int i;
476 476
477 memzero(alt_reg, ASIC3_NUM_GPIO_BANKS * sizeof(u16)); 477 memset(alt_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
478 memzero(out_reg, ASIC3_NUM_GPIO_BANKS * sizeof(u16)); 478 memset(out_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
479 memzero(dir_reg, ASIC3_NUM_GPIO_BANKS * sizeof(u16)); 479 memset(dir_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
480 480
481 /* Enable all GPIOs */ 481 /* Enable all GPIOs */
482 asic3_write_register(asic, ASIC3_GPIO_OFFSET(A, MASK), 0xffff); 482 asic3_write_register(asic, ASIC3_GPIO_OFFSET(A, MASK), 0xffff);
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index b4ed57e02729..6063dc2b52e8 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -18,7 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/string.h> 19#include <linux/string.h>
20 20
21#include <asm/dma.h> 21#include <mach/dma.h>
22#include <asm/system.h> 22#include <asm/system.h>
23 23
24#include "mcp.h" 24#include "mcp.h"
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 28380b20bc70..62b32dabf629 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -20,7 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22 22
23#include <asm/dma.h> 23#include <mach/dma.h>
24#include <mach/hardware.h> 24#include <mach/hardware.h>
25#include <asm/mach-types.h> 25#include <asm/mach-types.h>
26#include <asm/system.h> 26#include <asm/system.h>
diff --git a/drivers/mfd/ucb1x00-assabet.c b/drivers/mfd/ucb1x00-assabet.c
index 61aeaf79640d..86fed4870f93 100644
--- a/drivers/mfd/ucb1x00-assabet.c
+++ b/drivers/mfd/ucb1x00-assabet.c
@@ -15,7 +15,7 @@
15#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/device.h> 16#include <linux/device.h>
17 17
18#include <asm/dma.h> 18#include <mach/dma.h>
19 19
20#include "ucb1x00.h" 20#include "ucb1x00.h"
21 21
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index a316f1b75933..6860c924f364 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -25,7 +25,7 @@
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27 27
28#include <asm/dma.h> 28#include <mach/dma.h>
29#include <mach/hardware.h> 29#include <mach/hardware.h>
30 30
31#include "ucb1x00.h" 31#include "ucb1x00.h"
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 44762ca86a8d..61b7d3eb9a2f 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -31,7 +31,7 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/kthread.h> 32#include <linux/kthread.h>
33 33
34#include <asm/dma.h> 34#include <mach/dma.h>
35#include <mach/collie.h> 35#include <mach/collie.h>
36#include <asm/mach-types.h> 36#include <asm/mach-types.h>
37 37
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 2f0fcdb869b7..eb29b1d933ac 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -10,20 +10,6 @@
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * 2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz>
14 * Changed to conform redesigned i.MX scatter gather DMA interface
15 *
16 * 2005-11-04 Pavel Pisa <pisa@cmp.felk.cvut.cz>
17 * Updated for 2.6.14 kernel
18 *
19 * 2005-12-13 Jay Monkman <jtm@smoothsmoothie.com>
20 * Found and corrected problems in the write path
21 *
22 * 2005-12-30 Pavel Pisa <pisa@cmp.felk.cvut.cz>
23 * The event handling rewritten right way in softirq.
24 * Added many ugly hacks and delays to overcome SDHC
25 * deficiencies
26 *
27 */ 13 */
28 14
29#include <linux/module.h> 15#include <linux/module.h>
@@ -37,9 +23,9 @@
37#include <linux/mmc/card.h> 23#include <linux/mmc/card.h>
38#include <linux/delay.h> 24#include <linux/delay.h>
39#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/io.h>
40 27
41#include <asm/dma.h> 28#include <asm/dma.h>
42#include <asm/io.h>
43#include <asm/irq.h> 29#include <asm/irq.h>
44#include <asm/sizes.h> 30#include <asm/sizes.h>
45#include <mach/mmc.h> 31#include <mach/mmc.h>
@@ -50,17 +36,16 @@
50#define DRIVER_NAME "imx-mmc" 36#define DRIVER_NAME "imx-mmc"
51 37
52#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \ 38#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
53 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \ 39 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
54 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO) 40 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
55 41
56struct imxmci_host { 42struct imxmci_host {
57 struct mmc_host *mmc; 43 struct mmc_host *mmc;
58 spinlock_t lock; 44 spinlock_t lock;
59 struct resource *res; 45 struct resource *res;
46 void __iomem *base;
60 int irq; 47 int irq;
61 imx_dmach_t dma; 48 imx_dmach_t dma;
62 unsigned int clkrt;
63 unsigned int cmdat;
64 volatile unsigned int imask; 49 volatile unsigned int imask;
65 unsigned int power_mode; 50 unsigned int power_mode;
66 unsigned int present; 51 unsigned int present;
@@ -74,7 +59,7 @@ struct imxmci_host {
74 struct tasklet_struct tasklet; 59 struct tasklet_struct tasklet;
75 unsigned int status_reg; 60 unsigned int status_reg;
76 unsigned long pending_events; 61 unsigned long pending_events;
77 /* Next to fields are there for CPU driven transfers to overcome SDHC deficiencies */ 62 /* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
78 u16 *data_ptr; 63 u16 *data_ptr;
79 unsigned int data_cnt; 64 unsigned int data_cnt;
80 atomic_t stuck_timeout; 65 atomic_t stuck_timeout;
@@ -114,14 +99,22 @@ struct imxmci_host {
114static void imxmci_stop_clock(struct imxmci_host *host) 99static void imxmci_stop_clock(struct imxmci_host *host)
115{ 100{
116 int i = 0; 101 int i = 0;
117 MMC_STR_STP_CLK &= ~STR_STP_CLK_START_CLK; 102 u16 reg;
118 while(i < 0x1000) { 103
119 if(!(i & 0x7f)) 104 reg = readw(host->base + MMC_REG_STR_STP_CLK);
120 MMC_STR_STP_CLK |= STR_STP_CLK_STOP_CLK; 105 writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
106 while (i < 0x1000) {
107 if (!(i & 0x7f)) {
108 reg = readw(host->base + MMC_REG_STR_STP_CLK);
109 writew(reg | STR_STP_CLK_STOP_CLK,
110 host->base + MMC_REG_STR_STP_CLK);
111 }
121 112
122 if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) { 113 reg = readw(host->base + MMC_REG_STATUS);
114 if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
123 /* Check twice before cut */ 115 /* Check twice before cut */
124 if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) 116 reg = readw(host->base + MMC_REG_STATUS);
117 if (!(reg & STATUS_CARD_BUS_CLK_RUN))
125 return; 118 return;
126 } 119 }
127 120
@@ -135,8 +128,10 @@ static int imxmci_start_clock(struct imxmci_host *host)
135 unsigned int trials = 0; 128 unsigned int trials = 0;
136 unsigned int delay_limit = 128; 129 unsigned int delay_limit = 128;
137 unsigned long flags; 130 unsigned long flags;
131 u16 reg;
138 132
139 MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK; 133 reg = readw(host->base + MMC_REG_STR_STP_CLK);
134 writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
140 135
141 clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); 136 clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
142 137
@@ -145,18 +140,21 @@ static int imxmci_start_clock(struct imxmci_host *host)
145 * then 6 delay loops, but during card detection (low clockrate) 140 * then 6 delay loops, but during card detection (low clockrate)
146 * it takes up to 5000 delay loops and sometimes fails for the first time 141 * it takes up to 5000 delay loops and sometimes fails for the first time
147 */ 142 */
148 MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK; 143 reg = readw(host->base + MMC_REG_STR_STP_CLK);
144 writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
149 145
150 do { 146 do {
151 unsigned int delay = delay_limit; 147 unsigned int delay = delay_limit;
152 148
153 while(delay--){ 149 while (delay--) {
154 if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) 150 reg = readw(host->base + MMC_REG_STATUS);
151 if (reg & STATUS_CARD_BUS_CLK_RUN)
155 /* Check twice before cut */ 152 /* Check twice before cut */
156 if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) 153 reg = readw(host->base + MMC_REG_STATUS);
154 if (reg & STATUS_CARD_BUS_CLK_RUN)
157 return 0; 155 return 0;
158 156
159 if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) 157 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
160 return 0; 158 return 0;
161 } 159 }
162 160
@@ -167,58 +165,59 @@ static int imxmci_start_clock(struct imxmci_host *host)
167 * IRQ or schedule delays this function execution and the clocks has 165 * IRQ or schedule delays this function execution and the clocks has
168 * been already stopped by other means (response processing, SDHC HW) 166 * been already stopped by other means (response processing, SDHC HW)
169 */ 167 */
170 if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) 168 if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
171 MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK; 169 reg = readw(host->base + MMC_REG_STR_STP_CLK);
170 writew(reg | STR_STP_CLK_START_CLK,
171 host->base + MMC_REG_STR_STP_CLK);
172 }
172 local_irq_restore(flags); 173 local_irq_restore(flags);
173 174
174 } while(++trials<256); 175 } while (++trials < 256);
175 176
176 dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n"); 177 dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
177 178
178 return -1; 179 return -1;
179} 180}
180 181
181static void imxmci_softreset(void) 182static void imxmci_softreset(struct imxmci_host *host)
182{ 183{
184 int i;
185
183 /* reset sequence */ 186 /* reset sequence */
184 MMC_STR_STP_CLK = 0x8; 187 writew(0x08, host->base + MMC_REG_STR_STP_CLK);
185 MMC_STR_STP_CLK = 0xD; 188 writew(0x0D, host->base + MMC_REG_STR_STP_CLK);
186 MMC_STR_STP_CLK = 0x5; 189
187 MMC_STR_STP_CLK = 0x5; 190 for (i = 0; i < 8; i++)
188 MMC_STR_STP_CLK = 0x5; 191 writew(0x05, host->base + MMC_REG_STR_STP_CLK);
189 MMC_STR_STP_CLK = 0x5; 192
190 MMC_STR_STP_CLK = 0x5; 193 writew(0xff, host->base + MMC_REG_RES_TO);
191 MMC_STR_STP_CLK = 0x5; 194 writew(512, host->base + MMC_REG_BLK_LEN);
192 MMC_STR_STP_CLK = 0x5; 195 writew(1, host->base + MMC_REG_NOB);
193 MMC_STR_STP_CLK = 0x5;
194
195 MMC_RES_TO = 0xff;
196 MMC_BLK_LEN = 512;
197 MMC_NOB = 1;
198} 196}
199 197
200static int imxmci_busy_wait_for_status(struct imxmci_host *host, 198static int imxmci_busy_wait_for_status(struct imxmci_host *host,
201 unsigned int *pstat, unsigned int stat_mask, 199 unsigned int *pstat, unsigned int stat_mask,
202 int timeout, const char *where) 200 int timeout, const char *where)
203{ 201{
204 int loops=0; 202 int loops = 0;
205 while(!(*pstat & stat_mask)) { 203
206 loops+=2; 204 while (!(*pstat & stat_mask)) {
207 if(loops >= timeout) { 205 loops += 2;
206 if (loops >= timeout) {
208 dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n", 207 dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
209 where, *pstat, stat_mask); 208 where, *pstat, stat_mask);
210 return -1; 209 return -1;
211 } 210 }
212 udelay(2); 211 udelay(2);
213 *pstat |= MMC_STATUS; 212 *pstat |= readw(host->base + MMC_REG_STATUS);
214 } 213 }
215 if(!loops) 214 if (!loops)
216 return 0; 215 return 0;
217 216
218 /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */ 217 /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
219 if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000)) 218 if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
220 dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", 219 dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
221 loops, where, *pstat, stat_mask); 220 loops, where, *pstat, stat_mask);
222 return loops; 221 return loops;
223} 222}
224 223
@@ -235,8 +234,8 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
235 host->data = data; 234 host->data = data;
236 data->bytes_xfered = 0; 235 data->bytes_xfered = 0;
237 236
238 MMC_NOB = nob; 237 writew(nob, host->base + MMC_REG_NOB);
239 MMC_BLK_LEN = blksz; 238 writew(blksz, host->base + MMC_REG_BLK_LEN);
240 239
241 /* 240 /*
242 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise. 241 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
@@ -252,14 +251,14 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
252 host->dma_dir = DMA_FROM_DEVICE; 251 host->dma_dir = DMA_FROM_DEVICE;
253 252
254 /* Hack to enable read SCR */ 253 /* Hack to enable read SCR */
255 MMC_NOB = 1; 254 writew(1, host->base + MMC_REG_NOB);
256 MMC_BLK_LEN = 512; 255 writew(512, host->base + MMC_REG_BLK_LEN);
257 } else { 256 } else {
258 host->dma_dir = DMA_TO_DEVICE; 257 host->dma_dir = DMA_TO_DEVICE;
259 } 258 }
260 259
261 /* Convert back to virtual address */ 260 /* Convert back to virtual address */
262 host->data_ptr = (u16*)sg_virt(data->sg); 261 host->data_ptr = (u16 *)sg_virt(data->sg);
263 host->data_cnt = 0; 262 host->data_cnt = 0;
264 263
265 clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); 264 clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
@@ -271,10 +270,11 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
271 if (data->flags & MMC_DATA_READ) { 270 if (data->flags & MMC_DATA_READ) {
272 host->dma_dir = DMA_FROM_DEVICE; 271 host->dma_dir = DMA_FROM_DEVICE;
273 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, 272 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
274 data->sg_len, host->dma_dir); 273 data->sg_len, host->dma_dir);
275 274
276 imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, 275 imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
277 host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ); 276 host->res->start + MMC_REG_BUFFER_ACCESS,
277 DMA_MODE_READ);
278 278
279 /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/ 279 /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
280 CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN; 280 CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
@@ -282,10 +282,11 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
282 host->dma_dir = DMA_TO_DEVICE; 282 host->dma_dir = DMA_TO_DEVICE;
283 283
284 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, 284 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
285 data->sg_len, host->dma_dir); 285 data->sg_len, host->dma_dir);
286 286
287 imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, 287 imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
288 host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE); 288 host->res->start + MMC_REG_BUFFER_ACCESS,
289 DMA_MODE_WRITE);
289 290
290 /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/ 291 /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
291 CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN; 292 CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
@@ -293,12 +294,12 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
293 294
294#if 1 /* This code is there only for consistency checking and can be disabled in future */ 295#if 1 /* This code is there only for consistency checking and can be disabled in future */
295 host->dma_size = 0; 296 host->dma_size = 0;
296 for(i=0; i<host->dma_nents; i++) 297 for (i = 0; i < host->dma_nents; i++)
297 host->dma_size+=data->sg[i].length; 298 host->dma_size += data->sg[i].length;
298 299
299 if (datasz > host->dma_size) { 300 if (datasz > host->dma_size) {
300 dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n", 301 dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
301 datasz, host->dma_size); 302 datasz, host->dma_size);
302 } 303 }
303#endif 304#endif
304 305
@@ -306,7 +307,7 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
306 307
307 wmb(); 308 wmb();
308 309
309 if(host->actual_bus_width == MMC_BUS_WIDTH_4) 310 if (host->actual_bus_width == MMC_BUS_WIDTH_4)
310 BLR(host->dma) = 0; /* burst 64 byte read / 64 bytes write */ 311 BLR(host->dma) = 0; /* burst 64 byte read / 64 bytes write */
311 else 312 else
312 BLR(host->dma) = 16; /* burst 16 byte read / 16 bytes write */ 313 BLR(host->dma) = 16; /* burst 16 byte read / 16 bytes write */
@@ -317,9 +318,8 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
317 clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); 318 clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
318 319
319 /* start DMA engine for read, write is delayed after initial response */ 320 /* start DMA engine for read, write is delayed after initial response */
320 if (host->dma_dir == DMA_FROM_DEVICE) { 321 if (host->dma_dir == DMA_FROM_DEVICE)
321 imx_dma_enable(host->dma); 322 imx_dma_enable(host->dma);
322 }
323} 323}
324 324
325static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat) 325static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
@@ -351,16 +351,16 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd,
351 break; 351 break;
352 } 352 }
353 353
354 if ( test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events) ) 354 if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
355 cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */ 355 cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
356 356
357 if ( host->actual_bus_width == MMC_BUS_WIDTH_4 ) 357 if (host->actual_bus_width == MMC_BUS_WIDTH_4)
358 cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; 358 cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
359 359
360 MMC_CMD = cmd->opcode; 360 writew(cmd->opcode, host->base + MMC_REG_CMD);
361 MMC_ARGH = cmd->arg >> 16; 361 writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
362 MMC_ARGL = cmd->arg & 0xffff; 362 writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
363 MMC_CMD_DAT_CONT = cmdat; 363 writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
364 364
365 atomic_set(&host->stuck_timeout, 0); 365 atomic_set(&host->stuck_timeout, 0);
366 set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events); 366 set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
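The register writes above replace the fixed-address MMC_CMD/MMC_ARGH/MMC_ARGL lvalue macros with writew() against the ioremapped host->base; the 32-bit command argument is split across the 16-bit ARGH/ARGL pair. A small sketch of the same sequence, using the MMC_REG_* offsets introduced further down in imxmmc.h (the helper name is invented; the driver open-codes these writes):

    #include <linux/types.h>
    #include <linux/io.h>

    /* Hypothetical helper: issue a command the way imxmci_start_cmd() now does,
     * with 16-bit MMIO accesses relative to the ioremapped register block. */
    static void sketch_issue_cmd(void __iomem *base, u32 opcode, u32 arg, u16 cmdat)
    {
            writew(opcode,       base + MMC_REG_CMD);
            writew(arg >> 16,    base + MMC_REG_ARGH);          /* upper 16 bits */
            writew(arg & 0xffff, base + MMC_REG_ARGL);          /* lower 16 bits */
            writew(cmdat,        base + MMC_REG_CMD_DAT_CONT);
    }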
@@ -368,18 +368,18 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd,
368 368
369 imask = IMXMCI_INT_MASK_DEFAULT; 369 imask = IMXMCI_INT_MASK_DEFAULT;
370 imask &= ~INT_MASK_END_CMD_RES; 370 imask &= ~INT_MASK_END_CMD_RES;
371 if ( cmdat & CMD_DAT_CONT_DATA_ENABLE ) { 371 if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
372 /*imask &= ~INT_MASK_BUF_READY;*/ 372 /* imask &= ~INT_MASK_BUF_READY; */
373 imask &= ~INT_MASK_DATA_TRAN; 373 imask &= ~INT_MASK_DATA_TRAN;
374 if ( cmdat & CMD_DAT_CONT_WRITE ) 374 if (cmdat & CMD_DAT_CONT_WRITE)
375 imask &= ~INT_MASK_WRITE_OP_DONE; 375 imask &= ~INT_MASK_WRITE_OP_DONE;
376 if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) 376 if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
377 imask &= ~INT_MASK_BUF_READY; 377 imask &= ~INT_MASK_BUF_READY;
378 } 378 }
379 379
380 spin_lock_irqsave(&host->lock, flags); 380 spin_lock_irqsave(&host->lock, flags);
381 host->imask = imask; 381 host->imask = imask;
382 MMC_INT_MASK = host->imask; 382 writew(host->imask, host->base + MMC_REG_INT_MASK);
383 spin_unlock_irqrestore(&host->lock, flags); 383 spin_unlock_irqrestore(&host->lock, flags);
384 384
385 dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n", 385 dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
@@ -395,14 +395,14 @@ static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *
395 spin_lock_irqsave(&host->lock, flags); 395 spin_lock_irqsave(&host->lock, flags);
396 396
397 host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m | 397 host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
398 IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m); 398 IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
399 399
400 host->imask = IMXMCI_INT_MASK_DEFAULT; 400 host->imask = IMXMCI_INT_MASK_DEFAULT;
401 MMC_INT_MASK = host->imask; 401 writew(host->imask, host->base + MMC_REG_INT_MASK);
402 402
403 spin_unlock_irqrestore(&host->lock, flags); 403 spin_unlock_irqrestore(&host->lock, flags);
404 404
405 if(req && req->cmd) 405 if (req && req->cmd)
406 host->prev_cmd_code = req->cmd->opcode; 406 host->prev_cmd_code = req->cmd->opcode;
407 407
408 host->req = NULL; 408 host->req = NULL;
@@ -416,17 +416,17 @@ static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
416 struct mmc_data *data = host->data; 416 struct mmc_data *data = host->data;
417 int data_error; 417 int data_error;
418 418
419 if(test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)){ 419 if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
420 imx_dma_disable(host->dma); 420 imx_dma_disable(host->dma);
421 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, 421 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
422 host->dma_dir); 422 host->dma_dir);
423 } 423 }
424 424
425 if ( stat & STATUS_ERR_MASK ) { 425 if (stat & STATUS_ERR_MASK) {
426 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",stat); 426 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
427 if(stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR)) 427 if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
428 data->error = -EILSEQ; 428 data->error = -EILSEQ;
429 else if(stat & STATUS_TIME_OUT_READ) 429 else if (stat & STATUS_TIME_OUT_READ)
430 data->error = -ETIMEDOUT; 430 data->error = -ETIMEDOUT;
431 else 431 else
432 data->error = -EIO; 432 data->error = -EIO;
@@ -445,7 +445,7 @@ static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
445{ 445{
446 struct mmc_command *cmd = host->cmd; 446 struct mmc_command *cmd = host->cmd;
447 int i; 447 int i;
448 u32 a,b,c; 448 u32 a, b, c;
449 struct mmc_data *data = host->data; 449 struct mmc_data *data = host->data;
450 450
451 if (!cmd) 451 if (!cmd)
@@ -461,18 +461,18 @@ static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
461 cmd->error = -EILSEQ; 461 cmd->error = -EILSEQ;
462 } 462 }
463 463
464 if(cmd->flags & MMC_RSP_PRESENT) { 464 if (cmd->flags & MMC_RSP_PRESENT) {
465 if(cmd->flags & MMC_RSP_136) { 465 if (cmd->flags & MMC_RSP_136) {
466 for (i = 0; i < 4; i++) { 466 for (i = 0; i < 4; i++) {
467 u32 a = MMC_RES_FIFO & 0xffff; 467 a = readw(host->base + MMC_REG_RES_FIFO);
468 u32 b = MMC_RES_FIFO & 0xffff; 468 b = readw(host->base + MMC_REG_RES_FIFO);
469 cmd->resp[i] = a<<16 | b; 469 cmd->resp[i] = a << 16 | b;
470 } 470 }
471 } else { 471 } else {
472 a = MMC_RES_FIFO & 0xffff; 472 a = readw(host->base + MMC_REG_RES_FIFO);
473 b = MMC_RES_FIFO & 0xffff; 473 b = readw(host->base + MMC_REG_RES_FIFO);
474 c = MMC_RES_FIFO & 0xffff; 474 c = readw(host->base + MMC_REG_RES_FIFO);
475 cmd->resp[0] = a<<24 | b<<8 | c>>8; 475 cmd->resp[0] = a << 24 | b << 8 | c >> 8;
476 } 476 }
477 } 477 }
478 478
@@ -484,36 +484,34 @@ static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
484 484
485 /* Wait for FIFO to be empty before starting DMA write */ 485 /* Wait for FIFO to be empty before starting DMA write */
486 486
487 stat = MMC_STATUS; 487 stat = readw(host->base + MMC_REG_STATUS);
488 if(imxmci_busy_wait_for_status(host, &stat, 488 if (imxmci_busy_wait_for_status(host, &stat,
489 STATUS_APPL_BUFF_FE, 489 STATUS_APPL_BUFF_FE,
490 40, "imxmci_cmd_done DMA WR") < 0) { 490 40, "imxmci_cmd_done DMA WR") < 0) {
491 cmd->error = -EIO; 491 cmd->error = -EIO;
492 imxmci_finish_data(host, stat); 492 imxmci_finish_data(host, stat);
493 if(host->req) 493 if (host->req)
494 imxmci_finish_request(host, host->req); 494 imxmci_finish_request(host, host->req);
495 dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n", 495 dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
496 stat); 496 stat);
497 return 0; 497 return 0;
498 } 498 }
499 499
500 if(test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { 500 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
501 imx_dma_enable(host->dma); 501 imx_dma_enable(host->dma);
502 }
503 } 502 }
504 } else { 503 } else {
505 struct mmc_request *req; 504 struct mmc_request *req;
506 imxmci_stop_clock(host); 505 imxmci_stop_clock(host);
507 req = host->req; 506 req = host->req;
508 507
509 if(data) 508 if (data)
510 imxmci_finish_data(host, stat); 509 imxmci_finish_data(host, stat);
511 510
512 if( req ) { 511 if (req)
513 imxmci_finish_request(host, req); 512 imxmci_finish_request(host, req);
514 } else { 513 else
515 dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n"); 514 dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
516 }
517 } 515 }
518 516
519 return 1; 517 return 1;
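imxmci_busy_wait_for_status() is used above as a bounded poll on the STATUS register, for example waiting for STATUS_APPL_BUFF_FE with a loop budget of 40 before enabling the DMA write. A generic version of that pattern, deliberately simplified (the function and parameter names are illustrative, not the driver's):

    #include <linux/types.h>
    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Poll a 16-bit MMIO status register until any bit in @mask is set,
     * giving up after @loops iterations.  Returns 0 on success and leaves
     * the last value read in *last so the caller can inspect error bits. */
    static int sketch_poll_status(void __iomem *reg, u16 mask,
                                  unsigned int loops, u16 *last)
    {
            while (loops--) {
                    *last = readw(reg);
                    if (*last & mask)
                            return 0;
                    udelay(2);      /* short delay between polls */
            }
            return -ETIMEDOUT;
    }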
@@ -535,11 +533,10 @@ static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
535 } else { 533 } else {
536 struct mmc_request *req; 534 struct mmc_request *req;
537 req = host->req; 535 req = host->req;
538 if( req ) { 536 if (req)
539 imxmci_finish_request(host, req); 537 imxmci_finish_request(host, req);
540 } else { 538 else
541 dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n"); 539 dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
542 }
543 } 540 }
544 541
545 return 1; 542 return 1;
@@ -552,7 +549,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
552 int trans_done = 0; 549 int trans_done = 0;
553 unsigned int stat = *pstat; 550 unsigned int stat = *pstat;
554 551
555 if(host->actual_bus_width != MMC_BUS_WIDTH_4) 552 if (host->actual_bus_width != MMC_BUS_WIDTH_4)
556 burst_len = 16; 553 burst_len = 16;
557 else 554 else
558 burst_len = 64; 555 burst_len = 64;
@@ -563,44 +560,44 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
563 560
564 udelay(20); /* required for clocks < 8MHz*/ 561 udelay(20); /* required for clocks < 8MHz*/
565 562
566 if(host->dma_dir == DMA_FROM_DEVICE) { 563 if (host->dma_dir == DMA_FROM_DEVICE) {
567 imxmci_busy_wait_for_status(host, &stat, 564 imxmci_busy_wait_for_status(host, &stat,
568 STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE | 565 STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
569 STATUS_TIME_OUT_READ, 566 STATUS_TIME_OUT_READ,
570 50, "imxmci_cpu_driven_data read"); 567 50, "imxmci_cpu_driven_data read");
571 568
572 while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) && 569 while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
573 !(stat & STATUS_TIME_OUT_READ) && 570 !(stat & STATUS_TIME_OUT_READ) &&
574 (host->data_cnt < 512)) { 571 (host->data_cnt < 512)) {
575 572
576 udelay(20); /* required for clocks < 8MHz*/ 573 udelay(20); /* required for clocks < 8MHz*/
577 574
578 for(i = burst_len; i>=2 ; i-=2) { 575 for (i = burst_len; i >= 2 ; i -= 2) {
579 u16 data; 576 u16 data;
580 data = MMC_BUFFER_ACCESS; 577 data = readw(host->base + MMC_REG_BUFFER_ACCESS);
581 udelay(10); /* required for clocks < 8MHz*/ 578 udelay(10); /* required for clocks < 8MHz*/
582 if(host->data_cnt+2 <= host->dma_size) { 579 if (host->data_cnt+2 <= host->dma_size) {
583 *(host->data_ptr++) = data; 580 *(host->data_ptr++) = data;
584 } else { 581 } else {
585 if(host->data_cnt < host->dma_size) 582 if (host->data_cnt < host->dma_size)
586 *(u8*)(host->data_ptr) = data; 583 *(u8 *)(host->data_ptr) = data;
587 } 584 }
588 host->data_cnt += 2; 585 host->data_cnt += 2;
589 } 586 }
590 587
591 stat = MMC_STATUS; 588 stat = readw(host->base + MMC_REG_STATUS);
592 589
593 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n", 590 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
594 host->data_cnt, burst_len, stat); 591 host->data_cnt, burst_len, stat);
595 } 592 }
596 593
597 if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512)) 594 if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
598 trans_done = 1; 595 trans_done = 1;
599 596
600 if(host->dma_size & 0x1ff) 597 if (host->dma_size & 0x1ff)
601 stat &= ~STATUS_CRC_READ_ERR; 598 stat &= ~STATUS_CRC_READ_ERR;
602 599
603 if(stat & STATUS_TIME_OUT_READ) { 600 if (stat & STATUS_TIME_OUT_READ) {
604 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n", 601 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
605 stat); 602 stat);
606 trans_done = -1; 603 trans_done = -1;
@@ -608,12 +605,12 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
608 605
609 } else { 606 } else {
610 imxmci_busy_wait_for_status(host, &stat, 607 imxmci_busy_wait_for_status(host, &stat,
611 STATUS_APPL_BUFF_FE, 608 STATUS_APPL_BUFF_FE,
612 20, "imxmci_cpu_driven_data write"); 609 20, "imxmci_cpu_driven_data write");
613 610
614 while((stat & STATUS_APPL_BUFF_FE) && 611 while ((stat & STATUS_APPL_BUFF_FE) &&
615 (host->data_cnt < host->dma_size)) { 612 (host->data_cnt < host->dma_size)) {
616 if(burst_len >= host->dma_size - host->data_cnt) { 613 if (burst_len >= host->dma_size - host->data_cnt) {
617 burst_len = host->dma_size - host->data_cnt; 614 burst_len = host->dma_size - host->data_cnt;
618 host->data_cnt = host->dma_size; 615 host->data_cnt = host->dma_size;
619 trans_done = 1; 616 trans_done = 1;
@@ -621,10 +618,10 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
621 host->data_cnt += burst_len; 618 host->data_cnt += burst_len;
622 } 619 }
623 620
624 for(i = burst_len; i>0 ; i-=2) 621 for (i = burst_len; i > 0 ; i -= 2)
625 MMC_BUFFER_ACCESS = *(host->data_ptr++); 622 writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);
626 623
627 stat = MMC_STATUS; 624 stat = readw(host->base + MMC_REG_STATUS);
628 625
629 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n", 626 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
630 burst_len, stat); 627 burst_len, stat);
@@ -639,7 +636,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
639static void imxmci_dma_irq(int dma, void *devid) 636static void imxmci_dma_irq(int dma, void *devid)
640{ 637{
641 struct imxmci_host *host = devid; 638 struct imxmci_host *host = devid;
642 uint32_t stat = MMC_STATUS; 639 u32 stat = readw(host->base + MMC_REG_STATUS);
643 640
644 atomic_set(&host->stuck_timeout, 0); 641 atomic_set(&host->stuck_timeout, 0);
645 host->status_reg = stat; 642 host->status_reg = stat;
@@ -650,10 +647,11 @@ static void imxmci_dma_irq(int dma, void *devid)
650static irqreturn_t imxmci_irq(int irq, void *devid) 647static irqreturn_t imxmci_irq(int irq, void *devid)
651{ 648{
652 struct imxmci_host *host = devid; 649 struct imxmci_host *host = devid;
653 uint32_t stat = MMC_STATUS; 650 u32 stat = readw(host->base + MMC_REG_STATUS);
654 int handled = 1; 651 int handled = 1;
655 652
656 MMC_INT_MASK = host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT; 653 writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
654 host->base + MMC_REG_INT_MASK);
657 655
658 atomic_set(&host->stuck_timeout, 0); 656 atomic_set(&host->stuck_timeout, 0);
659 host->status_reg = stat; 657 host->status_reg = stat;
@@ -671,10 +669,10 @@ static void imxmci_tasklet_fnc(unsigned long data)
671 unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */ 669 unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
672 int timeout = 0; 670 int timeout = 0;
673 671
674 if(atomic_read(&host->stuck_timeout) > 4) { 672 if (atomic_read(&host->stuck_timeout) > 4) {
675 char *what; 673 char *what;
676 timeout = 1; 674 timeout = 1;
677 stat = MMC_STATUS; 675 stat = readw(host->base + MMC_REG_STATUS);
678 host->status_reg = stat; 676 host->status_reg = stat;
679 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) 677 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
680 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) 678 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
@@ -683,29 +681,37 @@ static void imxmci_tasklet_fnc(unsigned long data)
683 what = "RESP"; 681 what = "RESP";
684 else 682 else
685 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) 683 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
686 if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events)) 684 if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
687 what = "DATA"; 685 what = "DATA";
688 else 686 else
689 what = "DMA"; 687 what = "DMA";
690 else 688 else
691 what = "???"; 689 what = "???";
692 690
693 dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n", 691 dev_err(mmc_dev(host->mmc),
694 what, stat, MMC_INT_MASK); 692 "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
695 dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n", 693 what, stat,
696 MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma)); 694 readw(host->base + MMC_REG_INT_MASK));
695 dev_err(mmc_dev(host->mmc),
696 "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
697 readw(host->base + MMC_REG_CMD_DAT_CONT),
698 readw(host->base + MMC_REG_BLK_LEN),
699 readw(host->base + MMC_REG_NOB),
700 CCR(host->dma));
697 dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n", 701 dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
698 host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size); 702 host->cmd ? host->cmd->opcode : 0,
703 host->prev_cmd_code,
704 1 << host->actual_bus_width, host->dma_size);
699 } 705 }
700 706
701 if(!host->present || timeout) 707 if (!host->present || timeout)
702 host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ | 708 host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
703 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR; 709 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
704 710
705 if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) { 711 if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
706 clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); 712 clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
707 713
708 stat = MMC_STATUS; 714 stat = readw(host->base + MMC_REG_STATUS);
709 /* 715 /*
710 * This is not required in theory, but there is chance to miss some flag 716 * This is not required in theory, but there is chance to miss some flag
711 * which clears automatically by mask write, FreeScale original code keeps 717 * which clears automatically by mask write, FreeScale original code keeps
@@ -713,63 +719,62 @@ static void imxmci_tasklet_fnc(unsigned long data)
713 */ 719 */
714 stat |= host->status_reg; 720 stat |= host->status_reg;
715 721
716 if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) 722 if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
717 stat &= ~STATUS_CRC_READ_ERR; 723 stat &= ~STATUS_CRC_READ_ERR;
718 724
719 if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { 725 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
720 imxmci_busy_wait_for_status(host, &stat, 726 imxmci_busy_wait_for_status(host, &stat,
721 STATUS_END_CMD_RESP | STATUS_ERR_MASK, 727 STATUS_END_CMD_RESP | STATUS_ERR_MASK,
722 20, "imxmci_tasklet_fnc resp (ERRATUM #4)"); 728 20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
723 } 729 }
724 730
725 if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) { 731 if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
726 if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) 732 if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
727 imxmci_cmd_done(host, stat); 733 imxmci_cmd_done(host, stat);
728 if(host->data && (stat & STATUS_ERR_MASK)) 734 if (host->data && (stat & STATUS_ERR_MASK))
729 imxmci_data_done(host, stat); 735 imxmci_data_done(host, stat);
730 } 736 }
731 737
732 if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) { 738 if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
733 stat |= MMC_STATUS; 739 stat |= readw(host->base + MMC_REG_STATUS);
734 if(imxmci_cpu_driven_data(host, &stat)){ 740 if (imxmci_cpu_driven_data(host, &stat)) {
735 if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) 741 if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
736 imxmci_cmd_done(host, stat); 742 imxmci_cmd_done(host, stat);
737 atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m, 743 atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
738 &host->pending_events); 744 &host->pending_events);
739 imxmci_data_done(host, stat); 745 imxmci_data_done(host, stat);
740 } 746 }
741 } 747 }
742 } 748 }
743 749
744 if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) && 750 if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
745 !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { 751 !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
746 752
747 stat = MMC_STATUS; 753 stat = readw(host->base + MMC_REG_STATUS);
748 /* Same as above */ 754 /* Same as above */
749 stat |= host->status_reg; 755 stat |= host->status_reg;
750 756
751 if(host->dma_dir == DMA_TO_DEVICE) { 757 if (host->dma_dir == DMA_TO_DEVICE)
752 data_dir_mask = STATUS_WRITE_OP_DONE; 758 data_dir_mask = STATUS_WRITE_OP_DONE;
753 } else { 759 else
754 data_dir_mask = STATUS_DATA_TRANS_DONE; 760 data_dir_mask = STATUS_DATA_TRANS_DONE;
755 }
756 761
757 if(stat & data_dir_mask) { 762 if (stat & data_dir_mask) {
758 clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); 763 clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
759 imxmci_data_done(host, stat); 764 imxmci_data_done(host, stat);
760 } 765 }
761 } 766 }
762 767
763 if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) { 768 if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
764 769
765 if(host->cmd) 770 if (host->cmd)
766 imxmci_cmd_done(host, STATUS_TIME_OUT_RESP); 771 imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
767 772
768 if(host->data) 773 if (host->data)
769 imxmci_data_done(host, STATUS_TIME_OUT_READ | 774 imxmci_data_done(host, STATUS_TIME_OUT_READ |
770 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR); 775 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
771 776
772 if(host->req) 777 if (host->req)
773 imxmci_finish_request(host, host->req); 778 imxmci_finish_request(host, host->req);
774 779
775 mmc_detect_change(host->mmc, msecs_to_jiffies(100)); 780 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
@@ -796,9 +801,8 @@ static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
796 if (req->data->flags & MMC_DATA_WRITE) 801 if (req->data->flags & MMC_DATA_WRITE)
797 cmdat |= CMD_DAT_CONT_WRITE; 802 cmdat |= CMD_DAT_CONT_WRITE;
798 803
799 if (req->data->flags & MMC_DATA_STREAM) { 804 if (req->data->flags & MMC_DATA_STREAM)
800 cmdat |= CMD_DAT_CONT_STREAM_BLOCK; 805 cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
801 }
802 } 806 }
803 807
804 imxmci_start_cmd(host, req->cmd, cmdat); 808 imxmci_start_cmd(host, req->cmd, cmdat);
@@ -811,36 +815,37 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
811 struct imxmci_host *host = mmc_priv(mmc); 815 struct imxmci_host *host = mmc_priv(mmc);
812 int prescaler; 816 int prescaler;
813 817
814 if( ios->bus_width==MMC_BUS_WIDTH_4 ) { 818 if (ios->bus_width == MMC_BUS_WIDTH_4) {
815 host->actual_bus_width = MMC_BUS_WIDTH_4; 819 host->actual_bus_width = MMC_BUS_WIDTH_4;
816 imx_gpio_mode(PB11_PF_SD_DAT3); 820 imx_gpio_mode(PB11_PF_SD_DAT3);
817 }else{ 821 } else {
818 host->actual_bus_width = MMC_BUS_WIDTH_1; 822 host->actual_bus_width = MMC_BUS_WIDTH_1;
819 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); 823 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
820 } 824 }
821 825
822 if ( host->power_mode != ios->power_mode ) { 826 if (host->power_mode != ios->power_mode) {
823 switch (ios->power_mode) { 827 switch (ios->power_mode) {
824 case MMC_POWER_OFF: 828 case MMC_POWER_OFF:
825 break; 829 break;
826 case MMC_POWER_UP: 830 case MMC_POWER_UP:
827 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); 831 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
828 break; 832 break;
829 case MMC_POWER_ON: 833 case MMC_POWER_ON:
830 break; 834 break;
831 } 835 }
832 host->power_mode = ios->power_mode; 836 host->power_mode = ios->power_mode;
833 } 837 }
834 838
835 if ( ios->clock ) { 839 if (ios->clock) {
836 unsigned int clk; 840 unsigned int clk;
841 u16 reg;
837 842
838 /* The prescaler is 5 for PERCLK2 equal to 96MHz 843 /* The prescaler is 5 for PERCLK2 equal to 96MHz
839 * then 96MHz / 5 = 19.2 MHz 844 * then 96MHz / 5 = 19.2 MHz
840 */ 845 */
841 clk = clk_get_rate(host->clk); 846 clk = clk_get_rate(host->clk);
842 prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE; 847 prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
843 switch(prescaler) { 848 switch (prescaler) {
844 case 0: 849 case 0:
845 case 1: prescaler = 0; 850 case 1: prescaler = 0;
846 break; 851 break;
@@ -858,24 +863,29 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
858 dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n", 863 dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
859 clk, prescaler); 864 clk, prescaler);
860 865
861 for(clk=0; clk<8; clk++) { 866 for (clk = 0; clk < 8; clk++) {
862 int x; 867 int x;
863 x = CLK_RATE / (1<<clk); 868 x = CLK_RATE / (1 << clk);
864 if( x <= ios->clock) 869 if (x <= ios->clock)
865 break; 870 break;
866 } 871 }
867 872
868 MMC_STR_STP_CLK |= STR_STP_CLK_ENABLE; /* enable controller */ 873 /* enable controller */
874 reg = readw(host->base + MMC_REG_STR_STP_CLK);
875 writew(reg | STR_STP_CLK_ENABLE,
876 host->base + MMC_REG_STR_STP_CLK);
869 877
870 imxmci_stop_clock(host); 878 imxmci_stop_clock(host);
871 MMC_CLK_RATE = (prescaler<<3) | clk; 879 writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
872 /* 880 /*
873 * Under my understanding, clock should not be started there, because it would 881 * Under my understanding, clock should not be started there, because it would
874 * initiate SDHC sequencer and send last or random command into card 882 * initiate SDHC sequencer and send last or random command into card
875 */ 883 */
876 /*imxmci_start_clock(host);*/ 884 /* imxmci_start_clock(host); */
877 885
878 dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); 886 dev_dbg(mmc_dev(host->mmc),
887 "MMC_CLK_RATE: 0x%08x\n",
888 readw(host->base + MMC_REG_CLK_RATE));
879 } else { 889 } else {
880 imxmci_stop_clock(host); 890 imxmci_stop_clock(host);
881 } 891 }
@@ -915,10 +925,10 @@ static void imxmci_check_status(unsigned long data)
915 tasklet_schedule(&host->tasklet); 925 tasklet_schedule(&host->tasklet);
916 } 926 }
917 927
918 if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) || 928 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
919 test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { 929 test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
920 atomic_inc(&host->stuck_timeout); 930 atomic_inc(&host->stuck_timeout);
921 if(atomic_read(&host->stuck_timeout) > 4) 931 if (atomic_read(&host->stuck_timeout) > 4)
922 tasklet_schedule(&host->tasklet); 932 tasklet_schedule(&host->tasklet);
923 } else { 933 } else {
924 atomic_set(&host->stuck_timeout, 0); 934 atomic_set(&host->stuck_timeout, 0);
@@ -934,6 +944,7 @@ static int imxmci_probe(struct platform_device *pdev)
934 struct imxmci_host *host = NULL; 944 struct imxmci_host *host = NULL;
935 struct resource *r; 945 struct resource *r;
936 int ret = 0, irq; 946 int ret = 0, irq;
947 u16 rev_no;
937 948
938 printk(KERN_INFO "i.MX mmc driver\n"); 949 printk(KERN_INFO "i.MX mmc driver\n");
939 950
@@ -942,7 +953,8 @@ static int imxmci_probe(struct platform_device *pdev)
942 if (!r || irq < 0) 953 if (!r || irq < 0)
943 return -ENXIO; 954 return -ENXIO;
944 955
945 if (!request_mem_region(r->start, 0x100, pdev->name)) 956 r = request_mem_region(r->start, resource_size(r), pdev->name);
957 if (!r)
946 return -EBUSY; 958 return -EBUSY;
947 959
948 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev); 960 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
@@ -966,6 +978,12 @@ static int imxmci_probe(struct platform_device *pdev)
966 mmc->max_blk_count = 65535; 978 mmc->max_blk_count = 65535;
967 979
968 host = mmc_priv(mmc); 980 host = mmc_priv(mmc);
981 host->base = ioremap(r->start, resource_size(r));
982 if (!host->base) {
983 ret = -ENOMEM;
984 goto out;
985 }
986
969 host->mmc = mmc; 987 host->mmc = mmc;
970 host->dma_allocated = 0; 988 host->dma_allocated = 0;
971 host->pdata = pdev->dev.platform_data; 989 host->pdata = pdev->dev.platform_data;
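The probe changes above stop hard-coding a 0x100 region and a statically mapped register base: the memory resource is claimed for its full resource_size() and then ioremap()ed into host->base, with iounmap()/release_mem_region() added to the error path and to remove() later in this patch. A condensed sketch of that claim/map/unwind sequence (the helper name is illustrative):

    #include <linux/platform_device.h>
    #include <linux/ioport.h>
    #include <linux/io.h>
    #include <linux/errno.h>

    static int sketch_claim_and_map(struct platform_device *pdev, void __iomem **base)
    {
            struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

            if (!r)
                    return -ENXIO;
            if (!request_mem_region(r->start, resource_size(r), pdev->name))
                    return -EBUSY;

            *base = ioremap(r->start, resource_size(r));
            if (!*base) {
                    release_mem_region(r->start, resource_size(r));
                    return -ENOMEM;
            }
            /* undo with iounmap(*base) + release_mem_region() on remove/error */
            return 0;
    }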
@@ -993,18 +1011,20 @@ static int imxmci_probe(struct platform_device *pdev)
993 imx_gpio_mode(PB12_PF_SD_CLK); 1011 imx_gpio_mode(PB12_PF_SD_CLK);
994 imx_gpio_mode(PB13_PF_SD_CMD); 1012 imx_gpio_mode(PB13_PF_SD_CMD);
995 1013
996 imxmci_softreset(); 1014 imxmci_softreset(host);
997 1015
998 if ( MMC_REV_NO != 0x390 ) { 1016 rev_no = readw(host->base + MMC_REG_REV_NO);
1017 if (rev_no != 0x390) {
999 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", 1018 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1000 MMC_REV_NO); 1019 readw(host->base + MMC_REG_REV_NO));
1001 goto out; 1020 goto out;
1002 } 1021 }
1003 1022
1004 MMC_READ_TO = 0x2db4; /* recommended in data sheet */ 1023 /* recommended in data sheet */
1024 writew(0x2db4, host->base + MMC_REG_READ_TO);
1005 1025
1006 host->imask = IMXMCI_INT_MASK_DEFAULT; 1026 host->imask = IMXMCI_INT_MASK_DEFAULT;
1007 MMC_INT_MASK = host->imask; 1027 writew(host->imask, host->base + MMC_REG_INT_MASK);
1008 1028
1009 host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW); 1029 host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
1010 if(host->dma < 0) { 1030 if(host->dma < 0) {
@@ -1012,7 +1032,7 @@ static int imxmci_probe(struct platform_device *pdev)
1012 ret = -EBUSY; 1032 ret = -EBUSY;
1013 goto out; 1033 goto out;
1014 } 1034 }
1015 host->dma_allocated=1; 1035 host->dma_allocated = 1;
1016 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); 1036 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
1017 1037
1018 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); 1038 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
@@ -1032,7 +1052,7 @@ static int imxmci_probe(struct platform_device *pdev)
1032 host->timer.data = (unsigned long)host; 1052 host->timer.data = (unsigned long)host;
1033 host->timer.function = imxmci_check_status; 1053 host->timer.function = imxmci_check_status;
1034 add_timer(&host->timer); 1054 add_timer(&host->timer);
1035 mod_timer(&host->timer, jiffies + (HZ>>1)); 1055 mod_timer(&host->timer, jiffies + (HZ >> 1));
1036 1056
1037 platform_set_drvdata(pdev, mmc); 1057 platform_set_drvdata(pdev, mmc);
1038 1058
@@ -1042,18 +1062,20 @@ static int imxmci_probe(struct platform_device *pdev)
1042 1062
1043out: 1063out:
1044 if (host) { 1064 if (host) {
1045 if(host->dma_allocated){ 1065 if (host->dma_allocated) {
1046 imx_dma_free(host->dma); 1066 imx_dma_free(host->dma);
1047 host->dma_allocated=0; 1067 host->dma_allocated = 0;
1048 } 1068 }
1049 if (host->clk) { 1069 if (host->clk) {
1050 clk_disable(host->clk); 1070 clk_disable(host->clk);
1051 clk_put(host->clk); 1071 clk_put(host->clk);
1052 } 1072 }
1073 if (host->base)
1074 iounmap(host->base);
1053 } 1075 }
1054 if (mmc) 1076 if (mmc)
1055 mmc_free_host(mmc); 1077 mmc_free_host(mmc);
1056 release_mem_region(r->start, 0x100); 1078 release_mem_region(r->start, resource_size(r));
1057 return ret; 1079 return ret;
1058} 1080}
1059 1081
@@ -1072,9 +1094,10 @@ static int imxmci_remove(struct platform_device *pdev)
1072 mmc_remove_host(mmc); 1094 mmc_remove_host(mmc);
1073 1095
1074 free_irq(host->irq, host); 1096 free_irq(host->irq, host);
1075 if(host->dma_allocated){ 1097 iounmap(host->base);
1098 if (host->dma_allocated) {
1076 imx_dma_free(host->dma); 1099 imx_dma_free(host->dma);
1077 host->dma_allocated=0; 1100 host->dma_allocated = 0;
1078 } 1101 }
1079 1102
1080 tasklet_kill(&host->tasklet); 1103 tasklet_kill(&host->tasklet);
@@ -1082,7 +1105,7 @@ static int imxmci_remove(struct platform_device *pdev)
1082 clk_disable(host->clk); 1105 clk_disable(host->clk);
1083 clk_put(host->clk); 1106 clk_put(host->clk);
1084 1107
1085 release_mem_region(host->res->start, 0x100); 1108 release_mem_region(host->res->start, resource_size(host->res));
1086 1109
1087 mmc_free_host(mmc); 1110 mmc_free_host(mmc);
1088 } 1111 }
@@ -1109,7 +1132,7 @@ static int imxmci_resume(struct platform_device *dev)
1109 1132
1110 if (mmc) { 1133 if (mmc) {
1111 host = mmc_priv(mmc); 1134 host = mmc_priv(mmc);
1112 if(host) 1135 if (host)
1113 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); 1136 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
1114 ret = mmc_resume_host(mmc); 1137 ret = mmc_resume_host(mmc);
1115 } 1138 }
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
index e5339e334dbb..09d5d4ee3a77 100644
--- a/drivers/mmc/host/imxmmc.h
+++ b/drivers/mmc/host/imxmmc.h
@@ -1,24 +1,21 @@
1#define MMC_REG_STR_STP_CLK 0x00
2#define MMC_REG_STATUS 0x04
3#define MMC_REG_CLK_RATE 0x08
4#define MMC_REG_CMD_DAT_CONT 0x0C
5#define MMC_REG_RES_TO 0x10
6#define MMC_REG_READ_TO 0x14
7#define MMC_REG_BLK_LEN 0x18
8#define MMC_REG_NOB 0x1C
9#define MMC_REG_REV_NO 0x20
10#define MMC_REG_INT_MASK 0x24
11#define MMC_REG_CMD 0x28
12#define MMC_REG_ARGH 0x2C
13#define MMC_REG_ARGL 0x30
14#define MMC_REG_RES_FIFO 0x34
15#define MMC_REG_BUFFER_ACCESS 0x38
1 16
2# define __REG16(x) (*((volatile u16 *)IO_ADDRESS(x))) 17#define STR_STP_CLK_IPG_CLK_GATE_DIS (1<<15)
3 18#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14)
4#define MMC_STR_STP_CLK __REG16(IMX_MMC_BASE + 0x00)
5#define MMC_STATUS __REG16(IMX_MMC_BASE + 0x04)
6#define MMC_CLK_RATE __REG16(IMX_MMC_BASE + 0x08)
7#define MMC_CMD_DAT_CONT __REG16(IMX_MMC_BASE + 0x0C)
8#define MMC_RES_TO __REG16(IMX_MMC_BASE + 0x10)
9#define MMC_READ_TO __REG16(IMX_MMC_BASE + 0x14)
10#define MMC_BLK_LEN __REG16(IMX_MMC_BASE + 0x18)
11#define MMC_NOB __REG16(IMX_MMC_BASE + 0x1C)
12#define MMC_REV_NO __REG16(IMX_MMC_BASE + 0x20)
13#define MMC_INT_MASK __REG16(IMX_MMC_BASE + 0x24)
14#define MMC_CMD __REG16(IMX_MMC_BASE + 0x28)
15#define MMC_ARGH __REG16(IMX_MMC_BASE + 0x2C)
16#define MMC_ARGL __REG16(IMX_MMC_BASE + 0x30)
17#define MMC_RES_FIFO __REG16(IMX_MMC_BASE + 0x34)
18#define MMC_BUFFER_ACCESS __REG16(IMX_MMC_BASE + 0x38)
19#define MMC_BUFFER_ACCESS_OFS 0x38
20
21
22#define STR_STP_CLK_ENDIAN (1<<5) 19#define STR_STP_CLK_ENDIAN (1<<5)
23#define STR_STP_CLK_RESET (1<<3) 20#define STR_STP_CLK_RESET (1<<3)
24#define STR_STP_CLK_ENABLE (1<<2) 21#define STR_STP_CLK_ENABLE (1<<2)
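The header now carries only register offsets; the old __REG16(IMX_MMC_BASE + ...) lvalues, which dereferenced a fixed statically mapped address, are gone along with MMC_BUFFER_ACCESS_OFS. With offsets plus the ioremapped host->base, per-device accessors reduce to thin wrappers around readw()/writew(). A sketch of such wrappers (the names are invented; the driver simply open-codes the calls):

    #include <linux/types.h>
    #include <linux/io.h>

    /* Hypothetical wrappers: every SDHC register on this controller is 16 bits
     * wide, so 16-bit MMIO accessors against the ioremapped base are enough. */
    static inline u16 sketch_mmc_read(void __iomem *base, unsigned int reg)
    {
            return readw(base + reg);
    }

    static inline void sketch_mmc_write(void __iomem *base, unsigned int reg, u16 val)
    {
            writew(val, base + reg);
    }

    /* e.g. sketch_mmc_write(host->base, MMC_REG_INT_MASK, host->imask); */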
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2fadf323c696..1bcbdd6763ac 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -500,7 +500,7 @@ static int mmci_probe(struct amba_device *dev, void *id)
500 } 500 }
501 501
502 host = mmc_priv(mmc); 502 host = mmc_priv(mmc);
503 host->clk = clk_get(&dev->dev, "MCLK"); 503 host->clk = clk_get(&dev->dev, NULL);
504 if (IS_ERR(host->clk)) { 504 if (IS_ERR(host->clk)) {
505 ret = PTR_ERR(host->clk); 505 ret = PTR_ERR(host->clk);
506 host->clk = NULL; 506 host->clk = NULL;
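mmci now requests the device's clock with a NULL connection ID instead of the "MCLK" string; the same change appears below for pxamci ("MMCCLK") and pxa3xx_nand ("NANDCLK"), relying on the clock API's lookup matching on the struct device alone. The usual consumer sequence, sketched with simplified error handling (the helper name is made up):

    #include <linux/device.h>
    #include <linux/clk.h>
    #include <linux/err.h>

    static int sketch_enable_devclk(struct device *dev, struct clk **out,
                                    unsigned long *rate)
    {
            struct clk *clk = clk_get(dev, NULL);   /* NULL con_id: match on the device */
            int ret;

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_enable(clk);
            if (ret) {
                    clk_put(clk);
                    return ret;
            }

            *rate = clk_get_rate(clk);      /* e.g. used to derive a bus prescaler */
            *out = clk;                     /* later: clk_disable(clk); clk_put(clk); */
            return 0;
    }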
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 1b9fc3c6b875..67d7b7fef084 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1015,7 +1015,7 @@ static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data
1015 } 1015 }
1016 1016
1017 if (is_read) { 1017 if (is_read) {
1018 if (host->id == 1) { 1018 if (host->id == 0) {
1019 sync_dev = OMAP_DMA_MMC_RX; 1019 sync_dev = OMAP_DMA_MMC_RX;
1020 dma_dev_name = "MMC1 read"; 1020 dma_dev_name = "MMC1 read";
1021 } else { 1021 } else {
@@ -1023,7 +1023,7 @@ static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data
1023 dma_dev_name = "MMC2 read"; 1023 dma_dev_name = "MMC2 read";
1024 } 1024 }
1025 } else { 1025 } else {
1026 if (host->id == 1) { 1026 if (host->id == 0) {
1027 sync_dev = OMAP_DMA_MMC_TX; 1027 sync_dev = OMAP_DMA_MMC_TX;
1028 dma_dev_name = "MMC1 write"; 1028 dma_dev_name = "MMC1 write";
1029 } else { 1029 } else {
@@ -1317,7 +1317,7 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1317 host->slots[id] = slot; 1317 host->slots[id] = slot;
1318 1318
1319 mmc->caps = 0; 1319 mmc->caps = 0;
1320 if (host->pdata->conf.wire4) 1320 if (host->pdata->slots[id].wires >= 4)
1321 mmc->caps |= MMC_CAP_4_BIT_DATA; 1321 mmc->caps |= MMC_CAP_4_BIT_DATA;
1322 1322
1323 mmc->ops = &mmc_omap_ops; 1323 mmc->ops = &mmc_omap_ops;
@@ -1451,6 +1451,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1451 host->irq = irq; 1451 host->irq = irq;
1452 1452
1453 host->use_dma = 1; 1453 host->use_dma = 1;
1454 host->dev->dma_mask = &pdata->dma_mask;
1454 host->dma_ch = -1; 1455 host->dma_ch = -1;
1455 1456
1456 host->irq = irq; 1457 host->irq = irq;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ebfaa9960939..f88cc7406354 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -26,11 +26,12 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
29#include <linux/io.h>
29 30
30#include <asm/dma.h>
31#include <asm/io.h>
32#include <asm/sizes.h> 31#include <asm/sizes.h>
33 32
33#include <mach/dma.h>
34#include <mach/hardware.h>
34#include <mach/pxa-regs.h> 35#include <mach/pxa-regs.h>
35#include <mach/mmc.h> 36#include <mach/mmc.h>
36 37
@@ -533,7 +534,7 @@ static int pxamci_probe(struct platform_device *pdev)
533 host->pdata = pdev->dev.platform_data; 534 host->pdata = pdev->dev.platform_data;
534 host->clkrt = CLKRT_OFF; 535 host->clkrt = CLKRT_OFF;
535 536
536 host->clk = clk_get(&pdev->dev, "MMCCLK"); 537 host->clk = clk_get(&pdev->dev, NULL);
537 if (IS_ERR(host->clk)) { 538 if (IS_ERR(host->clk)) {
538 ret = PTR_ERR(host->clk); 539 ret = PTR_ERR(host->clk);
539 host->clk = NULL; 540 host->clk = NULL;
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 3b2085b57769..fcc98a4cce3c 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -25,7 +25,7 @@
25#include <mach/regs-sdi.h> 25#include <mach/regs-sdi.h>
26#include <mach/regs-gpio.h> 26#include <mach/regs-gpio.h>
27 27
28#include <asm/plat-s3c24xx/mci.h> 28#include <plat/mci.h>
29 29
30#include "s3cmci.h" 30#include "s3cmci.h"
31 31
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 3aa018c092f8..42969fe051b2 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -32,16 +32,15 @@ static struct mtd_info *dc21285_mtd;
32 */ 32 */
33static void nw_en_write(void) 33static void nw_en_write(void)
34{ 34{
35 extern spinlock_t gpio_lock;
36 unsigned long flags; 35 unsigned long flags;
37 36
38 /* 37 /*
39 * we want to write a bit pattern XXX1 to Xilinx to enable 38 * we want to write a bit pattern XXX1 to Xilinx to enable
40 * the write gate, which will be open for about the next 2ms. 39 * the write gate, which will be open for about the next 2ms.
41 */ 40 */
42 spin_lock_irqsave(&gpio_lock, flags); 41 spin_lock_irqsave(&nw_gpio_lock, flags);
43 cpld_modify(1, 1); 42 nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
44 spin_unlock_irqrestore(&gpio_lock, flags); 43 spin_unlock_irqrestore(&nw_gpio_lock, flags);
45 44
46 /* 45 /*
47 * let the ISA bus to catch on... 46 * let the ISA bus to catch on...
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index dcdb1f17577d..3ea1de9be720 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -170,7 +170,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
170 err = -ENOMEM; 170 err = -ENOMEM;
171 goto Error; 171 goto Error;
172 } 172 }
173 memzero(info, sizeof(struct ixp2000_flash_info)); 173 memset(info, 0, sizeof(struct ixp2000_flash_info));
174 174
175 platform_set_drvdata(dev, info); 175 platform_set_drvdata(dev, info);
176 176
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 9c7a5fbd4e51..16555cbeaea4 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -201,7 +201,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
201 err = -ENOMEM; 201 err = -ENOMEM;
202 goto Error; 202 goto Error;
203 } 203 }
204 memzero(info, sizeof(struct ixp4xx_flash_info)); 204 memset(info, 0, sizeof(struct ixp4xx_flash_info));
205 205
206 platform_set_drvdata(dev, info); 206 platform_set_drvdata(dev, info);
207 207
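The two map drivers above (and s3c2410.c further down) switch from the ARM-only memzero() helper to plain memset(). Where the buffer comes straight from kmalloc(), kzalloc() folds the allocation and the clearing into a single call; a sketch of that variant (the struct is a placeholder for the drivers' *_flash_info types):

    #include <linux/slab.h>

    struct sketch_flash_info {              /* placeholder fields */
            unsigned long window_base;
            int nr_parts;
    };

    static struct sketch_flash_info *sketch_alloc_info(void)
    {
            /* kzalloc() == kmalloc() followed by memset(..., 0, ...) */
            return kzalloc(sizeof(struct sketch_flash_info), GFP_KERNEL);
    }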
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1c2e9450d663..f8ae0400c49c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -408,7 +408,7 @@ config MTD_NAND_FSL_UPM
408 408
409config MTD_NAND_MXC 409config MTD_NAND_MXC
410 tristate "MXC NAND support" 410 tristate "MXC NAND support"
411 depends on ARCH_MX2 411 depends on ARCH_MX2 || ARCH_MX3
412 help 412 help
413 This enables the driver for the NAND flash controller on the 413 This enables the driver for the NAND flash controller on the
414 MXC processors. 414 MXC processors.
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 15f0a26730ae..fc4144495610 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -20,8 +20,8 @@
20#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <asm/dma.h>
24 23
24#include <mach/dma.h>
25#include <mach/pxa-regs.h> 25#include <mach/pxa-regs.h>
26#include <mach/pxa3xx_nand.h> 26#include <mach/pxa3xx_nand.h>
27 27
@@ -1080,7 +1080,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1080 this = &info->nand_chip; 1080 this = &info->nand_chip;
1081 mtd->priv = info; 1081 mtd->priv = info;
1082 1082
1083 info->clk = clk_get(&pdev->dev, "NANDCLK"); 1083 info->clk = clk_get(&pdev->dev, NULL);
1084 if (IS_ERR(info->clk)) { 1084 if (IS_ERR(info->clk)) {
1085 dev_err(&pdev->dev, "failed to get nand clock\n"); 1085 dev_err(&pdev->dev, "failed to get nand clock\n");
1086 ret = PTR_ERR(info->clk); 1086 ret = PTR_ERR(info->clk);
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 556139ed1fdf..8e375d5fe231 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -45,8 +45,8 @@
45 45
46#include <asm/io.h> 46#include <asm/io.h>
47 47
48#include <asm/plat-s3c/regs-nand.h> 48#include <plat/regs-nand.h>
49#include <asm/plat-s3c/nand.h> 49#include <plat/nand.h>
50 50
51#ifdef CONFIG_MTD_NAND_S3C2410_HWECC 51#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
52static int hardware_ecc = 1; 52static int hardware_ecc = 1;
@@ -818,7 +818,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
818 goto exit_error; 818 goto exit_error;
819 } 819 }
820 820
821 memzero(info, sizeof(*info)); 821 memset(info, 0, sizeof(*info));
822 platform_set_drvdata(pdev, info); 822 platform_set_drvdata(pdev, info);
823 823
824 spin_lock_init(&info->controller.lock); 824 spin_lock_init(&info->controller.lock);
@@ -883,7 +883,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
883 goto exit_error; 883 goto exit_error;
884 } 884 }
885 885
886 memzero(info->mtds, size); 886 memset(info->mtds, 0, size);
887 887
888 /* initialise all possible chips */ 888 /* initialise all possible chips */
889 889
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index a7e4d985f5ef..d1e0b8e7224b 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -149,7 +149,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
149 149
150 INIT_COMPLETION(c->irq_done); 150 INIT_COMPLETION(c->irq_done);
151 if (c->gpio_irq) { 151 if (c->gpio_irq) {
152 result = omap_get_gpio_datain(c->gpio_irq); 152 result = gpio_get_value(c->gpio_irq);
153 if (result == -1) { 153 if (result == -1) {
154 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); 154 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
155 intr = read_reg(c, ONENAND_REG_INTERRUPT); 155 intr = read_reg(c, ONENAND_REG_INTERRUPT);
@@ -634,9 +634,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
634 "OneNAND\n", c->gpio_irq); 634 "OneNAND\n", c->gpio_irq);
635 goto err_iounmap; 635 goto err_iounmap;
636 } 636 }
637 omap_set_gpio_direction(c->gpio_irq, 1); 637 gpio_direction_input(c->gpio_irq);
638 638
639 if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), 639 if ((r = request_irq(gpio_to_irq(c->gpio_irq),
640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING, 640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
641 pdev->dev.driver->name, c)) < 0) 641 pdev->dev.driver->name, c)) < 0)
642 goto err_release_gpio; 642 goto err_release_gpio;
@@ -723,7 +723,7 @@ err_release_dma:
723 if (c->dma_channel != -1) 723 if (c->dma_channel != -1)
724 omap_free_dma(c->dma_channel); 724 omap_free_dma(c->dma_channel);
725 if (c->gpio_irq) 725 if (c->gpio_irq)
726 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); 726 free_irq(gpio_to_irq(c->gpio_irq), c);
727err_release_gpio: 727err_release_gpio:
728 if (c->gpio_irq) 728 if (c->gpio_irq)
729 omap_free_gpio(c->gpio_irq); 729 omap_free_gpio(c->gpio_irq);
@@ -760,7 +760,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
760 omap2_onenand_shutdown(pdev); 760 omap2_onenand_shutdown(pdev);
761 platform_set_drvdata(pdev, NULL); 761 platform_set_drvdata(pdev, NULL);
762 if (c->gpio_irq) { 762 if (c->gpio_irq) {
763 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); 763 free_irq(gpio_to_irq(c->gpio_irq), c);
764 omap_free_gpio(c->gpio_irq); 764 omap_free_gpio(c->gpio_irq);
765 } 765 }
766 iounmap(c->onenand.base); 766 iounmap(c->onenand.base);
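The OneNAND changes above move from the OMAP-private GPIO calls (omap_set_gpio_direction(), omap_get_gpio_datain(), OMAP_GPIO_IRQ()) to their generic gpiolib equivalents. The full request/configure/irq/release sequence in generic terms, sketched with invented names (this driver still releases the line with omap_free_gpio(); gpio_free() is the generic counterpart):

    #include <linux/gpio.h>
    #include <linux/interrupt.h>

    static int sketch_setup_ready_gpio(unsigned int gpio, irq_handler_t handler,
                                       void *dev_id)
    {
            int ret = gpio_request(gpio, "onenand-int");

            if (ret)
                    return ret;

            gpio_direction_input(gpio);
            ret = request_irq(gpio_to_irq(gpio), handler,
                              IRQF_TRIGGER_RISING, "onenand-int", dev_id);
            if (ret)
                    gpio_free(gpio);
            /* teardown: free_irq(gpio_to_irq(gpio), dev_id); gpio_free(gpio); */
            return ret;
    }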
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 72a9212da865..9a18270c1081 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2399,9 +2399,14 @@ config CHELSIO_T1_1G
2399 Enables support for Chelsio's gigabit Ethernet PCI cards. If you 2399 Enables support for Chelsio's gigabit Ethernet PCI cards. If you
2400 are using only 10G cards say 'N' here. 2400 are using only 10G cards say 'N' here.
2401 2401
2402config CHELSIO_T3_DEPENDS
2403 tristate
2404 depends on PCI && INET
2405 default y
2406
2402config CHELSIO_T3 2407config CHELSIO_T3
2403 tristate "Chelsio Communications T3 10Gb Ethernet support" 2408 tristate "Chelsio Communications T3 10Gb Ethernet support"
2404 depends on PCI && INET 2409 depends on CHELSIO_T3_DEPENDS
2405 select FW_LOADER 2410 select FW_LOADER
2406 select INET_LRO 2411 select INET_LRO
2407 help 2412 help
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 6ecc600c1bcc..3ec20cc18b0c 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -307,7 +307,7 @@ poll_some_more:
307 } 307 }
308 spin_unlock_irq(&ep->rx_lock); 308 spin_unlock_irq(&ep->rx_lock);
309 309
310 if (more && netif_rx_reschedule(dev, napi)) 310 if (more && netif_rx_reschedule(napi))
311 goto poll_some_more; 311 goto poll_some_more;
312 } 312 }
313 313
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 26af411fc428..5fce1d5c1a1a 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -504,7 +504,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
504 netif_rx_complete(napi); 504 netif_rx_complete(napi);
505 qmgr_enable_irq(rxq); 505 qmgr_enable_irq(rxq);
506 if (!qmgr_stat_empty(rxq) && 506 if (!qmgr_stat_empty(rxq) &&
507 netif_rx_reschedule(dev, napi)) { 507 netif_rx_reschedule(napi)) {
508#if DEBUG_RX 508#if DEBUG_RX
509 printk(KERN_DEBUG "%s: eth_poll" 509 printk(KERN_DEBUG "%s: eth_poll"
510 " netif_rx_reschedule successed\n", 510 " netif_rx_reschedule successed\n",
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 592daee9dc28..9ad22d1b00fd 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -29,7 +29,6 @@
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/irq.h> 31#include <linux/irq.h>
32#include <linux/delay.h>
33#include <linux/io.h> 32#include <linux/io.h>
34 33
35#include <asm/irq.h> 34#include <asm/irq.h>
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index d548a45d59d5..ff6497658a45 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -170,11 +170,7 @@ static char version[] __initdata =
170/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps 170/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
171 them to system IRQ numbers. This mapping is card specific and is set to 171 them to system IRQ numbers. This mapping is card specific and is set to
172 the configuration of the Cirrus Eval board for this chip. */ 172 the configuration of the Cirrus Eval board for this chip. */
173#ifdef CONFIG_ARCH_CLPS7500 173#if defined(CONFIG_SH_HICOSH4)
174static unsigned int netcard_portlist[] __used __initdata =
175 { 0x80090303, 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
176static unsigned int cs8900_irq_map[] = {12,0,0,0};
177#elif defined(CONFIG_SH_HICOSH4)
178static unsigned int netcard_portlist[] __used __initdata = 174static unsigned int netcard_portlist[] __used __initdata =
179 { 0x0300, 0}; 175 { 0x0300, 0};
180static unsigned int cs8900_irq_map[] = {1,0,0,0}; 176static unsigned int cs8900_irq_map[] = {1,0,0,0};
diff --git a/drivers/net/eexpress.h b/drivers/net/eexpress.h
index 707df3fcfe40..dc9c6ea289e9 100644
--- a/drivers/net/eexpress.h
+++ b/drivers/net/eexpress.h
@@ -68,17 +68,17 @@
68 */ 68 */
69 69
70/* these functions take the SCB status word and test the relevant status bit */ 70/* these functions take the SCB status word and test the relevant status bit */
71#define SCB_complete(s) ((s&0x8000)!=0) 71#define SCB_complete(s) (((s) & 0x8000) != 0)
72#define SCB_rxdframe(s) ((s&0x4000)!=0) 72#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
73#define SCB_CUdead(s) ((s&0x2000)!=0) 73#define SCB_CUdead(s) (((s) & 0x2000) != 0)
74#define SCB_RUdead(s) ((s&0x1000)!=0) 74#define SCB_RUdead(s) (((s) & 0x1000) != 0)
75#define SCB_ack(s) (s & 0xf000) 75#define SCB_ack(s) ((s) & 0xf000)
76 76
77/* Command unit status: 0=idle, 1=suspended, 2=active */ 77/* Command unit status: 0=idle, 1=suspended, 2=active */
78#define SCB_CUstat(s) ((s&0x0300)>>8) 78#define SCB_CUstat(s) (((s)&0x0300)>>8)
79 79
80/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */ 80/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
81#define SCB_RUstat(s) ((s&0x0070)>>4) 81#define SCB_RUstat(s) (((s)&0x0070)>>4)
82 82
83/* SCB commands */ 83/* SCB commands */
84#define SCB_CUnop 0x0000 84#define SCB_CUnop 0x0000
@@ -98,18 +98,18 @@
98 * Command block defines 98 * Command block defines
99 */ 99 */
100 100
101#define Stat_Done(s) ((s&0x8000)!=0) 101#define Stat_Done(s) (((s) & 0x8000) != 0)
102#define Stat_Busy(s) ((s&0x4000)!=0) 102#define Stat_Busy(s) (((s) & 0x4000) != 0)
103#define Stat_OK(s) ((s&0x2000)!=0) 103#define Stat_OK(s) (((s) & 0x2000) != 0)
104#define Stat_Abort(s) ((s&0x1000)!=0) 104#define Stat_Abort(s) (((s) & 0x1000) != 0)
105#define Stat_STFail ((s&0x0800)!=0) 105#define Stat_STFail (((s) & 0x0800) != 0)
106#define Stat_TNoCar(s) ((s&0x0400)!=0) 106#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
107#define Stat_TNoCTS(s) ((s&0x0200)!=0) 107#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
108#define Stat_TNoDMA(s) ((s&0x0100)!=0) 108#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
109#define Stat_TDefer(s) ((s&0x0080)!=0) 109#define Stat_TDefer(s) (((s) & 0x0080) != 0)
110#define Stat_TColl(s) ((s&0x0040)!=0) 110#define Stat_TColl(s) (((s) & 0x0040) != 0)
111#define Stat_TXColl(s) ((s&0x0020)!=0) 111#define Stat_TXColl(s) (((s) & 0x0020) != 0)
112#define Stat_NoColl(s) (s&0x000f) 112#define Stat_NoColl(s) ((s) & 0x000f)
113 113
114/* Cmd_END will end AFTER the command if this is the first 114/* Cmd_END will end AFTER the command if this is the first
115 * command block after an SCB_CUstart, but BEFORE the command 115 * command block after an SCB_CUstart, but BEFORE the command
@@ -136,16 +136,16 @@
136 * Frame Descriptor (Receive block) defines 136 * Frame Descriptor (Receive block) defines
137 */ 137 */
138 138
139#define FD_Done(s) ((s&0x8000)!=0) 139#define FD_Done(s) (((s) & 0x8000) != 0)
140#define FD_Busy(s) ((s&0x4000)!=0) 140#define FD_Busy(s) (((s) & 0x4000) != 0)
141#define FD_OK(s) ((s&0x2000)!=0) 141#define FD_OK(s) (((s) & 0x2000) != 0)
142 142
143#define FD_CRC(s) ((s&0x0800)!=0) 143#define FD_CRC(s) (((s) & 0x0800) != 0)
144#define FD_Align(s) ((s&0x0400)!=0) 144#define FD_Align(s) (((s) & 0x0400) != 0)
145#define FD_Resrc(s) ((s&0x0200)!=0) 145#define FD_Resrc(s) (((s) & 0x0200) != 0)
146#define FD_DMA(s) ((s&0x0100)!=0) 146#define FD_DMA(s) (((s) & 0x0100) != 0)
147#define FD_Short(s) ((s&0x0080)!=0) 147#define FD_Short(s) (((s) & 0x0080) != 0)
148#define FD_NoEOF(s) ((s&0x0040)!=0) 148#define FD_NoEOF(s) (((s) & 0x0040) != 0)
149 149
150struct rfd_header { 150struct rfd_header {
151 volatile unsigned long flags; 151 volatile unsigned long flags;
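The eexpress.h cleanup above parenthesizes the macro parameter, which matters the moment the argument is an expression rather than a plain variable: & binds tighter than |, so without the parentheses it only masks part of the argument. A two-line illustration:

    /* Calling the macro with a compound argument, s = rx_stat | tx_stat:
     *
     *   old expansion: ((rx_stat | tx_stat & 0x8000) != 0)
     *                  -> rx_stat | (tx_stat & 0x8000): true whenever rx_stat has
     *                     any bit set, even if bit 15 of the combined word is clear
     *   new expansion: (((rx_stat | tx_stat) & 0x8000) != 0)
     *                  -> tests bit 15 of the combined word, as intended
     */
    #define SCB_complete_unsafe(s)  ((s & 0x8000) != 0)
    #define SCB_complete_safe(s)    (((s) & 0x8000) != 0)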
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index a0ee05318155..004a9aab3a50 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -22,9 +22,53 @@
22#include <net/irda/wrapper.h> 22#include <net/irda/wrapper.h>
23#include <net/irda/irda_device.h> 23#include <net/irda/irda_device.h>
24 24
25#include <asm/dma.h> 25#include <mach/dma.h>
26#include <mach/irda.h> 26#include <mach/irda.h>
27#include <mach/hardware.h>
27#include <mach/pxa-regs.h> 28#include <mach/pxa-regs.h>
29#include <mach/regs-uart.h>
30
31#define FICP __REG(0x40800000) /* Start of FICP area */
32#define ICCR0 __REG(0x40800000) /* ICP Control Register 0 */
33#define ICCR1 __REG(0x40800004) /* ICP Control Register 1 */
34#define ICCR2 __REG(0x40800008) /* ICP Control Register 2 */
35#define ICDR __REG(0x4080000c) /* ICP Data Register */
36#define ICSR0 __REG(0x40800014) /* ICP Status Register 0 */
37#define ICSR1 __REG(0x40800018) /* ICP Status Register 1 */
38
39#define ICCR0_AME (1 << 7) /* Address match enable */
40#define ICCR0_TIE (1 << 6) /* Transmit FIFO interrupt enable */
41#define ICCR0_RIE (1 << 5) /* Receive FIFO interrupt enable */
42#define ICCR0_RXE (1 << 4) /* Receive enable */
43#define ICCR0_TXE (1 << 3) /* Transmit enable */
44#define ICCR0_TUS (1 << 2) /* Transmit FIFO underrun select */
45#define ICCR0_LBM (1 << 1) /* Loopback mode */
46#define ICCR0_ITR (1 << 0) /* IrDA transmission */
47
48#define ICCR2_RXP (1 << 3) /* Receive Pin Polarity select */
49#define ICCR2_TXP (1 << 2) /* Transmit Pin Polarity select */
50#define ICCR2_TRIG (3 << 0) /* Receive FIFO Trigger threshold */
51#define ICCR2_TRIG_8 (0 << 0) /* >= 8 bytes */
52#define ICCR2_TRIG_16 (1 << 0) /* >= 16 bytes */
53#define ICCR2_TRIG_32 (2 << 0) /* >= 32 bytes */
54
55#ifdef CONFIG_PXA27x
56#define ICSR0_EOC (1 << 6) /* DMA End of Descriptor Chain */
57#endif
58#define ICSR0_FRE (1 << 5) /* Framing error */
59#define ICSR0_RFS (1 << 4) /* Receive FIFO service request */
60#define ICSR0_TFS (1 << 3) /* Transmit FIFO service request */
61#define ICSR0_RAB (1 << 2) /* Receiver abort */
62#define ICSR0_TUR (1 << 1) /* Transmit FIFO underrun */
63#define ICSR0_EIF (1 << 0) /* End/Error in FIFO */
64
65#define ICSR1_ROR (1 << 6) /* Receive FIFO overrun */
66#define ICSR1_CRE (1 << 5) /* CRC error */
67#define ICSR1_EOF (1 << 4) /* End of frame */
68#define ICSR1_TNF (1 << 3) /* Transmit FIFO not full */
69#define ICSR1_RNE (1 << 2) /* Receive FIFO not empty */
70#define ICSR1_TBY (1 << 1) /* Transmitter busy flag */
71#define ICSR1_RSY (1 << 0) /* Receiver synchronized flag */
28 72
29#define IrSR_RXPL_NEG_IS_ZERO (1<<4) 73#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
30#define IrSR_RXPL_POS_IS_ZERO 0x0 74#define IrSR_RXPL_POS_IS_ZERO 0x0
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index ccde5829ba21..d302bcf4c148 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -36,7 +36,7 @@
36#include <net/irda/irda_device.h> 36#include <net/irda/irda_device.h>
37 37
38#include <asm/irq.h> 38#include <asm/irq.h>
39#include <asm/dma.h> 39#include <mach/dma.h>
40#include <mach/hardware.h> 40#include <mach/hardware.h>
41#include <asm/mach/irda.h> 41#include <asm/mach/irda.h>
42 42
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index c1c05852a95e..eda72dd2120f 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -169,13 +169,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
169 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 169 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
170 mlx4_info(mdev, "Using %d tx rings for port:%d\n", 170 mlx4_info(mdev, "Using %d tx rings for port:%d\n",
171 mdev->profile.prof[i].tx_ring_num, i); 171 mdev->profile.prof[i].tx_ring_num, i);
172 if (!mdev->profile.prof[i].rx_ring_num) { 172 mdev->profile.prof[i].rx_ring_num =
173 mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors; 173 min_t(int, dev->caps.num_comp_vectors, MAX_RX_RINGS);
174 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", 174 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
175 mdev->profile.prof[i].rx_ring_num, i); 175 mdev->profile.prof[i].rx_ring_num, i);
176 } else
177 mlx4_info(mdev, "Using %d rx rings for port:%d\n",
178 mdev->profile.prof[i].rx_ring_num, i);
179 } 176 }
180 177
181 /* Create our own workqueue for reset/multicast tasks 178 /* Create our own workqueue for reset/multicast tasks
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index ebada3c7aff2..15bb38d99304 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -552,7 +552,7 @@ static void mlx4_en_linkstate(struct work_struct *work)
552} 552}
553 553
554 554
555static int mlx4_en_start_port(struct net_device *dev) 555int mlx4_en_start_port(struct net_device *dev)
556{ 556{
557 struct mlx4_en_priv *priv = netdev_priv(dev); 557 struct mlx4_en_priv *priv = netdev_priv(dev);
558 struct mlx4_en_dev *mdev = priv->mdev; 558 struct mlx4_en_dev *mdev = priv->mdev;
@@ -707,7 +707,7 @@ cq_err:
707} 707}
708 708
709 709
710static void mlx4_en_stop_port(struct net_device *dev) 710void mlx4_en_stop_port(struct net_device *dev)
711{ 711{
712 struct mlx4_en_priv *priv = netdev_priv(dev); 712 struct mlx4_en_priv *priv = netdev_priv(dev);
713 struct mlx4_en_dev *mdev = priv->mdev; 713 struct mlx4_en_dev *mdev = priv->mdev;
@@ -826,7 +826,7 @@ static int mlx4_en_close(struct net_device *dev)
826 return 0; 826 return 0;
827} 827}
828 828
829static void mlx4_en_free_resources(struct mlx4_en_priv *priv) 829void mlx4_en_free_resources(struct mlx4_en_priv *priv)
830{ 830{
831 int i; 831 int i;
832 832
@@ -845,7 +845,7 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
845 } 845 }
846} 846}
847 847
848static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 848int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
849{ 849{
850 struct mlx4_en_dev *mdev = priv->mdev; 850 struct mlx4_en_dev *mdev = priv->mdev;
851 struct mlx4_en_port_profile *prof = priv->prof; 851 struct mlx4_en_port_profile *prof = priv->prof;
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
index 047b37f5a747..cfeef0f1bacc 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_params.c
@@ -65,15 +65,6 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
65MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." 65MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
66 " Per priority bit mask"); 66 " Per priority bit mask");
67 67
68MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
69MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
70
71MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
72MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
73MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
74MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
75
76
77int mlx4_en_get_profile(struct mlx4_en_dev *mdev) 68int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
78{ 69{
79 struct mlx4_en_profile *params = &mdev->profile; 70 struct mlx4_en_profile *params = &mdev->profile;
@@ -87,6 +78,8 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
87 params->prof[i].rx_ppp = pfcrx; 78 params->prof[i].rx_ppp = pfcrx;
88 params->prof[i].tx_pause = 1; 79 params->prof[i].tx_pause = 1;
89 params->prof[i].tx_ppp = pfctx; 80 params->prof[i].tx_ppp = pfctx;
81 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
82 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
90 } 83 }
91 if (pfcrx || pfctx) { 84 if (pfcrx || pfctx) {
92 params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM; 85 params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
@@ -95,32 +88,7 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
95 params->prof[1].tx_ring_num = 1; 88 params->prof[1].tx_ring_num = 1;
96 params->prof[2].tx_ring_num = 1; 89 params->prof[2].tx_ring_num = 1;
97 } 90 }
98 params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS); 91
99 params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
100
101 if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
102 tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
103 params->prof[1].tx_ring_size =
104 (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
105 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
106
107 if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
108 tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
109 params->prof[2].tx_ring_size =
110 (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
111 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
112
113 if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
114 rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
115 params->prof[1].rx_ring_size =
116 (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
117 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
118
119 if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
120 rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
121 params->prof[2].rx_ring_size =
122 (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
123 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
124 return 0; 92 return 0;
125} 93}
126 94
@@ -417,6 +385,54 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
417 pause->rx_pause = priv->prof->rx_pause; 385 pause->rx_pause = priv->prof->rx_pause;
418} 386}
419 387
388static int mlx4_en_set_ringparam(struct net_device *dev,
389 struct ethtool_ringparam *param)
390{
391 struct mlx4_en_priv *priv = netdev_priv(dev);
392 struct mlx4_en_dev *mdev = priv->mdev;
393 u32 rx_size, tx_size;
394 int port_up = 0;
395 int err = 0;
396
397 if (param->rx_jumbo_pending || param->rx_mini_pending)
398 return -EINVAL;
399
400 rx_size = roundup_pow_of_two(param->rx_pending);
401 rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
402 tx_size = roundup_pow_of_two(param->tx_pending);
403 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
404
405 if (rx_size == priv->prof->rx_ring_size &&
406 tx_size == priv->prof->tx_ring_size)
407 return 0;
408
409 mutex_lock(&mdev->state_lock);
410 if (priv->port_up) {
411 port_up = 1;
412 mlx4_en_stop_port(dev);
413 }
414
415 mlx4_en_free_resources(priv);
416
417 priv->prof->tx_ring_size = tx_size;
418 priv->prof->rx_ring_size = rx_size;
419
420 err = mlx4_en_alloc_resources(priv);
421 if (err) {
422 mlx4_err(mdev, "Failed reallocating port resources\n");
423 goto out;
424 }
425 if (port_up) {
426 err = mlx4_en_start_port(dev);
427 if (err)
428 mlx4_err(mdev, "Failed starting port\n");
429 }
430
431out:
432 mutex_unlock(&mdev->state_lock);
433 return err;
434}
435
420static void mlx4_en_get_ringparam(struct net_device *dev, 436static void mlx4_en_get_ringparam(struct net_device *dev,
421 struct ethtool_ringparam *param) 437 struct ethtool_ringparam *param)
422{ 438{
@@ -456,6 +472,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
456 .get_pauseparam = mlx4_en_get_pauseparam, 472 .get_pauseparam = mlx4_en_get_pauseparam,
457 .set_pauseparam = mlx4_en_set_pauseparam, 473 .set_pauseparam = mlx4_en_set_pauseparam,
458 .get_ringparam = mlx4_en_get_ringparam, 474 .get_ringparam = mlx4_en_get_ringparam,
475 .set_ringparam = mlx4_en_set_ringparam,
459 .get_flags = ethtool_op_get_flags, 476 .get_flags = ethtool_op_get_flags,
460 .set_flags = ethtool_op_set_flags, 477 .set_flags = ethtool_op_set_flags,
461}; 478};
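
The new set_ringparam hook above replaces the removed module parameters: ring sizes are now changed at runtime through the standard ETHTOOL_SRINGPARAM path (the handler stops the port if it is up, frees and reallocates the rings with the rounded and clamped sizes, then restarts the port). A minimal user-space sketch of how that path is exercised; the interface name and sizes are arbitrary, and the ioctl needs CAP_NET_ADMIN:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	/* rx_mini_pending/rx_jumbo_pending stay 0: the handler rejects them */
	struct ethtool_ringparam ering = { .cmd = ETHTOOL_SRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	ering.rx_pending = 4096;	/* rounded up to a power of two and */
	ering.tx_pending = 1024;	/* clamped to the driver minimums   */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ering;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SRINGPARAM");
		return 1;
	}
	return 0;
}

The same operation is what an `ethtool -G eth2 rx 4096 tx 1024` invocation performs.
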
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index e78209768def..2e96c7b2180a 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -489,6 +489,12 @@ void mlx4_en_destroy_netdev(struct net_device *dev);
489int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 489int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
490 struct mlx4_en_port_profile *prof); 490 struct mlx4_en_port_profile *prof);
491 491
492int mlx4_en_start_port(struct net_device *dev);
493void mlx4_en_stop_port(struct net_device *dev);
494
495void mlx4_en_free_resources(struct mlx4_en_priv *priv);
496int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
497
492int mlx4_en_get_profile(struct mlx4_en_dev *mdev); 498int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
493 499
494int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 500int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index dcd199045613..5b7a574ce571 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -954,7 +954,6 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
954{ 954{
955 const struct pasemi_mac_rxring *rxring = data; 955 const struct pasemi_mac_rxring *rxring = data;
956 struct pasemi_mac *mac = rxring->mac; 956 struct pasemi_mac *mac = rxring->mac;
957 struct net_device *dev = mac->netdev;
958 const struct pasemi_dmachan *chan = &rxring->chan; 957 const struct pasemi_dmachan *chan = &rxring->chan;
959 unsigned int reg; 958 unsigned int reg;
960 959
@@ -1634,7 +1633,6 @@ static void pasemi_mac_set_rx_mode(struct net_device *dev)
1634static int pasemi_mac_poll(struct napi_struct *napi, int budget) 1633static int pasemi_mac_poll(struct napi_struct *napi, int budget)
1635{ 1634{
1636 struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); 1635 struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
1637 struct net_device *dev = mac->netdev;
1638 int pkts; 1636 int pkts;
1639 1637
1640 pasemi_mac_clean_tx(tx_ring(mac)); 1638 pasemi_mac_clean_tx(tx_ring(mac));
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index cc7d85bdfb3e..870b4c33f108 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -200,6 +200,9 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
200 200
201 201
202#ifdef SMC_USE_PXA_DMA 202#ifdef SMC_USE_PXA_DMA
203
204#include <mach/dma.h>
205
203/* 206/*
204 * Define the request and free functions 207 * Define the request and free functions
205 * These are unfortunately architecture specific as no generic allocation 208 * These are unfortunately architecture specific as no generic allocation
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 3e7c6a3cbc65..c4ccd121bc9c 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -493,7 +493,8 @@ struct smc_local {
493 * as RX which can overrun memory and lose packets. 493 * as RX which can overrun memory and lose packets.
494 */ 494 */
495#include <linux/dma-mapping.h> 495#include <linux/dma-mapping.h>
496#include <asm/dma.h> 496#include <mach/dma.h>
497#include <mach/hardware.h>
497#include <mach/pxa-regs.h> 498#include <mach/pxa-regs.h>
498 499
499#ifdef SMC_insl 500#ifdef SMC_insl
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 5e989d884ddd..dc3f1108884d 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1484,13 +1484,13 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1484 } 1484 }
1485 1485
1486 if (likely(intsts & inten & INT_STS_RSFL_)) { 1486 if (likely(intsts & inten & INT_STS_RSFL_)) {
1487 if (likely(netif_rx_schedule_prep(dev, &pdata->napi))) { 1487 if (likely(netif_rx_schedule_prep(&pdata->napi))) {
1488 /* Disable Rx interrupts */ 1488 /* Disable Rx interrupts */
1489 temp = smsc911x_reg_read(pdata, INT_EN); 1489 temp = smsc911x_reg_read(pdata, INT_EN);
1490 temp &= (~INT_EN_RSFL_EN_); 1490 temp &= (~INT_EN_RSFL_EN_);
1491 smsc911x_reg_write(pdata, INT_EN, temp); 1491 smsc911x_reg_write(pdata, INT_EN, temp);
1492 /* Schedule a NAPI poll */ 1492 /* Schedule a NAPI poll */
1493 __netif_rx_schedule(dev, &pdata->napi); 1493 __netif_rx_schedule(&pdata->napi);
1494 } else { 1494 } else {
1495 SMSC_WARNING(RX_ERR, 1495 SMSC_WARNING(RX_ERR,
1496 "netif_rx_schedule_prep failed"); 1496 "netif_rx_schedule_prep failed");
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index c5c123d3af57..88d2c67788df 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1277,7 +1277,6 @@ bad_desc:
1277static int spider_net_poll(struct napi_struct *napi, int budget) 1277static int spider_net_poll(struct napi_struct *napi, int budget)
1278{ 1278{
1279 struct spider_net_card *card = container_of(napi, struct spider_net_card, napi); 1279 struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
1280 struct net_device *netdev = card->netdev;
1281 int packets_done = 0; 1280 int packets_done = 0;
1282 1281
1283 while (packets_done < budget) { 1282 while (packets_done < budget) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 666c1d98cdaf..69f9a0ec764d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -343,7 +343,7 @@ static void tun_net_init(struct net_device *dev)
343 break; 343 break;
344 344
345 case TUN_TAP_DEV: 345 case TUN_TAP_DEV:
346 dev->netdev_ops = &tun_netdev_ops; 346 dev->netdev_ops = &tap_netdev_ops;
347 /* Ethernet TAP Device */ 347 /* Ethernet TAP Device */
348 ether_setup(dev); 348 ether_setup(dev);
349 349
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 198ce3cf378a..9f7896a25f1b 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2831,7 +2831,7 @@ static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
2831 for (i = 0; i < iface->desc.bNumEndpoints; i++) { 2831 for (i = 0; i < iface->desc.bNumEndpoints; i++) {
2832 endp = &iface->endpoint[i].desc; 2832 endp = &iface->endpoint[i].desc;
2833 if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) && 2833 if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) &&
2834 ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == type)) 2834 (usb_endpoint_type(endp) == type))
2835 return endp; 2835 return endp;
2836 } 2836 }
2837 2837
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0c6802507a79..2dc241689d37 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -654,7 +654,7 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
654 netif_rx_complete(dev, napi); 654 netif_rx_complete(dev, napi);
655 qmgr_enable_irq(rxq); 655 qmgr_enable_irq(rxq);
656 if (!qmgr_stat_empty(rxq) && 656 if (!qmgr_stat_empty(rxq) &&
657 netif_rx_reschedule(dev, napi)) { 657 netif_rx_reschedule(napi)) {
658#if DEBUG_RX 658#if DEBUG_RX
659 printk(KERN_DEBUG "%s: hss_hdlc_poll" 659 printk(KERN_DEBUG "%s: hss_hdlc_poll"
660 " netif_rx_reschedule succeeded\n", 660 " netif_rx_reschedule succeeded\n",
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 04c139666965..b5db57d2fcf5 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1065,8 +1065,7 @@ static int eject_installer(struct usb_interface *intf)
1065 /* Find bulk out endpoint */ 1065 /* Find bulk out endpoint */
1066 endpoint = &iface_desc->endpoint[1].desc; 1066 endpoint = &iface_desc->endpoint[1].desc;
1067 if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT && 1067 if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
1068 (endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == 1068 usb_endpoint_xfer_bulk(endpoint)) {
1069 USB_ENDPOINT_XFER_BULK) {
1070 bulk_out_ep = endpoint->bEndpointAddress; 1069 bulk_out_ep = endpoint->bEndpointAddress;
1071 } else { 1070 } else {
1072 dev_err(&udev->dev, 1071 dev_err(&udev->dev,
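
Both the hso and zd1211rw hunks above replace open-coded bEndpointAddress/bmAttributes tests with the usb_endpoint_*() helpers from <linux/usb.h>. A minimal sketch of the same idiom for locating a bulk-OUT endpoint; the function is illustrative and not taken from either driver:

#include <linux/usb.h>

/* Return the address of the first bulk-OUT endpoint of an altsetting,
 * or -1 if the interface has none. */
static int find_bulk_out_ep(struct usb_host_interface *alt)
{
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

		if (usb_endpoint_is_bulk_out(ep))
			return ep->bEndpointAddress;
	}
	return -1;
}
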
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index b55cd23ffdef..737bd9484822 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -268,18 +268,6 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
268 return cookie; 268 return cookie;
269} 269}
270 270
271static void increment_tail(struct oprofile_cpu_buffer *b)
272{
273 unsigned long new_tail = b->tail_pos + 1;
274
275 rmb(); /* be sure fifo pointers are synchromized */
276
277 if (new_tail < b->buffer_size)
278 b->tail_pos = new_tail;
279 else
280 b->tail_pos = 0;
281}
282
283static unsigned long last_cookie = INVALID_COOKIE; 271static unsigned long last_cookie = INVALID_COOKIE;
284 272
285static void add_cpu_switch(int i) 273static void add_cpu_switch(int i)
@@ -331,28 +319,25 @@ static void add_trace_begin(void)
331 319
332#define IBS_FETCH_CODE_SIZE 2 320#define IBS_FETCH_CODE_SIZE 2
333#define IBS_OP_CODE_SIZE 5 321#define IBS_OP_CODE_SIZE 5
334#define IBS_EIP(offset) \
335 (((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
336#define IBS_EVENT(offset) \
337 (((struct op_sample *)&cpu_buf->buffer[(offset)])->event)
338 322
339/* 323/*
340 * Add IBS fetch and op entries to event buffer 324 * Add IBS fetch and op entries to event buffer
341 */ 325 */
342static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code, 326static void add_ibs_begin(int cpu, int code, struct mm_struct *mm)
343 struct mm_struct *mm)
344{ 327{
345 unsigned long rip; 328 unsigned long rip;
346 int i, count; 329 int i, count;
347 unsigned long ibs_cookie = 0; 330 unsigned long ibs_cookie = 0;
348 off_t offset; 331 off_t offset;
332 struct op_sample *sample;
349 333
350 increment_tail(cpu_buf); /* move to RIP entry */ 334 sample = cpu_buffer_read_entry(cpu);
351 335 if (!sample)
352 rip = IBS_EIP(cpu_buf->tail_pos); 336 goto Error;
337 rip = sample->eip;
353 338
354#ifdef __LP64__ 339#ifdef __LP64__
355 rip += IBS_EVENT(cpu_buf->tail_pos) << 32; 340 rip += sample->event << 32;
356#endif 341#endif
357 342
358 if (mm) { 343 if (mm) {
@@ -376,8 +361,8 @@ static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
376 add_event_entry(offset); /* Offset from Dcookie */ 361 add_event_entry(offset); /* Offset from Dcookie */
377 362
378 /* we send the Dcookie offset, but send the raw Linear Add also*/ 363 /* we send the Dcookie offset, but send the raw Linear Add also*/
379 add_event_entry(IBS_EIP(cpu_buf->tail_pos)); 364 add_event_entry(sample->eip);
380 add_event_entry(IBS_EVENT(cpu_buf->tail_pos)); 365 add_event_entry(sample->event);
381 366
382 if (code == IBS_FETCH_CODE) 367 if (code == IBS_FETCH_CODE)
383 count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/ 368 count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/
@@ -385,10 +370,17 @@ static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
385 count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/ 370 count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/
386 371
387 for (i = 0; i < count; i++) { 372 for (i = 0; i < count; i++) {
388 increment_tail(cpu_buf); 373 sample = cpu_buffer_read_entry(cpu);
389 add_event_entry(IBS_EIP(cpu_buf->tail_pos)); 374 if (!sample)
390 add_event_entry(IBS_EVENT(cpu_buf->tail_pos)); 375 goto Error;
376 add_event_entry(sample->eip);
377 add_event_entry(sample->event);
391 } 378 }
379
380 return;
381
382Error:
383 return;
392} 384}
393 385
394#endif 386#endif
@@ -466,33 +458,6 @@ static inline int is_code(unsigned long val)
466} 458}
467 459
468 460
469/* "acquire" as many cpu buffer slots as we can */
470static unsigned long get_slots(struct oprofile_cpu_buffer *b)
471{
472 unsigned long head = b->head_pos;
473 unsigned long tail = b->tail_pos;
474
475 /*
476 * Subtle. This resets the persistent last_task
477 * and in_kernel values used for switching notes.
478 * BUT, there is a small window between reading
479 * head_pos, and this call, that means samples
480 * can appear at the new head position, but not
481 * be prefixed with the notes for switching
482 * kernel mode or a task switch. This small hole
483 * can lead to mis-attribution or samples where
484 * we don't know if it's in the kernel or not,
485 * at the start of an event buffer.
486 */
487 cpu_buffer_reset(b);
488
489 if (head >= tail)
490 return head - tail;
491
492 return head + (b->buffer_size - tail);
493}
494
495
496/* Move tasks along towards death. Any tasks on dead_tasks 461/* Move tasks along towards death. Any tasks on dead_tasks
497 * will definitely have no remaining references in any 462 * will definitely have no remaining references in any
498 * CPU buffers at this point, because we use two lists, 463 * CPU buffers at this point, because we use two lists,
@@ -559,61 +524,61 @@ typedef enum {
559 */ 524 */
560void sync_buffer(int cpu) 525void sync_buffer(int cpu)
561{ 526{
562 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
563 struct mm_struct *mm = NULL; 527 struct mm_struct *mm = NULL;
528 struct mm_struct *oldmm;
564 struct task_struct *new; 529 struct task_struct *new;
565 unsigned long cookie = 0; 530 unsigned long cookie = 0;
566 int in_kernel = 1; 531 int in_kernel = 1;
567 sync_buffer_state state = sb_buffer_start; 532 sync_buffer_state state = sb_buffer_start;
568#ifndef CONFIG_OPROFILE_IBS
569 unsigned int i; 533 unsigned int i;
570 unsigned long available; 534 unsigned long available;
571#endif
572 535
573 mutex_lock(&buffer_mutex); 536 mutex_lock(&buffer_mutex);
574 537
575 add_cpu_switch(cpu); 538 add_cpu_switch(cpu);
576 539
577 /* Remember, only we can modify tail_pos */ 540 cpu_buffer_reset(cpu);
578 541 available = cpu_buffer_entries(cpu);
579#ifndef CONFIG_OPROFILE_IBS
580 available = get_slots(cpu_buf);
581 542
582 for (i = 0; i < available; ++i) { 543 for (i = 0; i < available; ++i) {
583#else 544 struct op_sample *s = cpu_buffer_read_entry(cpu);
584 while (get_slots(cpu_buf)) { 545 if (!s)
585#endif 546 break;
586 struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
587 547
588 if (is_code(s->eip)) { 548 if (is_code(s->eip)) {
589 if (s->event <= CPU_IS_KERNEL) { 549 switch (s->event) {
550 case 0:
551 case CPU_IS_KERNEL:
590 /* kernel/userspace switch */ 552 /* kernel/userspace switch */
591 in_kernel = s->event; 553 in_kernel = s->event;
592 if (state == sb_buffer_start) 554 if (state == sb_buffer_start)
593 state = sb_sample_start; 555 state = sb_sample_start;
594 add_kernel_ctx_switch(s->event); 556 add_kernel_ctx_switch(s->event);
595 } else if (s->event == CPU_TRACE_BEGIN) { 557 break;
558 case CPU_TRACE_BEGIN:
596 state = sb_bt_start; 559 state = sb_bt_start;
597 add_trace_begin(); 560 add_trace_begin();
561 break;
598#ifdef CONFIG_OPROFILE_IBS 562#ifdef CONFIG_OPROFILE_IBS
599 } else if (s->event == IBS_FETCH_BEGIN) { 563 case IBS_FETCH_BEGIN:
600 state = sb_bt_start; 564 state = sb_bt_start;
601 add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm); 565 add_ibs_begin(cpu, IBS_FETCH_CODE, mm);
602 } else if (s->event == IBS_OP_BEGIN) { 566 break;
567 case IBS_OP_BEGIN:
603 state = sb_bt_start; 568 state = sb_bt_start;
604 add_ibs_begin(cpu_buf, IBS_OP_CODE, mm); 569 add_ibs_begin(cpu, IBS_OP_CODE, mm);
570 break;
605#endif 571#endif
606 } else { 572 default:
607 struct mm_struct *oldmm = mm;
608
609 /* userspace context switch */ 573 /* userspace context switch */
574 oldmm = mm;
610 new = (struct task_struct *)s->event; 575 new = (struct task_struct *)s->event;
611
612 release_mm(oldmm); 576 release_mm(oldmm);
613 mm = take_tasks_mm(new); 577 mm = take_tasks_mm(new);
614 if (mm != oldmm) 578 if (mm != oldmm)
615 cookie = get_exec_dcookie(mm); 579 cookie = get_exec_dcookie(mm);
616 add_user_ctx_switch(new, cookie); 580 add_user_ctx_switch(new, cookie);
581 break;
617 } 582 }
618 } else if (state >= sb_bt_start && 583 } else if (state >= sb_bt_start &&
619 !add_sample(mm, s, in_kernel)) { 584 !add_sample(mm, s, in_kernel)) {
@@ -622,8 +587,6 @@ void sync_buffer(int cpu)
622 atomic_inc(&oprofile_stats.bt_lost_no_mapping); 587 atomic_inc(&oprofile_stats.bt_lost_no_mapping);
623 } 588 }
624 } 589 }
625
626 increment_tail(cpu_buf);
627 } 590 }
628 release_mm(mm); 591 release_mm(mm);
629 592
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 01d38e78cde1..61090969158f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -28,6 +28,25 @@
28#include "buffer_sync.h" 28#include "buffer_sync.h"
29#include "oprof.h" 29#include "oprof.h"
30 30
31#define OP_BUFFER_FLAGS 0
32
33/*
34 * Read and write access uses spin locking. Thus, writing to the
35 * buffer by the NMI handler (x86) can also occur during critical
36 * sections when reading the buffer. To avoid this, there are 2
37 * buffers for independent read and write access. Read access is in
38 * process context only, write access only in the NMI handler. If the
39 * read buffer runs empty, both buffers are swapped atomically. There
40 * is potentially a small window during swapping where the buffers are
41 * disabled and samples could be lost.
42 *
43 * Using 2 buffers adds a little overhead, but the solution is clear
44 * and does not require changes in the ring buffer implementation. It
45 * can be changed to a single buffer solution when the ring buffer
46 * access is implemented as non-locking atomic code.
47 */
48struct ring_buffer *op_ring_buffer_read;
49struct ring_buffer *op_ring_buffer_write;
31DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 50DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
32 51
33static void wq_sync_buffer(struct work_struct *work); 52static void wq_sync_buffer(struct work_struct *work);
@@ -37,12 +56,12 @@ static int work_enabled;
37 56
38void free_cpu_buffers(void) 57void free_cpu_buffers(void)
39{ 58{
40 int i; 59 if (op_ring_buffer_read)
41 60 ring_buffer_free(op_ring_buffer_read);
42 for_each_possible_cpu(i) { 61 op_ring_buffer_read = NULL;
43 vfree(per_cpu(cpu_buffer, i).buffer); 62 if (op_ring_buffer_write)
44 per_cpu(cpu_buffer, i).buffer = NULL; 63 ring_buffer_free(op_ring_buffer_write);
45 } 64 op_ring_buffer_write = NULL;
46} 65}
47 66
48unsigned long oprofile_get_cpu_buffer_size(void) 67unsigned long oprofile_get_cpu_buffer_size(void)
@@ -64,14 +83,16 @@ int alloc_cpu_buffers(void)
64 83
65 unsigned long buffer_size = fs_cpu_buffer_size; 84 unsigned long buffer_size = fs_cpu_buffer_size;
66 85
86 op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
87 if (!op_ring_buffer_read)
88 goto fail;
89 op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
90 if (!op_ring_buffer_write)
91 goto fail;
92
67 for_each_possible_cpu(i) { 93 for_each_possible_cpu(i) {
68 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 94 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
69 95
70 b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
71 cpu_to_node(i));
72 if (!b->buffer)
73 goto fail;
74
75 b->last_task = NULL; 96 b->last_task = NULL;
76 b->last_is_kernel = -1; 97 b->last_is_kernel = -1;
77 b->tracing = 0; 98 b->tracing = 0;
@@ -124,57 +145,31 @@ void end_cpu_work(void)
124 flush_scheduled_work(); 145 flush_scheduled_work();
125} 146}
126 147
127/* Resets the cpu buffer to a sane state. */ 148static inline int
128void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf) 149add_sample(struct oprofile_cpu_buffer *cpu_buf,
129{ 150 unsigned long pc, unsigned long event)
130 /* reset these to invalid values; the next sample
131 * collected will populate the buffer with proper
132 * values to initialize the buffer
133 */
134 cpu_buf->last_is_kernel = -1;
135 cpu_buf->last_task = NULL;
136}
137
138/* compute number of available slots in cpu_buffer queue */
139static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
140{ 151{
141 unsigned long head = b->head_pos; 152 struct op_entry entry;
142 unsigned long tail = b->tail_pos; 153 int ret;
143 154
144 if (tail > head) 155 ret = cpu_buffer_write_entry(&entry);
145 return (tail - head) - 1; 156 if (ret)
157 return ret;
146 158
147 return tail + (b->buffer_size - head) - 1; 159 entry.sample->eip = pc;
148} 160 entry.sample->event = event;
149 161
150static void increment_head(struct oprofile_cpu_buffer *b) 162 ret = cpu_buffer_write_commit(&entry);
151{ 163 if (ret)
152 unsigned long new_head = b->head_pos + 1; 164 return ret;
153
154 /* Ensure anything written to the slot before we
155 * increment is visible */
156 wmb();
157
158 if (new_head < b->buffer_size)
159 b->head_pos = new_head;
160 else
161 b->head_pos = 0;
162}
163 165
164static inline void 166 return 0;
165add_sample(struct oprofile_cpu_buffer *cpu_buf,
166 unsigned long pc, unsigned long event)
167{
168 struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
169 entry->eip = pc;
170 entry->event = event;
171 increment_head(cpu_buf);
172} 167}
173 168
174static inline void 169static inline int
175add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) 170add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
176{ 171{
177 add_sample(buffer, ESCAPE_CODE, value); 172 return add_sample(buffer, ESCAPE_CODE, value);
178} 173}
179 174
180/* This must be safe from any context. It's safe writing here 175/* This must be safe from any context. It's safe writing here
@@ -198,11 +193,6 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
198 return 0; 193 return 0;
199 } 194 }
200 195
201 if (nr_available_slots(cpu_buf) < 3) {
202 cpu_buf->sample_lost_overflow++;
203 return 0;
204 }
205
206 is_kernel = !!is_kernel; 196 is_kernel = !!is_kernel;
207 197
208 task = current; 198 task = current;
@@ -210,26 +200,29 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
210 /* notice a switch from user->kernel or vice versa */ 200 /* notice a switch from user->kernel or vice versa */
211 if (cpu_buf->last_is_kernel != is_kernel) { 201 if (cpu_buf->last_is_kernel != is_kernel) {
212 cpu_buf->last_is_kernel = is_kernel; 202 cpu_buf->last_is_kernel = is_kernel;
213 add_code(cpu_buf, is_kernel); 203 if (add_code(cpu_buf, is_kernel))
204 goto fail;
214 } 205 }
215 206
216 /* notice a task switch */ 207 /* notice a task switch */
217 if (cpu_buf->last_task != task) { 208 if (cpu_buf->last_task != task) {
218 cpu_buf->last_task = task; 209 cpu_buf->last_task = task;
219 add_code(cpu_buf, (unsigned long)task); 210 if (add_code(cpu_buf, (unsigned long)task))
211 goto fail;
220 } 212 }
221 213
222 add_sample(cpu_buf, pc, event); 214 if (add_sample(cpu_buf, pc, event))
215 goto fail;
216
223 return 1; 217 return 1;
218
219fail:
220 cpu_buf->sample_lost_overflow++;
221 return 0;
224} 222}
225 223
226static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) 224static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
227{ 225{
228 if (nr_available_slots(cpu_buf) < 4) {
229 cpu_buf->sample_lost_overflow++;
230 return 0;
231 }
232
233 add_code(cpu_buf, CPU_TRACE_BEGIN); 226 add_code(cpu_buf, CPU_TRACE_BEGIN);
234 cpu_buf->tracing = 1; 227 cpu_buf->tracing = 1;
235 return 1; 228 return 1;
@@ -253,8 +246,10 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
253 if (!oprofile_begin_trace(cpu_buf)) 246 if (!oprofile_begin_trace(cpu_buf))
254 return; 247 return;
255 248
256 /* if log_sample() fail we can't backtrace since we lost the source 249 /*
257 * of this event */ 250 * if log_sample() fails we can't backtrace since we lost the
251 * source of this event
252 */
258 if (log_sample(cpu_buf, pc, is_kernel, event)) 253 if (log_sample(cpu_buf, pc, is_kernel, event))
259 oprofile_ops.backtrace(regs, backtrace_depth); 254 oprofile_ops.backtrace(regs, backtrace_depth);
260 oprofile_end_trace(cpu_buf); 255 oprofile_end_trace(cpu_buf);
@@ -272,49 +267,55 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
272 267
273#define MAX_IBS_SAMPLE_SIZE 14 268#define MAX_IBS_SAMPLE_SIZE 14
274 269
275void oprofile_add_ibs_sample(struct pt_regs *const regs, 270void oprofile_add_ibs_sample(struct pt_regs * const regs,
276 unsigned int *const ibs_sample, int ibs_code) 271 unsigned int * const ibs_sample, int ibs_code)
277{ 272{
278 int is_kernel = !user_mode(regs); 273 int is_kernel = !user_mode(regs);
279 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 274 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
280 struct task_struct *task; 275 struct task_struct *task;
276 int fail = 0;
281 277
282 cpu_buf->sample_received++; 278 cpu_buf->sample_received++;
283 279
284 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
285 /* we can't backtrace since we lost the source of this event */
286 cpu_buf->sample_lost_overflow++;
287 return;
288 }
289
290 /* notice a switch from user->kernel or vice versa */ 280 /* notice a switch from user->kernel or vice versa */
291 if (cpu_buf->last_is_kernel != is_kernel) { 281 if (cpu_buf->last_is_kernel != is_kernel) {
282 if (add_code(cpu_buf, is_kernel))
283 goto fail;
292 cpu_buf->last_is_kernel = is_kernel; 284 cpu_buf->last_is_kernel = is_kernel;
293 add_code(cpu_buf, is_kernel);
294 } 285 }
295 286
296 /* notice a task switch */ 287 /* notice a task switch */
297 if (!is_kernel) { 288 if (!is_kernel) {
298 task = current; 289 task = current;
299 if (cpu_buf->last_task != task) { 290 if (cpu_buf->last_task != task) {
291 if (add_code(cpu_buf, (unsigned long)task))
292 goto fail;
300 cpu_buf->last_task = task; 293 cpu_buf->last_task = task;
301 add_code(cpu_buf, (unsigned long)task);
302 } 294 }
303 } 295 }
304 296
305 add_code(cpu_buf, ibs_code); 297 fail = fail || add_code(cpu_buf, ibs_code);
306 add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); 298 fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
307 add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); 299 fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
308 add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); 300 fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
309 301
310 if (ibs_code == IBS_OP_BEGIN) { 302 if (ibs_code == IBS_OP_BEGIN) {
311 add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]); 303 fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
312 add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]); 304 fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
313 add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); 305 fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
314 } 306 }
315 307
308 if (fail)
309 goto fail;
310
316 if (backtrace_depth) 311 if (backtrace_depth)
317 oprofile_ops.backtrace(regs, backtrace_depth); 312 oprofile_ops.backtrace(regs, backtrace_depth);
313
314 return;
315
316fail:
317 cpu_buf->sample_lost_overflow++;
318 return;
318} 319}
319 320
320#endif 321#endif
@@ -332,21 +333,21 @@ void oprofile_add_trace(unsigned long pc)
332 if (!cpu_buf->tracing) 333 if (!cpu_buf->tracing)
333 return; 334 return;
334 335
335 if (nr_available_slots(cpu_buf) < 1) { 336 /*
336 cpu_buf->tracing = 0; 337 * broken frame can give an eip with the same value as an
337 cpu_buf->sample_lost_overflow++; 338 * escape code, abort the trace if we get it
338 return; 339 */
339 } 340 if (pc == ESCAPE_CODE)
341 goto fail;
340 342
341 /* broken frame can give an eip with the same value as an escape code, 343 if (add_sample(cpu_buf, pc, 0))
342 * abort the trace if we get it */ 344 goto fail;
343 if (pc == ESCAPE_CODE) {
344 cpu_buf->tracing = 0;
345 cpu_buf->backtrace_aborted++;
346 return;
347 }
348 345
349 add_sample(cpu_buf, pc, 0); 346 return;
347fail:
348 cpu_buf->tracing = 0;
349 cpu_buf->backtrace_aborted++;
350 return;
350} 351}
351 352
352/* 353/*
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index d3cc26264db5..aacb0f0bc566 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -15,6 +15,7 @@
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/cache.h> 16#include <linux/cache.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/ring_buffer.h>
18 19
19struct task_struct; 20struct task_struct;
20 21
@@ -32,6 +33,12 @@ struct op_sample {
32 unsigned long event; 33 unsigned long event;
33}; 34};
34 35
36struct op_entry {
37 struct ring_buffer_event *event;
38 struct op_sample *sample;
39 unsigned long irq_flags;
40};
41
35struct oprofile_cpu_buffer { 42struct oprofile_cpu_buffer {
36 volatile unsigned long head_pos; 43 volatile unsigned long head_pos;
37 volatile unsigned long tail_pos; 44 volatile unsigned long tail_pos;
@@ -39,7 +46,6 @@ struct oprofile_cpu_buffer {
39 struct task_struct *last_task; 46 struct task_struct *last_task;
40 int last_is_kernel; 47 int last_is_kernel;
41 int tracing; 48 int tracing;
42 struct op_sample *buffer;
43 unsigned long sample_received; 49 unsigned long sample_received;
44 unsigned long sample_lost_overflow; 50 unsigned long sample_lost_overflow;
45 unsigned long backtrace_aborted; 51 unsigned long backtrace_aborted;
@@ -48,9 +54,68 @@ struct oprofile_cpu_buffer {
48 struct delayed_work work; 54 struct delayed_work work;
49}; 55};
50 56
57extern struct ring_buffer *op_ring_buffer_read;
58extern struct ring_buffer *op_ring_buffer_write;
51DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 59DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
52 60
53void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf); 61/*
62 * Resets the cpu buffer to a sane state.
63 *
64 * reset these to invalid values; the next sample collected will
65 * populate the buffer with proper values to initialize the buffer
66 */
67static inline void cpu_buffer_reset(int cpu)
68{
69 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
70
71 cpu_buf->last_is_kernel = -1;
72 cpu_buf->last_task = NULL;
73}
74
75static inline int cpu_buffer_write_entry(struct op_entry *entry)
76{
77 entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
78 sizeof(struct op_sample),
79 &entry->irq_flags);
80 if (entry->event)
81 entry->sample = ring_buffer_event_data(entry->event);
82 else
83 entry->sample = NULL;
84
85 if (!entry->sample)
86 return -ENOMEM;
87
88 return 0;
89}
90
91static inline int cpu_buffer_write_commit(struct op_entry *entry)
92{
93 return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
94 entry->irq_flags);
95}
96
97static inline struct op_sample *cpu_buffer_read_entry(int cpu)
98{
99 struct ring_buffer_event *e;
100 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
101 if (e)
102 return ring_buffer_event_data(e);
103 if (ring_buffer_swap_cpu(op_ring_buffer_read,
104 op_ring_buffer_write,
105 cpu))
106 return NULL;
107 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
108 if (e)
109 return ring_buffer_event_data(e);
110 return NULL;
111}
112
113/* "acquire" as many cpu buffer slots as we can */
114static inline unsigned long cpu_buffer_entries(int cpu)
115{
116 return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
117 + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
118}
54 119
55/* transient events for the CPU buffer -> event buffer */ 120/* transient events for the CPU buffer -> event buffer */
56#define CPU_IS_KERNEL 1 121#define CPU_IS_KERNEL 1
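
Taken together, the cpu_buffer.c and buffer_sync.c hunks reduce to a simple producer/consumer pattern around the new inline helpers. The condensed sketch below is not code from the patch (sample_producer/sample_consumer are invented names); it only restates how the helpers above fit together:

/* Producer (NMI context): reserve a slot in the write ring buffer,
 * fill it, and commit -- the shape of add_sample() in cpu_buffer.c. */
static int sample_producer(unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	int ret;

	ret = cpu_buffer_write_entry(&entry);
	if (ret)
		return ret;		/* ring full: caller accounts an overflow */
	entry.sample->eip = pc;
	entry.sample->event = event;
	return cpu_buffer_write_commit(&entry);
}

/* Consumer (process context): drain whatever both rings currently hold
 * for one CPU -- the shape of the loop in sync_buffer(). */
static void sample_consumer(int cpu)
{
	unsigned long i, available = cpu_buffer_entries(cpu);

	for (i = 0; i < available; i++) {
		struct op_sample *s = cpu_buffer_read_entry(cpu);

		if (!s)
			break;
		/* ... hand s->eip / s->event on to the event buffer ... */
	}
}
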
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index cc106d503ace..d8201998b0b7 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -14,9 +14,13 @@
14#include "oprofile_stats.h" 14#include "oprofile_stats.h"
15#include "oprof.h" 15#include "oprof.h"
16 16
17unsigned long fs_buffer_size = 131072; 17#define FS_BUFFER_SIZE_DEFAULT 131072
18unsigned long fs_cpu_buffer_size = 8192; 18#define FS_CPU_BUFFER_SIZE_DEFAULT 8192
19unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */ 19#define FS_BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
20
21unsigned long fs_buffer_size;
22unsigned long fs_cpu_buffer_size;
23unsigned long fs_buffer_watershed;
20 24
21static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 25static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
22{ 26{
@@ -120,6 +124,11 @@ static const struct file_operations dump_fops = {
120 124
121void oprofile_create_files(struct super_block *sb, struct dentry *root) 125void oprofile_create_files(struct super_block *sb, struct dentry *root)
122{ 126{
127 /* reinitialize default values */
128 fs_buffer_size = FS_BUFFER_SIZE_DEFAULT;
129 fs_cpu_buffer_size = FS_CPU_BUFFER_SIZE_DEFAULT;
130 fs_buffer_watershed = FS_BUFFER_WATERSHED_DEFAULT;
131
123 oprofilefs_create_file(sb, root, "enable", &enable_fops); 132 oprofilefs_create_file(sb, root, "enable", &enable_fops);
124 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); 133 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
125 oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); 134 oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 2de5a3238c94..f78371b22529 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -5,6 +5,7 @@
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/irq.h> 6#include <linux/irq.h>
7#include <asm/io_apic.h> 7#include <asm/io_apic.h>
8#include <asm/smp.h>
8#include <linux/intel-iommu.h> 9#include <linux/intel-iommu.h>
9#include "intr_remapping.h" 10#include "intr_remapping.h"
10 11
@@ -19,17 +20,75 @@ struct irq_2_iommu {
19 u8 irte_mask; 20 u8 irte_mask;
20}; 21};
21 22
22static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; 23#ifdef CONFIG_SPARSE_IRQ
24static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
25{
26 struct irq_2_iommu *iommu;
27 int node;
28
29 node = cpu_to_node(cpu);
30
31 iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
32 printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
33
34 return iommu;
35}
23 36
24static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 37static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
25{ 38{
26 return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL; 39 struct irq_desc *desc;
40
41 desc = irq_to_desc(irq);
42
43 if (WARN_ON_ONCE(!desc))
44 return NULL;
45
46 return desc->irq_2_iommu;
47}
48
49static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
50{
51 struct irq_desc *desc;
52 struct irq_2_iommu *irq_iommu;
53
54 /*
55 * alloc irq desc if not allocated already.
56 */
57 desc = irq_to_desc_alloc_cpu(irq, cpu);
58 if (!desc) {
59 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
60 return NULL;
61 }
62
63 irq_iommu = desc->irq_2_iommu;
64
65 if (!irq_iommu)
66 desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
67
68 return desc->irq_2_iommu;
27} 69}
28 70
29static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) 71static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
30{ 72{
73 return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
74}
75
76#else /* !CONFIG_SPARSE_IRQ */
77
78static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
79
80static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
81{
82 if (irq < nr_irqs)
83 return &irq_2_iommuX[irq];
84
85 return NULL;
86}
87static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
88{
31 return irq_2_iommu(irq); 89 return irq_2_iommu(irq);
32} 90}
91#endif
33 92
34static DEFINE_SPINLOCK(irq_2_ir_lock); 93static DEFINE_SPINLOCK(irq_2_ir_lock);
35 94
@@ -86,9 +145,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
86 if (!count) 145 if (!count)
87 return -1; 146 return -1;
88 147
148#ifndef CONFIG_SPARSE_IRQ
89 /* protect irq_2_iommu_alloc later */ 149 /* protect irq_2_iommu_alloc later */
90 if (irq >= nr_irqs) 150 if (irq >= nr_irqs)
91 return -1; 151 return -1;
152#endif
92 153
93 /* 154 /*
94 * start the IRTE search from index 0. 155 * start the IRTE search from index 0.
@@ -130,6 +191,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
130 table->base[i].present = 1; 191 table->base[i].present = 1;
131 192
132 irq_iommu = irq_2_iommu_alloc(irq); 193 irq_iommu = irq_2_iommu_alloc(irq);
194 if (!irq_iommu) {
195 spin_unlock(&irq_2_ir_lock);
196 printk(KERN_ERR "can't allocate irq_2_iommu\n");
197 return -1;
198 }
199
133 irq_iommu->iommu = iommu; 200 irq_iommu->iommu = iommu;
134 irq_iommu->irte_index = index; 201 irq_iommu->irte_index = index;
135 irq_iommu->sub_handle = 0; 202 irq_iommu->sub_handle = 0;
@@ -177,6 +244,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
177 244
178 irq_iommu = irq_2_iommu_alloc(irq); 245 irq_iommu = irq_2_iommu_alloc(irq);
179 246
247 if (!irq_iommu) {
248 spin_unlock(&irq_2_ir_lock);
249 printk(KERN_ERR "can't allocate irq_2_iommu\n");
250 return -1;
251 }
252
180 irq_iommu->iommu = iommu; 253 irq_iommu->iommu = iommu;
181 irq_iommu->irte_index = index; 254 irq_iommu->irte_index = index;
182 irq_iommu->sub_handle = subhandle; 255 irq_iommu->sub_handle = subhandle;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 74801f7df9c9..11a51f8ed3b3 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -103,11 +103,11 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
103 } 103 }
104} 104}
105 105
106static void msix_flush_writes(unsigned int irq) 106static void msix_flush_writes(struct irq_desc *desc)
107{ 107{
108 struct msi_desc *entry; 108 struct msi_desc *entry;
109 109
110 entry = get_irq_msi(irq); 110 entry = get_irq_desc_msi(desc);
111 BUG_ON(!entry || !entry->dev); 111 BUG_ON(!entry || !entry->dev);
112 switch (entry->msi_attrib.type) { 112 switch (entry->msi_attrib.type) {
113 case PCI_CAP_ID_MSI: 113 case PCI_CAP_ID_MSI:
@@ -135,11 +135,11 @@ static void msix_flush_writes(unsigned int irq)
135 * Returns 1 if it succeeded in masking the interrupt and 0 if the device 135 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
136 * doesn't support MSI masking. 136 * doesn't support MSI masking.
137 */ 137 */
138static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) 138static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
139{ 139{
140 struct msi_desc *entry; 140 struct msi_desc *entry;
141 141
142 entry = get_irq_msi(irq); 142 entry = get_irq_desc_msi(desc);
143 BUG_ON(!entry || !entry->dev); 143 BUG_ON(!entry || !entry->dev);
144 switch (entry->msi_attrib.type) { 144 switch (entry->msi_attrib.type) {
145 case PCI_CAP_ID_MSI: 145 case PCI_CAP_ID_MSI:
@@ -172,9 +172,9 @@ static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
172 return 1; 172 return 1;
173} 173}
174 174
175void read_msi_msg(unsigned int irq, struct msi_msg *msg) 175void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
176{ 176{
177 struct msi_desc *entry = get_irq_msi(irq); 177 struct msi_desc *entry = get_irq_desc_msi(desc);
178 switch(entry->msi_attrib.type) { 178 switch(entry->msi_attrib.type) {
179 case PCI_CAP_ID_MSI: 179 case PCI_CAP_ID_MSI:
180 { 180 {
@@ -211,9 +211,16 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
211 } 211 }
212} 212}
213 213
214void write_msi_msg(unsigned int irq, struct msi_msg *msg) 214void read_msi_msg(unsigned int irq, struct msi_msg *msg)
215{ 215{
216 struct msi_desc *entry = get_irq_msi(irq); 216 struct irq_desc *desc = irq_to_desc(irq);
217
218 read_msi_msg_desc(desc, msg);
219}
220
221void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
222{
223 struct msi_desc *entry = get_irq_desc_msi(desc);
217 switch (entry->msi_attrib.type) { 224 switch (entry->msi_attrib.type) {
218 case PCI_CAP_ID_MSI: 225 case PCI_CAP_ID_MSI:
219 { 226 {
@@ -252,21 +259,31 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
252 entry->msg = *msg; 259 entry->msg = *msg;
253} 260}
254 261
262void write_msi_msg(unsigned int irq, struct msi_msg *msg)
263{
264 struct irq_desc *desc = irq_to_desc(irq);
265
266 write_msi_msg_desc(desc, msg);
267}
268
255void mask_msi_irq(unsigned int irq) 269void mask_msi_irq(unsigned int irq)
256{ 270{
257 msi_set_mask_bits(irq, 1, 1); 271 struct irq_desc *desc = irq_to_desc(irq);
258 msix_flush_writes(irq); 272
273 msi_set_mask_bits(desc, 1, 1);
274 msix_flush_writes(desc);
259} 275}
260 276
261void unmask_msi_irq(unsigned int irq) 277void unmask_msi_irq(unsigned int irq)
262{ 278{
263 msi_set_mask_bits(irq, 1, 0); 279 struct irq_desc *desc = irq_to_desc(irq);
264 msix_flush_writes(irq); 280
281 msi_set_mask_bits(desc, 1, 0);
282 msix_flush_writes(desc);
265} 283}
266 284
267static int msi_free_irqs(struct pci_dev* dev); 285static int msi_free_irqs(struct pci_dev* dev);
268 286
269
270static struct msi_desc* alloc_msi_entry(void) 287static struct msi_desc* alloc_msi_entry(void)
271{ 288{
272 struct msi_desc *entry; 289 struct msi_desc *entry;
@@ -303,9 +320,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
303 pci_intx_for_msi(dev, 0); 320 pci_intx_for_msi(dev, 0);
304 msi_set_enable(dev, 0); 321 msi_set_enable(dev, 0);
305 write_msi_msg(dev->irq, &entry->msg); 322 write_msi_msg(dev->irq, &entry->msg);
306 if (entry->msi_attrib.maskbit) 323 if (entry->msi_attrib.maskbit) {
307 msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask, 324 struct irq_desc *desc = irq_to_desc(dev->irq);
325 msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
308 entry->msi_attrib.masked); 326 entry->msi_attrib.masked);
327 }
309 328
310 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 329 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
311 control &= ~PCI_MSI_FLAGS_QSIZE; 330 control &= ~PCI_MSI_FLAGS_QSIZE;
@@ -327,8 +346,9 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
327 msix_set_enable(dev, 0); 346 msix_set_enable(dev, 0);
328 347
329 list_for_each_entry(entry, &dev->msi_list, list) { 348 list_for_each_entry(entry, &dev->msi_list, list) {
349 struct irq_desc *desc = irq_to_desc(entry->irq);
330 write_msi_msg(entry->irq, &entry->msg); 350 write_msi_msg(entry->irq, &entry->msg);
331 msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked); 351 msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
332 } 352 }
333 353
334 BUG_ON(list_empty(&dev->msi_list)); 354 BUG_ON(list_empty(&dev->msi_list));
@@ -596,7 +616,8 @@ void pci_msi_shutdown(struct pci_dev* dev)
596 /* Return the the pci reset with msi irqs unmasked */ 616 /* Return the the pci reset with msi irqs unmasked */
597 if (entry->msi_attrib.maskbit) { 617 if (entry->msi_attrib.maskbit) {
598 u32 mask = entry->msi_attrib.maskbits_mask; 618 u32 mask = entry->msi_attrib.maskbits_mask;
599 msi_set_mask_bits(dev->irq, mask, ~mask); 619 struct irq_desc *desc = irq_to_desc(dev->irq);
620 msi_set_mask_bits(desc, mask, ~mask);
600 } 621 }
601 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 622 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
602 return; 623 return;
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 222904411a13..276473543982 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -217,7 +217,7 @@ config PCMCIA_PXA2XX
217 depends on ARM && ARCH_PXA && PCMCIA 217 depends on ARM && ARCH_PXA && PCMCIA
218 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ 218 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
219 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ 219 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
220 || ARCH_VIPER) 220 || ARCH_VIPER || ARCH_PXA_ESERIES)
221 help 221 help
222 Say Y here to include support for the PXA2xx PCMCIA controller 222 Say Y here to include support for the PXA2xx PCMCIA controller
223 223
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 238629ad7f7c..bbac46327227 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -72,5 +72,6 @@ pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
72pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o 72pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
73pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o 73pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
74pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o 74pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
75pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
75 76
76obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y) 77obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y)
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/drivers/pcmcia/pxa2xx_e740.c
new file mode 100644
index 000000000000..f663a011bf4a
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_e740.c
@@ -0,0 +1,176 @@
1/*
2 * Toshiba e740 PCMCIA specific routines.
3 *
4 * (c) 2004 Ian Molton <spyro@f2s.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/gpio.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18
19#include <mach/hardware.h>
20#include <mach/pxa-regs.h>
21#include <mach/eseries-gpio.h>
22
23#include <asm/irq.h>
24#include <asm/mach-types.h>
25
26#include "soc_common.h"
27
28static struct pcmcia_irqs cd_irqs[] = {
29 {
30 .sock = 0,
31 .irq = IRQ_GPIO(GPIO_E740_PCMCIA_CD0),
32 .str = "CF card detect"
33 },
34 {
35 .sock = 1,
36 .irq = IRQ_GPIO(GPIO_E740_PCMCIA_CD1),
37 .str = "Wifi switch"
38 },
39};
40
41static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
42{
43 skt->irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) :
44 IRQ_GPIO(GPIO_E740_PCMCIA_RDY1);
45
46 return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1);
47}
48
49/*
50 * Release all resources.
51 */
52static void e740_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
53{
54 soc_pcmcia_free_irqs(skt, &cd_irqs[skt->nr], 1);
55}
56
57static void e740_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
58 struct pcmcia_state *state)
59{
60 if (skt->nr == 0) {
61 state->detect = gpio_get_value(GPIO_E740_PCMCIA_CD0) ? 0 : 1;
62 state->ready = gpio_get_value(GPIO_E740_PCMCIA_RDY0) ? 1 : 0;
63 } else {
64 state->detect = gpio_get_value(GPIO_E740_PCMCIA_CD1) ? 0 : 1;
65 state->ready = gpio_get_value(GPIO_E740_PCMCIA_RDY1) ? 1 : 0;
66 }
67
68 state->vs_3v = 1;
69 state->bvd1 = 1;
70 state->bvd2 = 1;
71 state->wrprot = 0;
72 state->vs_Xv = 0;
73}
74
75static int e740_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
76 const socket_state_t *state)
77{
78 if (state->flags & SS_RESET) {
79 if (skt->nr == 0)
80 gpio_set_value(GPIO_E740_PCMCIA_RST0, 1);
81 else
82 gpio_set_value(GPIO_E740_PCMCIA_RST1, 1);
83 } else {
84 if (skt->nr == 0)
85 gpio_set_value(GPIO_E740_PCMCIA_RST0, 0);
86 else
87 gpio_set_value(GPIO_E740_PCMCIA_RST1, 0);
88 }
89
90 switch (state->Vcc) {
91 case 0: /* Socket off */
92 if (skt->nr == 0)
93 gpio_set_value(GPIO_E740_PCMCIA_PWR0, 0);
94 else
95 gpio_set_value(GPIO_E740_PCMCIA_PWR1, 1);
96 break;
97 case 50:
98 case 33: /* socket on */
99 if (skt->nr == 0)
100 gpio_set_value(GPIO_E740_PCMCIA_PWR0, 1);
101 else
102 gpio_set_value(GPIO_E740_PCMCIA_PWR1, 0);
103 break;
104 default:
105 printk(KERN_ERR "e740_cs: Unsupported Vcc: %d\n", state->Vcc);
106 }
107
108 return 0;
109}
110
111/*
112 * Enable card status IRQs on (re-)initialisation. This can
113 * be called at initialisation, power management event, or
114 * pcmcia event.
115 */
116static void e740_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
117{
118 soc_pcmcia_enable_irqs(skt, cd_irqs, ARRAY_SIZE(cd_irqs));
119}
120
121/*
122 * Disable card status IRQs on suspend.
123 */
124static void e740_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
125{
126 soc_pcmcia_disable_irqs(skt, cd_irqs, ARRAY_SIZE(cd_irqs));
127}
128
129static struct pcmcia_low_level e740_pcmcia_ops = {
130 .owner = THIS_MODULE,
131 .hw_init = e740_pcmcia_hw_init,
132 .hw_shutdown = e740_pcmcia_hw_shutdown,
133 .socket_state = e740_pcmcia_socket_state,
134 .configure_socket = e740_pcmcia_configure_socket,
135 .socket_init = e740_pcmcia_socket_init,
136 .socket_suspend = e740_pcmcia_socket_suspend,
137 .nr = 2,
138};
139
140static struct platform_device *e740_pcmcia_device;
141
142static int __init e740_pcmcia_init(void)
143{
144 int ret;
145
146 if (!machine_is_e740())
147 return -ENODEV;
148
149 e740_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
150 if (!e740_pcmcia_device)
151 return -ENOMEM;
152
153 ret = platform_device_add_data(e740_pcmcia_device, &e740_pcmcia_ops,
154 sizeof(e740_pcmcia_ops));
155
156 if (!ret)
157 ret = platform_device_add(e740_pcmcia_device);
158
159 if (ret)
160 platform_device_put(e740_pcmcia_device);
161
162 return ret;
163}
164
165static void __exit e740_pcmcia_exit(void)
166{
167 platform_device_unregister(e740_pcmcia_device);
168}
169
170module_init(e740_pcmcia_init);
171module_exit(e740_pcmcia_exit);
172
173MODULE_LICENSE("GPL v2");
174MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
175MODULE_ALIAS("platform:pxa2xx-pcmcia");
176MODULE_DESCRIPTION("e740 PCMCIA platform support");
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 2133f37906f2..d5e4e637ddec 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -21,6 +21,7 @@
21 21
22#include <mach/board.h> 22#include <mach/board.h>
23#include <mach/at91_rtt.h> 23#include <mach/at91_rtt.h>
24#include <mach/cpu.h>
24 25
25 26
26/* 27/*
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index f59277bbedaa..7a568beba3f0 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -26,7 +26,7 @@
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/irq.h> 28#include <asm/irq.h>
29#include <asm/plat-s3c/regs-rtc.h> 29#include <plat/regs-rtc.h>
30 30
31/* I have yet to find an S3C implementation with more than one 31/* I have yet to find an S3C implementation with more than one
32 * of these rtc blocks in */ 32 * of these rtc blocks in */
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 66a9bb85bbe8..d26a5f82aaba 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -38,11 +38,11 @@
38#include <mach/pxa-regs.h> 38#include <mach/pxa-regs.h>
39#endif 39#endif
40 40
41#define TIMER_FREQ CLOCK_TICK_RATE
42#define RTC_DEF_DIVIDER 32768 - 1 41#define RTC_DEF_DIVIDER 32768 - 1
43#define RTC_DEF_TRIM 0 42#define RTC_DEF_TRIM 0
44 43
45static unsigned long rtc_freq = 1024; 44static unsigned long rtc_freq = 1024;
45static unsigned long timer_freq;
46static struct rtc_time rtc_alarm; 46static struct rtc_time rtc_alarm;
47static DEFINE_SPINLOCK(sa1100_rtc_lock); 47static DEFINE_SPINLOCK(sa1100_rtc_lock);
48 48
@@ -157,7 +157,7 @@ static irqreturn_t timer1_interrupt(int irq, void *dev_id)
157 rtc_update_irq(rtc, rtc_timer1_count, RTC_PF | RTC_IRQF); 157 rtc_update_irq(rtc, rtc_timer1_count, RTC_PF | RTC_IRQF);
158 158
159 if (rtc_timer1_count == 1) 159 if (rtc_timer1_count == 1)
160 rtc_timer1_count = (rtc_freq * ((1<<30)/(TIMER_FREQ>>2))); 160 rtc_timer1_count = (rtc_freq * ((1 << 30) / (timer_freq >> 2)));
161 161
162 return IRQ_HANDLED; 162 return IRQ_HANDLED;
163} 163}
@@ -166,7 +166,7 @@ static int sa1100_rtc_read_callback(struct device *dev, int data)
166{ 166{
167 if (data & RTC_PF) { 167 if (data & RTC_PF) {
168 /* interpolate missed periods and set match for the next */ 168 /* interpolate missed periods and set match for the next */
169 unsigned long period = TIMER_FREQ/rtc_freq; 169 unsigned long period = timer_freq / rtc_freq;
170 unsigned long oscr = OSCR; 170 unsigned long oscr = OSCR;
171 unsigned long osmr1 = OSMR1; 171 unsigned long osmr1 = OSMR1;
172 unsigned long missed = (oscr - osmr1)/period; 172 unsigned long missed = (oscr - osmr1)/period;
@@ -263,7 +263,7 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
263 return 0; 263 return 0;
264 case RTC_PIE_ON: 264 case RTC_PIE_ON:
265 spin_lock_irq(&sa1100_rtc_lock); 265 spin_lock_irq(&sa1100_rtc_lock);
266 OSMR1 = TIMER_FREQ/rtc_freq + OSCR; 266 OSMR1 = timer_freq / rtc_freq + OSCR;
267 OIER |= OIER_E1; 267 OIER |= OIER_E1;
268 rtc_timer1_count = 1; 268 rtc_timer1_count = 1;
269 spin_unlock_irq(&sa1100_rtc_lock); 269 spin_unlock_irq(&sa1100_rtc_lock);
@@ -271,7 +271,7 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
271 case RTC_IRQP_READ: 271 case RTC_IRQP_READ:
272 return put_user(rtc_freq, (unsigned long *)arg); 272 return put_user(rtc_freq, (unsigned long *)arg);
273 case RTC_IRQP_SET: 273 case RTC_IRQP_SET:
274 if (arg < 1 || arg > TIMER_FREQ) 274 if (arg < 1 || arg > timer_freq)
275 return -EINVAL; 275 return -EINVAL;
276 rtc_freq = arg; 276 rtc_freq = arg;
277 return 0; 277 return 0;
@@ -352,6 +352,8 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
352{ 352{
353 struct rtc_device *rtc; 353 struct rtc_device *rtc;
354 354
355 timer_freq = get_clock_tick_rate();
356
355 /* 357 /*
356 * According to the manual we should be able to let RTTR be zero 358 * According to the manual we should be able to let RTTR be zero
357 * and then a default divisor for a 32.768KHz clock is used. 359 * and then a default divisor for a 32.768KHz clock is used.
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 3d442444c618..28c90b89f2b4 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -188,11 +188,13 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
188 config = kvm_vq_config(kdev->desc)+index; 188 config = kvm_vq_config(kdev->desc)+index;
189 189
190 err = vmem_add_mapping(config->address, 190 err = vmem_add_mapping(config->address,
191 vring_size(config->num, PAGE_SIZE)); 191 vring_size(config->num,
192 KVM_S390_VIRTIO_RING_ALIGN));
192 if (err) 193 if (err)
193 goto out; 194 goto out;
194 195
195 vq = vring_new_virtqueue(config->num, vdev, (void *) config->address, 196 vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
197 vdev, (void *) config->address,
196 kvm_notify, callback); 198 kvm_notify, callback);
197 if (!vq) { 199 if (!vq) {
198 err = -ENOMEM; 200 err = -ENOMEM;
@@ -209,7 +211,8 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
209 return vq; 211 return vq;
210unmap: 212unmap:
211 vmem_remove_mapping(config->address, 213 vmem_remove_mapping(config->address,
212 vring_size(config->num, PAGE_SIZE)); 214 vring_size(config->num,
215 KVM_S390_VIRTIO_RING_ALIGN));
213out: 216out:
214 return ERR_PTR(err); 217 return ERR_PTR(err);
215} 218}
@@ -220,7 +223,8 @@ static void kvm_del_vq(struct virtqueue *vq)
220 223
221 vring_del_virtqueue(vq); 224 vring_del_virtqueue(vq);
222 vmem_remove_mapping(config->address, 225 vmem_remove_mapping(config->address,
223 vring_size(config->num, PAGE_SIZE)); 226 vring_size(config->num,
227 KVM_S390_VIRTIO_RING_ALIGN));
224} 228}
225 229
226/* 230/*
@@ -295,13 +299,29 @@ static void scan_devices(void)
295 */ 299 */
296static void kvm_extint_handler(u16 code) 300static void kvm_extint_handler(u16 code)
297{ 301{
298 void *data = (void *) *(long *) __LC_PFAULT_INTPARM; 302 struct virtqueue *vq;
299 u16 subcode = S390_lowcore.cpu_addr; 303 u16 subcode;
304 int config_changed;
300 305
306 subcode = S390_lowcore.cpu_addr;
301 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) 307 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
302 return; 308 return;
303 309
304 vring_interrupt(0, data); 310 /* The LSB might be overloaded, we have to mask it */
311 vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL);
312
313 /* We use the LSB of extparam to decide if this interrupt is a config
314 * change or a "standard" interrupt */
315 config_changed = (*(int *) __LC_EXT_PARAMS & 1);
316
317 if (config_changed) {
318 struct virtio_driver *drv;
319 drv = container_of(vq->vdev->dev.driver,
320 struct virtio_driver, driver);
321 if (drv->config_changed)
322 drv->config_changed(vq->vdev);
323 } else
324 vring_interrupt(0, vq);
305} 325}
306 326
307/* 327/*
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e529b55b3ce9..8af7dfbe022c 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -34,13 +34,12 @@
34 34
35#define ZFCP_BUS_ID_SIZE 20 35#define ZFCP_BUS_ID_SIZE 20
36 36
37static char *device;
38
39MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); 37MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
40MODULE_DESCRIPTION("FCP HBA driver"); 38MODULE_DESCRIPTION("FCP HBA driver");
41MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
42 40
43module_param(device, charp, 0400); 41static char *init_device;
42module_param_named(device, init_device, charp, 0400);
44MODULE_PARM_DESC(device, "specify initial device"); 43MODULE_PARM_DESC(device, "specify initial device");
45 44
46static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) 45static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
@@ -73,46 +72,7 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
73 return 1; 72 return 1;
74} 73}
75 74
76static int __init zfcp_device_setup(char *devstr) 75static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
77{
78 char *token;
79 char *str;
80
81 if (!devstr)
82 return 0;
83
84 /* duplicate devstr and keep the original for sysfs presentation*/
85 str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
86 if (!str)
87 return 0;
88
89 strcpy(str, devstr);
90
91 token = strsep(&str, ",");
92 if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
93 goto err_out;
94 strncpy(zfcp_data.init_busid, token, ZFCP_BUS_ID_SIZE);
95
96 token = strsep(&str, ",");
97 if (!token || strict_strtoull(token, 0,
98 (unsigned long long *) &zfcp_data.init_wwpn))
99 goto err_out;
100
101 token = strsep(&str, ",");
102 if (!token || strict_strtoull(token, 0,
103 (unsigned long long *) &zfcp_data.init_fcp_lun))
104 goto err_out;
105
106 kfree(str);
107 return 1;
108
109 err_out:
110 kfree(str);
111 pr_err("%s is not a valid SCSI device\n", devstr);
112 return 0;
113}
114
115static void __init zfcp_init_device_configure(void)
116{ 76{
117 struct zfcp_adapter *adapter; 77 struct zfcp_adapter *adapter;
118 struct zfcp_port *port; 78 struct zfcp_port *port;
@@ -120,17 +80,17 @@ static void __init zfcp_init_device_configure(void)
120 80
121 down(&zfcp_data.config_sema); 81 down(&zfcp_data.config_sema);
122 read_lock_irq(&zfcp_data.config_lock); 82 read_lock_irq(&zfcp_data.config_lock);
123 adapter = zfcp_get_adapter_by_busid(zfcp_data.init_busid); 83 adapter = zfcp_get_adapter_by_busid(busid);
124 if (adapter) 84 if (adapter)
125 zfcp_adapter_get(adapter); 85 zfcp_adapter_get(adapter);
126 read_unlock_irq(&zfcp_data.config_lock); 86 read_unlock_irq(&zfcp_data.config_lock);
127 87
128 if (!adapter) 88 if (!adapter)
129 goto out_adapter; 89 goto out_adapter;
130 port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0); 90 port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
131 if (IS_ERR(port)) 91 if (IS_ERR(port))
132 goto out_port; 92 goto out_port;
133 unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun); 93 unit = zfcp_unit_enqueue(port, lun);
134 if (IS_ERR(unit)) 94 if (IS_ERR(unit))
135 goto out_unit; 95 goto out_unit;
136 up(&zfcp_data.config_sema); 96 up(&zfcp_data.config_sema);
@@ -160,6 +120,42 @@ static struct kmem_cache *zfcp_cache_create(int size, char *name)
160 return kmem_cache_create(name , size, align, 0, NULL); 120 return kmem_cache_create(name , size, align, 0, NULL);
161} 121}
162 122
123static void __init zfcp_init_device_setup(char *devstr)
124{
125 char *token;
126 char *str;
127 char busid[ZFCP_BUS_ID_SIZE];
128 u64 wwpn, lun;
129
130 /* duplicate devstr and keep the original for sysfs presentation*/
131 str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
132 if (!str)
133 return;
134
135 strcpy(str, devstr);
136
137 token = strsep(&str, ",");
138 if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
139 goto err_out;
140 strncpy(busid, token, ZFCP_BUS_ID_SIZE);
141
142 token = strsep(&str, ",");
143 if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
144 goto err_out;
145
146 token = strsep(&str, ",");
147 if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
148 goto err_out;
149
150 kfree(str);
151 zfcp_init_device_configure(busid, wwpn, lun);
152 return;
153
154 err_out:
155 kfree(str);
156 pr_err("%s is not a valid SCSI device\n", devstr);
157}
158
163static int __init zfcp_module_init(void) 159static int __init zfcp_module_init(void)
164{ 160{
165 int retval = -ENOMEM; 161 int retval = -ENOMEM;
@@ -181,7 +177,6 @@ static int __init zfcp_module_init(void)
181 177
182 zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq"); 178 zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
183 179
184 INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
185 sema_init(&zfcp_data.config_sema, 1); 180 sema_init(&zfcp_data.config_sema, 1);
186 rwlock_init(&zfcp_data.config_lock); 181 rwlock_init(&zfcp_data.config_lock);
187 182
@@ -203,10 +198,9 @@ static int __init zfcp_module_init(void)
203 goto out_ccw_register; 198 goto out_ccw_register;
204 } 199 }
205 200
206 if (zfcp_device_setup(device)) 201 if (init_device)
207 zfcp_init_device_configure(); 202 zfcp_init_device_setup(init_device);
208 203 return 0;
209 goto out;
210 204
211out_ccw_register: 205out_ccw_register:
212 misc_deregister(&zfcp_cfdc_misc); 206 misc_deregister(&zfcp_cfdc_misc);
@@ -527,14 +521,11 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
527 &zfcp_sysfs_adapter_attrs)) 521 &zfcp_sysfs_adapter_attrs))
528 goto sysfs_failed; 522 goto sysfs_failed;
529 523
530 write_lock_irq(&zfcp_data.config_lock);
531 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 524 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
532 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
533 write_unlock_irq(&zfcp_data.config_lock);
534
535 zfcp_fc_nameserver_init(adapter); 525 zfcp_fc_nameserver_init(adapter);
536 526
537 return 0; 527 if (!zfcp_adapter_scsi_register(adapter))
528 return 0;
538 529
539sysfs_failed: 530sysfs_failed:
540 zfcp_adapter_debug_unregister(adapter); 531 zfcp_adapter_debug_unregister(adapter);
@@ -573,14 +564,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
573 return; 564 return;
574 565
575 zfcp_adapter_debug_unregister(adapter); 566 zfcp_adapter_debug_unregister(adapter);
576
577 /* remove specified adapter data structure from list */
578 write_lock_irq(&zfcp_data.config_lock);
579 list_del(&adapter->list);
580 write_unlock_irq(&zfcp_data.config_lock);
581
582 zfcp_qdio_free(adapter); 567 zfcp_qdio_free(adapter);
583
584 zfcp_free_low_mem_buffers(adapter); 568 zfcp_free_low_mem_buffers(adapter);
585 kfree(adapter->req_list); 569 kfree(adapter->req_list);
586 kfree(adapter->fc_stats); 570 kfree(adapter->fc_stats);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 728147131e1d..285881f07648 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -106,10 +106,6 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
106 if (retval) 106 if (retval)
107 goto out; 107 goto out;
108 108
109 retval = zfcp_adapter_scsi_register(adapter);
110 if (retval)
111 goto out_scsi_register;
112
113 /* initialize request counter */ 109 /* initialize request counter */
114 BUG_ON(!zfcp_reqlist_isempty(adapter)); 110 BUG_ON(!zfcp_reqlist_isempty(adapter));
115 adapter->req_no = 0; 111 adapter->req_no = 0;
@@ -123,8 +119,6 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
123 flush_work(&adapter->scan_work); 119 flush_work(&adapter->scan_work);
124 return 0; 120 return 0;
125 121
126 out_scsi_register:
127 zfcp_erp_thread_kill(adapter);
128 out: 122 out:
129 up(&zfcp_data.config_sema); 123 up(&zfcp_data.config_sema);
130 return retval; 124 return retval;
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index f1a7518e67ed..10cbfd172a28 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -85,20 +85,9 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
85 85
86static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) 86static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
87{ 87{
88 struct zfcp_adapter *adapter = NULL, *cur_adapter; 88 char busid[9];
89 struct ccw_dev_id dev_id; 89 snprintf(busid, sizeof(busid), "0.0.%04x", devno);
90 90 return zfcp_get_adapter_by_busid(busid);
91 read_lock_irq(&zfcp_data.config_lock);
92 list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) {
93 ccw_device_get_id(cur_adapter->ccw_device, &dev_id);
94 if (dev_id.devno == devno) {
95 adapter = cur_adapter;
96 zfcp_adapter_get(adapter);
97 break;
98 }
99 }
100 read_unlock_irq(&zfcp_data.config_lock);
101 return adapter;
102} 91}
103 92
104static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command) 93static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 735d675623f8..cb6df609953e 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -522,7 +522,7 @@ static const char *zfcp_rec_dbf_ids[] = {
522 [29] = "link down", 522 [29] = "link down",
523 [30] = "link up status read", 523 [30] = "link up status read",
524 [31] = "open port failed", 524 [31] = "open port failed",
525 [32] = "open port failed", 525 [32] = "",
526 [33] = "close port", 526 [33] = "close port",
527 [34] = "open unit failed", 527 [34] = "open unit failed",
528 [35] = "exclusive open unit failed", 528 [35] = "exclusive open unit failed",
@@ -936,6 +936,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
936 rct->reason_code = hdr->reason_code; 936 rct->reason_code = hdr->reason_code;
937 rct->expl = hdr->reason_code_expl; 937 rct->expl = hdr->reason_code_expl;
938 rct->vendor_unique = hdr->vendor_unique; 938 rct->vendor_unique = hdr->vendor_unique;
939 rct->max_res_size = hdr->max_res_size;
939 rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), 940 rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
940 ZFCP_DBF_SAN_MAX_PAYLOAD); 941 ZFCP_DBF_SAN_MAX_PAYLOAD);
941 debug_event(adapter->san_dbf, level, r, sizeof(*r)); 942 debug_event(adapter->san_dbf, level, r, sizeof(*r));
@@ -1043,6 +1044,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
1043 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); 1044 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
1044 zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl); 1045 zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
1045 zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique); 1046 zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
1047 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
1046 } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || 1048 } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
1047 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || 1049 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
1048 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { 1050 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
@@ -1249,7 +1251,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
1249 char dbf_name[DEBUG_MAX_NAME_LEN]; 1251 char dbf_name[DEBUG_MAX_NAME_LEN];
1250 1252
1251 /* debug feature area which records recovery activity */ 1253 /* debug feature area which records recovery activity */
1252 sprintf(dbf_name, "zfcp_%s_rec", zfcp_get_busid_by_adapter(adapter)); 1254 sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
1253 adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1, 1255 adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1,
1254 sizeof(struct zfcp_rec_dbf_record)); 1256 sizeof(struct zfcp_rec_dbf_record));
1255 if (!adapter->rec_dbf) 1257 if (!adapter->rec_dbf)
@@ -1259,7 +1261,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
1259 debug_set_level(adapter->rec_dbf, 3); 1261 debug_set_level(adapter->rec_dbf, 3);
1260 1262
1261 /* debug feature area which records HBA (FSF and QDIO) conditions */ 1263 /* debug feature area which records HBA (FSF and QDIO) conditions */
1262 sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter)); 1264 sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
1263 adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1, 1265 adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
1264 sizeof(struct zfcp_hba_dbf_record)); 1266 sizeof(struct zfcp_hba_dbf_record));
1265 if (!adapter->hba_dbf) 1267 if (!adapter->hba_dbf)
@@ -1269,7 +1271,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
1269 debug_set_level(adapter->hba_dbf, 3); 1271 debug_set_level(adapter->hba_dbf, 3);
1270 1272
1271 /* debug feature area which records SAN command failures and recovery */ 1273 /* debug feature area which records SAN command failures and recovery */
1272 sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter)); 1274 sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
1273 adapter->san_dbf = debug_register(dbf_name, dbfsize, 1, 1275 adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
1274 sizeof(struct zfcp_san_dbf_record)); 1276 sizeof(struct zfcp_san_dbf_record));
1275 if (!adapter->san_dbf) 1277 if (!adapter->san_dbf)
@@ -1279,7 +1281,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
1279 debug_set_level(adapter->san_dbf, 6); 1281 debug_set_level(adapter->san_dbf, 6);
1280 1282
1281 /* debug feature area which records SCSI command failures and recovery */ 1283 /* debug feature area which records SCSI command failures and recovery */
1282 sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter)); 1284 sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
1283 adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1, 1285 adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
1284 sizeof(struct zfcp_scsi_dbf_record)); 1286 sizeof(struct zfcp_scsi_dbf_record));
1285 if (!adapter->scsi_dbf) 1287 if (!adapter->scsi_dbf)
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 5d6b2dff855b..74998ff88e57 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -171,6 +171,7 @@ struct zfcp_san_dbf_record_ct_response {
171 u8 reason_code; 171 u8 reason_code;
172 u8 expl; 172 u8 expl;
173 u8 vendor_unique; 173 u8 vendor_unique;
174 u16 max_res_size;
174 u32 len; 175 u32 len;
175} __attribute__ ((packed)); 176} __attribute__ ((packed));
176 177
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e19e46ae4a68..510662783a6f 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -159,20 +159,6 @@ struct fcp_rscn_element {
159 u32 nport_did:24; 159 u32 nport_did:24;
160} __attribute__((packed)); 160} __attribute__((packed));
161 161
162#define ZFCP_PORT_ADDRESS 0x0
163#define ZFCP_AREA_ADDRESS 0x1
164#define ZFCP_DOMAIN_ADDRESS 0x2
165#define ZFCP_FABRIC_ADDRESS 0x3
166
167#define ZFCP_PORTS_RANGE_PORT 0xFFFFFF
168#define ZFCP_PORTS_RANGE_AREA 0xFFFF00
169#define ZFCP_PORTS_RANGE_DOMAIN 0xFF0000
170#define ZFCP_PORTS_RANGE_FABRIC 0x000000
171
172#define ZFCP_NO_PORTS_PER_AREA 0x100
173#define ZFCP_NO_PORTS_PER_DOMAIN 0x10000
174#define ZFCP_NO_PORTS_PER_FABRIC 0x1000000
175
176/* see fc-ph */ 162/* see fc-ph */
177struct fcp_logo { 163struct fcp_logo {
178 u32 command; 164 u32 command;
@@ -211,7 +197,6 @@ struct zfcp_ls_adisc {
211#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09 197#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
212#define ZFCP_CT_GID_PN 0x0121 198#define ZFCP_CT_GID_PN 0x0121
213#define ZFCP_CT_GPN_FT 0x0172 199#define ZFCP_CT_GPN_FT 0x0172
214#define ZFCP_CT_MAX_SIZE 0x1020
215#define ZFCP_CT_ACCEPT 0x8002 200#define ZFCP_CT_ACCEPT 0x8002
216#define ZFCP_CT_REJECT 0x8001 201#define ZFCP_CT_REJECT 0x8001
217 202
@@ -258,7 +243,6 @@ struct zfcp_ls_adisc {
258 243
259/* remote port status */ 244/* remote port status */
260#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 245#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
261#define ZFCP_STATUS_PORT_DID_DID 0x00000002
262#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004 246#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
263#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008 247#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
264#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020 248#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
@@ -340,8 +324,6 @@ struct ct_iu_gid_pn_resp {
340 * @wka_port: port where the request is sent to 324 * @wka_port: port where the request is sent to
341 * @req: scatter-gather list for request 325 * @req: scatter-gather list for request
342 * @resp: scatter-gather list for response 326 * @resp: scatter-gather list for response
343 * @req_count: number of elements in request scatter-gather list
344 * @resp_count: number of elements in response scatter-gather list
345 * @handler: handler function (called for response to the request) 327 * @handler: handler function (called for response to the request)
346 * @handler_data: data passed to handler function 328 * @handler_data: data passed to handler function
347 * @timeout: FSF timeout for this request 329 * @timeout: FSF timeout for this request
@@ -352,8 +334,6 @@ struct zfcp_send_ct {
352 struct zfcp_wka_port *wka_port; 334 struct zfcp_wka_port *wka_port;
353 struct scatterlist *req; 335 struct scatterlist *req;
354 struct scatterlist *resp; 336 struct scatterlist *resp;
355 unsigned int req_count;
356 unsigned int resp_count;
357 void (*handler)(unsigned long); 337 void (*handler)(unsigned long);
358 unsigned long handler_data; 338 unsigned long handler_data;
359 int timeout; 339 int timeout;
@@ -378,8 +358,6 @@ struct zfcp_gid_pn_data {
378 * @d_id: destination id of port where request is sent to 358 * @d_id: destination id of port where request is sent to
379 * @req: scatter-gather list for request 359 * @req: scatter-gather list for request
380 * @resp: scatter-gather list for response 360 * @resp: scatter-gather list for response
381 * @req_count: number of elements in request scatter-gather list
382 * @resp_count: number of elements in response scatter-gather list
383 * @handler: handler function (called for response to the request) 361 * @handler: handler function (called for response to the request)
384 * @handler_data: data passed to handler function 362 * @handler_data: data passed to handler function
385 * @completion: completion for synchronization purposes 363 * @completion: completion for synchronization purposes
@@ -392,8 +370,6 @@ struct zfcp_send_els {
392 u32 d_id; 370 u32 d_id;
393 struct scatterlist *req; 371 struct scatterlist *req;
394 struct scatterlist *resp; 372 struct scatterlist *resp;
395 unsigned int req_count;
396 unsigned int resp_count;
397 void (*handler)(unsigned long); 373 void (*handler)(unsigned long);
398 unsigned long handler_data; 374 unsigned long handler_data;
399 struct completion *completion; 375 struct completion *completion;
@@ -451,7 +427,6 @@ struct zfcp_latencies {
451}; 427};
452 428
453struct zfcp_adapter { 429struct zfcp_adapter {
454 struct list_head list; /* list of adapters */
455 atomic_t refcount; /* reference count */ 430 atomic_t refcount; /* reference count */
456 wait_queue_head_t remove_wq; /* can be used to wait for 431 wait_queue_head_t remove_wq; /* can be used to wait for
457 refcount drop to zero */ 432 refcount drop to zero */
@@ -593,16 +568,11 @@ struct zfcp_fsf_req {
593struct zfcp_data { 568struct zfcp_data {
594 struct scsi_host_template scsi_host_template; 569 struct scsi_host_template scsi_host_template;
595 struct scsi_transport_template *scsi_transport_template; 570 struct scsi_transport_template *scsi_transport_template;
596 struct list_head adapter_list_head; /* head of adapter list */
597 rwlock_t config_lock; /* serialises changes 571 rwlock_t config_lock; /* serialises changes
598 to adapter/port/unit 572 to adapter/port/unit
599 lists */ 573 lists */
600 struct semaphore config_sema; /* serialises configuration 574 struct semaphore config_sema; /* serialises configuration
601 changes */ 575 changes */
602 atomic_t loglevel; /* current loglevel */
603 char init_busid[20];
604 u64 init_wwpn;
605 u64 init_fcp_lun;
606 struct kmem_cache *fsf_req_qtcb_cache; 576 struct kmem_cache *fsf_req_qtcb_cache;
607 struct kmem_cache *sr_buffer_cache; 577 struct kmem_cache *sr_buffer_cache;
608 struct kmem_cache *gid_pn_cache; 578 struct kmem_cache *gid_pn_cache;
@@ -623,8 +593,6 @@ struct zfcp_fsf_req_qtcb {
623#define ZFCP_SET 0x00000100 593#define ZFCP_SET 0x00000100
624#define ZFCP_CLEAR 0x00000200 594#define ZFCP_CLEAR 0x00000200
625 595
626#define zfcp_get_busid_by_adapter(adapter) (dev_name(&adapter->ccw_device->dev))
627
628/* 596/*
629 * Helper functions for request ID management. 597 * Helper functions for request ID management.
630 */ 598 */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4ed4950d994b..387a3af528ac 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -840,7 +840,6 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
840 return ZFCP_ERP_FAILED; 840 return ZFCP_ERP_FAILED;
841 } 841 }
842 port->d_id = adapter->peer_d_id; 842 port->d_id = adapter->peer_d_id;
843 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
844 return zfcp_erp_port_strategy_open_port(act); 843 return zfcp_erp_port_strategy_open_port(act);
845} 844}
846 845
@@ -871,12 +870,12 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
871 case ZFCP_ERP_STEP_PORT_CLOSING: 870 case ZFCP_ERP_STEP_PORT_CLOSING:
872 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) 871 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
873 return zfcp_erp_open_ptp_port(act); 872 return zfcp_erp_open_ptp_port(act);
874 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) { 873 if (!port->d_id) {
875 queue_work(zfcp_data.work_queue, &port->gid_pn_work); 874 queue_work(zfcp_data.work_queue, &port->gid_pn_work);
876 return ZFCP_ERP_CONTINUES; 875 return ZFCP_ERP_CONTINUES;
877 } 876 }
878 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 877 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
879 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) { 878 if (!port->d_id) {
880 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) { 879 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
881 zfcp_erp_port_failed(port, 26, NULL); 880 zfcp_erp_port_failed(port, 26, NULL);
882 return ZFCP_ERP_EXIT; 881 return ZFCP_ERP_EXIT;
@@ -888,7 +887,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
888 case ZFCP_ERP_STEP_PORT_OPENING: 887 case ZFCP_ERP_STEP_PORT_OPENING:
889 /* D_ID might have changed during open */ 888 /* D_ID might have changed during open */
890 if (p_status & ZFCP_STATUS_COMMON_OPEN) { 889 if (p_status & ZFCP_STATUS_COMMON_OPEN) {
891 if (p_status & ZFCP_STATUS_PORT_DID_DID) 890 if (port->d_id)
892 return ZFCP_ERP_SUCCEEDED; 891 return ZFCP_ERP_SUCCEEDED;
893 else { 892 else {
894 act->step = ZFCP_ERP_STEP_PORT_CLOSING; 893 act->step = ZFCP_ERP_STEP_PORT_CLOSING;
@@ -1385,6 +1384,7 @@ static int zfcp_erp_thread(void *data)
1385 struct list_head *next; 1384 struct list_head *next;
1386 struct zfcp_erp_action *act; 1385 struct zfcp_erp_action *act;
1387 unsigned long flags; 1386 unsigned long flags;
1387 int ignore;
1388 1388
1389 daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev)); 1389 daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
1390 /* Block all signals */ 1390 /* Block all signals */
@@ -1407,7 +1407,7 @@ static int zfcp_erp_thread(void *data)
1407 } 1407 }
1408 1408
1409 zfcp_rec_dbf_event_thread_lock(4, adapter); 1409 zfcp_rec_dbf_event_thread_lock(4, adapter);
1410 down_interruptible(&adapter->erp_ready_sem); 1410 ignore = down_interruptible(&adapter->erp_ready_sem);
1411 zfcp_rec_dbf_event_thread_lock(5, adapter); 1411 zfcp_rec_dbf_event_thread_lock(5, adapter);
1412 } 1412 }
1413 1413
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index f009f2a7ec3e..eabdfe24456e 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -11,6 +11,20 @@
11 11
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13 13
14enum rscn_address_format {
15 RSCN_PORT_ADDRESS = 0x0,
16 RSCN_AREA_ADDRESS = 0x1,
17 RSCN_DOMAIN_ADDRESS = 0x2,
18 RSCN_FABRIC_ADDRESS = 0x3,
19};
20
21static u32 rscn_range_mask[] = {
22 [RSCN_PORT_ADDRESS] = 0xFFFFFF,
23 [RSCN_AREA_ADDRESS] = 0xFFFF00,
24 [RSCN_DOMAIN_ADDRESS] = 0xFF0000,
25 [RSCN_FABRIC_ADDRESS] = 0x000000,
26};
27
14struct ct_iu_gpn_ft_req { 28struct ct_iu_gpn_ft_req {
15 struct ct_hdr header; 29 struct ct_hdr header;
16 u8 flags; 30 u8 flags;
@@ -26,9 +40,12 @@ struct gpn_ft_resp_acc {
26 u64 wwpn; 40 u64 wwpn;
27} __attribute__ ((packed)); 41} __attribute__ ((packed));
28 42
29#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \ 43#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr))
30 / sizeof(struct gpn_ft_resp_acc)) 44#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \
45 / sizeof(struct gpn_ft_resp_acc))
31#define ZFCP_GPN_FT_BUFFERS 4 46#define ZFCP_GPN_FT_BUFFERS 4
47#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
48 - sizeof(struct ct_hdr))
32#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1) 49#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
33 50
34struct ct_iu_gpn_ft_resp { 51struct ct_iu_gpn_ft_resp {
@@ -160,22 +177,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
160 for (i = 1; i < no_entries; i++) { 177 for (i = 1; i < no_entries; i++) {
161 /* skip head and start with 1st element */ 178 /* skip head and start with 1st element */
162 fcp_rscn_element++; 179 fcp_rscn_element++;
163 switch (fcp_rscn_element->addr_format) { 180 range_mask = rscn_range_mask[fcp_rscn_element->addr_format];
164 case ZFCP_PORT_ADDRESS:
165 range_mask = ZFCP_PORTS_RANGE_PORT;
166 break;
167 case ZFCP_AREA_ADDRESS:
168 range_mask = ZFCP_PORTS_RANGE_AREA;
169 break;
170 case ZFCP_DOMAIN_ADDRESS:
171 range_mask = ZFCP_PORTS_RANGE_DOMAIN;
172 break;
173 case ZFCP_FABRIC_ADDRESS:
174 range_mask = ZFCP_PORTS_RANGE_FABRIC;
175 break;
176 default:
177 continue;
178 }
179 _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element); 181 _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
180 } 182 }
181 schedule_work(&fsf_req->adapter->scan_work); 183 schedule_work(&fsf_req->adapter->scan_work);
@@ -266,7 +268,6 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
266 return; 268 return;
267 /* looks like a valid d_id */ 269 /* looks like a valid d_id */
268 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 270 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
269 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
270} 271}
271 272
272int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, 273int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
@@ -284,8 +285,6 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
284 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; 285 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
285 gid_pn->ct.req = &gid_pn->req; 286 gid_pn->ct.req = &gid_pn->req;
286 gid_pn->ct.resp = &gid_pn->resp; 287 gid_pn->ct.resp = &gid_pn->resp;
287 gid_pn->ct.req_count = 1;
288 gid_pn->ct.resp_count = 1;
289 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, 288 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
290 sizeof(struct ct_iu_gid_pn_req)); 289 sizeof(struct ct_iu_gid_pn_req));
291 sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp, 290 sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
@@ -297,7 +296,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
297 gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER; 296 gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
298 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; 297 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
299 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; 298 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
300 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE; 299 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
301 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; 300 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
302 301
303 init_completion(&compl_rec.done); 302 init_completion(&compl_rec.done);
@@ -407,8 +406,6 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
407 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, 406 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
408 sizeof(struct zfcp_ls_adisc)); 407 sizeof(struct zfcp_ls_adisc));
409 408
410 adisc->els.req_count = 1;
411 adisc->els.resp_count = 1;
412 adisc->els.adapter = adapter; 409 adisc->els.adapter = adapter;
413 adisc->els.port = port; 410 adisc->els.port = port;
414 adisc->els.d_id = port->d_id; 411 adisc->els.d_id = port->d_id;
@@ -448,17 +445,17 @@ void zfcp_test_link(struct zfcp_port *port)
448 zfcp_erp_port_forced_reopen(port, 0, 65, NULL); 445 zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
449} 446}
450 447
451static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft) 448static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
452{ 449{
453 struct scatterlist *sg = &gpn_ft->sg_req; 450 struct scatterlist *sg = &gpn_ft->sg_req;
454 451
455 kfree(sg_virt(sg)); /* free request buffer */ 452 kfree(sg_virt(sg)); /* free request buffer */
456 zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS); 453 zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
457 454
458 kfree(gpn_ft); 455 kfree(gpn_ft);
459} 456}
460 457
461static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void) 458static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
462{ 459{
463 struct zfcp_gpn_ft *gpn_ft; 460 struct zfcp_gpn_ft *gpn_ft;
464 struct ct_iu_gpn_ft_req *req; 461 struct ct_iu_gpn_ft_req *req;
@@ -475,8 +472,8 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
475 } 472 }
476 sg_init_one(&gpn_ft->sg_req, req, sizeof(*req)); 473 sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
477 474
478 if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) { 475 if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
479 zfcp_free_sg_env(gpn_ft); 476 zfcp_free_sg_env(gpn_ft, buf_num);
480 gpn_ft = NULL; 477 gpn_ft = NULL;
481 } 478 }
482out: 479out:
@@ -485,7 +482,8 @@ out:
485 482
486 483
487static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, 484static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
488 struct zfcp_adapter *adapter) 485 struct zfcp_adapter *adapter,
486 int max_bytes)
489{ 487{
490 struct zfcp_send_ct *ct = &gpn_ft->ct; 488 struct zfcp_send_ct *ct = &gpn_ft->ct;
491 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 489 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
@@ -498,8 +496,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
498 req->header.gs_subtype = ZFCP_CT_NAME_SERVER; 496 req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
499 req->header.options = ZFCP_CT_SYNCHRONOUS; 497 req->header.options = ZFCP_CT_SYNCHRONOUS;
500 req->header.cmd_rsp_code = ZFCP_CT_GPN_FT; 498 req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
501 req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) * 499 req->header.max_res_size = max_bytes / 4;
502 (ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2;
503 req->flags = 0; 500 req->flags = 0;
504 req->domain_id_scope = 0; 501 req->domain_id_scope = 0;
505 req->area_id_scope = 0; 502 req->area_id_scope = 0;
@@ -512,8 +509,6 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
512 ct->timeout = 10; 509 ct->timeout = 10;
513 ct->req = &gpn_ft->sg_req; 510 ct->req = &gpn_ft->sg_req;
514 ct->resp = gpn_ft->sg_resp; 511 ct->resp = gpn_ft->sg_resp;
515 ct->req_count = 1;
516 ct->resp_count = ZFCP_GPN_FT_BUFFERS;
517 512
518 init_completion(&compl_rec.done); 513 init_completion(&compl_rec.done);
519 compl_rec.handler = NULL; 514 compl_rec.handler = NULL;
@@ -540,7 +535,7 @@ static void zfcp_validate_port(struct zfcp_port *port)
540 zfcp_port_dequeue(port); 535 zfcp_port_dequeue(port);
541} 536}
542 537
543static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft) 538static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
544{ 539{
545 struct zfcp_send_ct *ct = &gpn_ft->ct; 540 struct zfcp_send_ct *ct = &gpn_ft->ct;
546 struct scatterlist *sg = gpn_ft->sg_resp; 541 struct scatterlist *sg = gpn_ft->sg_resp;
@@ -560,13 +555,17 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
560 return -EIO; 555 return -EIO;
561 } 556 }
562 557
563 if (hdr->max_res_size) 558 if (hdr->max_res_size) {
559 dev_warn(&adapter->ccw_device->dev,
560 "The name server reported %d words residual data\n",
561 hdr->max_res_size);
564 return -E2BIG; 562 return -E2BIG;
563 }
565 564
566 down(&zfcp_data.config_sema); 565 down(&zfcp_data.config_sema);
567 566
568 /* first entry is the header */ 567 /* first entry is the header */
569 for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES && !last; x++) { 568 for (x = 1; x < max_entries && !last; x++) {
570 if (x % (ZFCP_GPN_FT_ENTRIES + 1)) 569 if (x % (ZFCP_GPN_FT_ENTRIES + 1))
571 acc++; 570 acc++;
572 else 571 else
@@ -589,7 +588,6 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
589 } 588 }
590 589
591 port = zfcp_port_enqueue(adapter, acc->wwpn, 590 port = zfcp_port_enqueue(adapter, acc->wwpn,
592 ZFCP_STATUS_PORT_DID_DID |
593 ZFCP_STATUS_COMMON_NOESC, d_id); 591 ZFCP_STATUS_COMMON_NOESC, d_id);
594 if (IS_ERR(port)) 592 if (IS_ERR(port))
595 ret = PTR_ERR(port); 593 ret = PTR_ERR(port);
@@ -612,6 +610,12 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
612{ 610{
613 int ret, i; 611 int ret, i;
614 struct zfcp_gpn_ft *gpn_ft; 612 struct zfcp_gpn_ft *gpn_ft;
613 int chain, max_entries, buf_num, max_bytes;
614
615 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
616 buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1;
617 max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES;
618 max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE;
615 619
616 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT) 620 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
617 return 0; 621 return 0;
@@ -620,23 +624,23 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
620 if (ret) 624 if (ret)
621 return ret; 625 return ret;
622 626
623 gpn_ft = zfcp_alloc_sg_env(); 627 gpn_ft = zfcp_alloc_sg_env(buf_num);
624 if (!gpn_ft) { 628 if (!gpn_ft) {
625 ret = -ENOMEM; 629 ret = -ENOMEM;
626 goto out; 630 goto out;
627 } 631 }
628 632
629 for (i = 0; i < 3; i++) { 633 for (i = 0; i < 3; i++) {
630 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter); 634 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes);
631 if (!ret) { 635 if (!ret) {
632 ret = zfcp_scan_eval_gpn_ft(gpn_ft); 636 ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries);
633 if (ret == -EAGAIN) 637 if (ret == -EAGAIN)
634 ssleep(1); 638 ssleep(1);
635 else 639 else
636 break; 640 break;
637 } 641 }
638 } 642 }
639 zfcp_free_sg_env(gpn_ft); 643 zfcp_free_sg_env(gpn_ft, buf_num);
640out: 644out:
641 zfcp_wka_port_put(&adapter->nsp); 645 zfcp_wka_port_put(&adapter->nsp);
642 return ret; 646 return ret;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9c72e083559d..e6416f8541b0 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -644,38 +644,38 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
644 } 644 }
645} 645}
646 646
647static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter) 647static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
648{ 648{
649 struct zfcp_qdio_queue *req_q = &adapter->req_q; 649 if (atomic_read(&adapter->req_q.count) > 0)
650
651 spin_lock_bh(&adapter->req_q_lock);
652 if (atomic_read(&req_q->count))
653 return 1; 650 return 1;
654 spin_unlock_bh(&adapter->req_q_lock); 651 atomic_inc(&adapter->qdio_outb_full);
655 return 0; 652 return 0;
656} 653}
657 654
658static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
659{
660 unsigned int count = atomic_read(&adapter->req_q.count);
661 if (!count)
662 atomic_inc(&adapter->qdio_outb_full);
663 return count > 0;
664}
665
666static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) 655static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
656 __releases(&adapter->req_q_lock)
657 __acquires(&adapter->req_q_lock)
667{ 658{
659 struct zfcp_qdio_queue *req_q = &adapter->req_q;
668 long ret; 660 long ret;
669 661
662 if (atomic_read(&req_q->count) <= -REQUEST_LIST_SIZE)
663 return -EIO;
664 if (atomic_read(&req_q->count) > 0)
665 return 0;
666
667 atomic_dec(&req_q->count);
670 spin_unlock_bh(&adapter->req_q_lock); 668 spin_unlock_bh(&adapter->req_q_lock);
671 ret = wait_event_interruptible_timeout(adapter->request_wq, 669 ret = wait_event_interruptible_timeout(adapter->request_wq,
672 zfcp_fsf_sbal_check(adapter), 5 * HZ); 670 atomic_read(&req_q->count) >= 0,
671 5 * HZ);
672 spin_lock_bh(&adapter->req_q_lock);
673 atomic_inc(&req_q->count);
674
673 if (ret > 0) 675 if (ret > 0)
674 return 0; 676 return 0;
675 if (!ret) 677 if (!ret)
676 atomic_inc(&adapter->qdio_outb_full); 678 atomic_inc(&adapter->qdio_outb_full);
677
678 spin_lock_bh(&adapter->req_q_lock);
679 return -EIO; 679 return -EIO;
680} 680}
681 681
@@ -1013,12 +1013,29 @@ skip_fsfstatus:
1013 send_ct->handler(send_ct->handler_data); 1013 send_ct->handler(send_ct->handler_data);
1014} 1014}
1015 1015
1016static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req, 1016static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1017 struct scatterlist *sg_req, 1017 struct scatterlist *sg_req,
1018 struct scatterlist *sg_resp, int max_sbals) 1018 struct scatterlist *sg_resp,
1019 int max_sbals)
1019{ 1020{
1021 struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req);
1022 u32 feat = req->adapter->adapter_features;
1020 int bytes; 1023 int bytes;
1021 1024
1025 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
1026 if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE ||
1027 !sg_is_last(sg_req) || !sg_is_last(sg_resp))
1028 return -EOPNOTSUPP;
1029
1030 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1031 sbale[2].addr = sg_virt(sg_req);
1032 sbale[2].length = sg_req->length;
1033 sbale[3].addr = sg_virt(sg_resp);
1034 sbale[3].length = sg_resp->length;
1035 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1036 return 0;
1037 }
1038
1022 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, 1039 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1023 sg_req, max_sbals); 1040 sg_req, max_sbals);
1024 if (bytes <= 0) 1041 if (bytes <= 0)
@@ -1060,8 +1077,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1060 goto out; 1077 goto out;
1061 } 1078 }
1062 1079
1063 ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp, 1080 ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
1064 FSF_MAX_SBALS_PER_REQ); 1081 FSF_MAX_SBALS_PER_REQ);
1065 if (ret) 1082 if (ret)
1066 goto failed_send; 1083 goto failed_send;
1067 1084
@@ -1171,7 +1188,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1171 goto out; 1188 goto out;
1172 } 1189 }
1173 1190
1174 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2); 1191 ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
1175 1192
1176 if (ret) 1193 if (ret)
1177 goto failed_send; 1194 goto failed_send;
@@ -1406,13 +1423,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1406 switch (header->fsf_status_qual.word[0]) { 1423 switch (header->fsf_status_qual.word[0]) {
1407 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1424 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1408 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1425 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1409 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1410 break;
1411 case FSF_SQ_NO_RETRY_POSSIBLE: 1426 case FSF_SQ_NO_RETRY_POSSIBLE:
1412 dev_warn(&req->adapter->ccw_device->dev,
1413 "Remote port 0x%016Lx could not be opened\n",
1414 (unsigned long long)port->wwpn);
1415 zfcp_erp_port_failed(port, 32, req);
1416 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1427 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1417 break; 1428 break;
1418 } 1429 }
@@ -1440,10 +1451,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1440 * Alternately, an ADISC/PDISC ELS should suffice, as well. 1451 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1441 */ 1452 */
1442 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; 1453 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1443 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) { 1454 if (req->qtcb->bottom.support.els1_length >=
1455 FSF_PLOGI_MIN_LEN) {
1444 if (plogi->serv_param.wwpn != port->wwpn) 1456 if (plogi->serv_param.wwpn != port->wwpn)
1445 atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID, 1457 port->d_id = 0;
1446 &port->status);
1447 else { 1458 else {
1448 port->wwnn = plogi->serv_param.wwnn; 1459 port->wwnn = plogi->serv_param.wwnn;
1449 zfcp_fc_plogi_evaluate(port, plogi); 1460 zfcp_fc_plogi_evaluate(port, plogi);
@@ -1907,7 +1918,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1907 dev_err(&adapter->ccw_device->dev, 1918 dev_err(&adapter->ccw_device->dev,
1908 "Shared read-write access not " 1919 "Shared read-write access not "
1909 "supported (unit 0x%016Lx, port " 1920 "supported (unit 0x%016Lx, port "
1910 "0x%016Lx\n)", 1921 "0x%016Lx)\n",
1911 (unsigned long long)unit->fcp_lun, 1922 (unsigned long long)unit->fcp_lun,
1912 (unsigned long long)unit->port->wwpn); 1923 (unsigned long long)unit->port->wwpn);
1913 zfcp_erp_unit_failed(unit, 36, req); 1924 zfcp_erp_unit_failed(unit, 36, req);
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index fa2a31780611..8bb200252347 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -164,6 +164,7 @@
164#define FSF_FEATURE_LUN_SHARING 0x00000004 164#define FSF_FEATURE_LUN_SHARING 0x00000004
165#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008 165#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
166#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 166#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
167#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
167#define FSF_FEATURE_UPDATE_ALERT 0x00000100 168#define FSF_FEATURE_UPDATE_ALERT 0x00000100
168#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 169#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
169 170
@@ -322,6 +323,7 @@ struct fsf_nport_serv_param {
322 u8 vendor_version_level[16]; 323 u8 vendor_version_level[16];
323} __attribute__ ((packed)); 324} __attribute__ ((packed));
324 325
326#define FSF_PLOGI_MIN_LEN 112
325struct fsf_plogi { 327struct fsf_plogi {
326 u32 code; 328 u32 code;
327 struct fsf_nport_serv_param serv_param; 329 struct fsf_nport_serv_param serv_param;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index d3b55fb66f13..33e0a206a0a4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -112,7 +112,7 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
112 * corruption and must stop the machine immediately. 112 * corruption and must stop the machine immediately.
113 */ 113 */
114 panic("error: unknown request id (%lx) on adapter %s.\n", 114 panic("error: unknown request id (%lx) on adapter %s.\n",
115 req_id, zfcp_get_busid_by_adapter(adapter)); 115 req_id, dev_name(&adapter->ccw_device->dev));
116 116
117 zfcp_reqlist_remove(adapter, fsf_req); 117 zfcp_reqlist_remove(adapter, fsf_req);
118 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 118 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
@@ -392,7 +392,7 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
392 392
393 init_data->cdev = adapter->ccw_device; 393 init_data->cdev = adapter->ccw_device;
394 init_data->q_format = QDIO_ZFCP_QFMT; 394 init_data->q_format = QDIO_ZFCP_QFMT;
395 memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); 395 memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
396 ASCEBC(init_data->adapter_name, 8); 396 ASCEBC(init_data->adapter_name, 8);
397 init_data->qib_param_field_format = 0; 397 init_data->qib_param_field_format = 0;
398 init_data->qib_param_field = NULL; 398 init_data->qib_param_field = NULL;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 403ecad48d4b..152d4aa9354f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -352,6 +352,8 @@ config ISCSI_TCP
352 352
353 http://open-iscsi.org 353 http://open-iscsi.org
354 354
355source "drivers/scsi/cxgb3i/Kconfig"
356
355config SGIWD93_SCSI 357config SGIWD93_SCSI
356 tristate "SGI WD93C93 SCSI Driver" 358 tristate "SGI WD93C93 SCSI Driver"
357 depends on SGI_HAS_WD93 && SCSI 359 depends on SGI_HAS_WD93 && SCSI
@@ -603,6 +605,19 @@ config SCSI_FLASHPOINT
603 substantial, so users of MultiMaster Host Adapters may not 605 substantial, so users of MultiMaster Host Adapters may not
604 wish to include it. 606 wish to include it.
605 607
608config LIBFC
609 tristate "LibFC module"
610 select SCSI_FC_ATTRS
611 ---help---
612 Fibre Channel library module
613
614config FCOE
615 tristate "FCoE module"
616 depends on PCI
617 select LIBFC
618 ---help---
619 Fibre Channel over Ethernet module
620
606config SCSI_DMX3191D 621config SCSI_DMX3191D
607 tristate "DMX3191D SCSI support" 622 tristate "DMX3191D SCSI support"
608 depends on PCI && SCSI 623 depends on PCI && SCSI
@@ -1357,6 +1372,13 @@ config SCSI_LPFC
1357 This lpfc driver supports the Emulex LightPulse 1372 This lpfc driver supports the Emulex LightPulse
1358 Family of Fibre Channel PCI host adapters. 1373 Family of Fibre Channel PCI host adapters.
1359 1374
1375config SCSI_LPFC_DEBUG_FS
1376 bool "Emulex LightPulse Fibre Channel debugfs Support"
1377 depends on SCSI_LPFC && DEBUG_FS
1378 help
1379 This makes debugging information from the lpfc driver
1380 available via the debugfs filesystem.
1381
1360config SCSI_SIM710 1382config SCSI_SIM710
1361 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" 1383 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
1362 depends on (EISA || MCA) && SCSI 1384 depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 72fd5043cfa1..1410697257cb 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -36,7 +36,9 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
36obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o 36obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
37obj-$(CONFIG_SCSI_DH) += device_handler/ 37obj-$(CONFIG_SCSI_DH) += device_handler/
38 38
39obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o 39obj-$(CONFIG_LIBFC) += libfc/
40obj-$(CONFIG_FCOE) += fcoe/
41obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
40obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o 42obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
41obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o 43obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
42obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o 44obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
@@ -124,6 +126,7 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
124obj-$(CONFIG_SCSI_STEX) += stex.o 126obj-$(CONFIG_SCSI_STEX) += stex.o
125obj-$(CONFIG_SCSI_MVSAS) += mvsas.o 127obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
126obj-$(CONFIG_PS3_ROM) += ps3rom.o 128obj-$(CONFIG_PS3_ROM) += ps3rom.o
129obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
127 130
128obj-$(CONFIG_ARM) += arm/ 131obj-$(CONFIG_ARM) += arm/
129 132
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index eeddbd19eba5..f92da9fd5f20 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -30,7 +30,7 @@
30 * $Log: NCR5380.c,v $ 30 * $Log: NCR5380.c,v $
31 31
32 * Revision 1.10 1998/9/2 Alan Cox 32 * Revision 1.10 1998/9/2 Alan Cox
33 * (alan@redhat.com) 33 * (alan@lxorguk.ukuu.org.uk)
34 * Fixed up the timer lockups reported so far. Things still suck. Looking 34 * Fixed up the timer lockups reported so far. Things still suck. Looking
35 * forward to 2.3 and per device request queues. Then it'll be possible to 35 * forward to 2.3 and per device request queues. Then it'll be possible to
36 * SMP thread this beast and improve life no end. 36 * SMP thread this beast and improve life no end.
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 84bb61628372..3c298c7253ee 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -54,7 +54,7 @@
54 * 9/28/04 Christoph Hellwig <hch@lst.de> 54 * 9/28/04 Christoph Hellwig <hch@lst.de>
55 * - merge the two source files 55 * - merge the two source files
56 * - remove internal queueing code 56 * - remove internal queueing code
57 * 14/06/07 Alan Cox <alan@redhat.com> 57 * 14/06/07 Alan Cox <alan@lxorguk.ukuu.org.uk>
58 * - Grand cleanup and Linuxisation 58 * - Grand cleanup and Linuxisation
59 */ 59 */
60 60
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8abfd06b5a72..90d1d0878cb8 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index a7355260cfcf..0391d759dfdb 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
@@ -90,14 +90,24 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
90 if (size < le16_to_cpu(kfib->header.SenderSize)) 90 if (size < le16_to_cpu(kfib->header.SenderSize))
91 size = le16_to_cpu(kfib->header.SenderSize); 91 size = le16_to_cpu(kfib->header.SenderSize);
92 if (size > dev->max_fib_size) { 92 if (size > dev->max_fib_size) {
93 dma_addr_t daddr;
94
93 if (size > 2048) { 95 if (size > 2048) {
94 retval = -EINVAL; 96 retval = -EINVAL;
95 goto cleanup; 97 goto cleanup;
96 } 98 }
99
100 kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
101 if (!kfib) {
102 retval = -ENOMEM;
103 goto cleanup;
104 }
105
97 /* Highjack the hw_fib */ 106 /* Highjack the hw_fib */
98 hw_fib = fibptr->hw_fib_va; 107 hw_fib = fibptr->hw_fib_va;
99 hw_fib_pa = fibptr->hw_fib_pa; 108 hw_fib_pa = fibptr->hw_fib_pa;
100 fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa); 109 fibptr->hw_fib_va = kfib;
110 fibptr->hw_fib_pa = daddr;
101 memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size); 111 memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
102 memcpy(kfib, hw_fib, dev->max_fib_size); 112 memcpy(kfib, hw_fib, dev->max_fib_size);
103 } 113 }
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index cbac06355107..16310443b55a 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 289304aab690..d24c2670040b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 933f208eedba..abc9ef5d1b10 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 94acbeed4e7c..36d8aab97efe 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 8cd6588a83e3..16d8db550027 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 073208b0f622..f70d9f8e79e5 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index fc1a55796a89..b6a3c5c187b6 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Adaptec AAC series RAID controller driver 2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> 3 * (c) Copyright 2001 Red Hat Inc.
4 * 4 *
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 399fe559e4de..2f602720193e 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -13425,8 +13425,7 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
13425 } 13425 }
13426 13426
13427 boardp->asc_n_io_port = pci_resource_len(pdev, 1); 13427 boardp->asc_n_io_port = pci_resource_len(pdev, 1);
13428 boardp->ioremap_addr = ioremap(pci_resource_start(pdev, 1), 13428 boardp->ioremap_addr = pci_ioremap_bar(pdev, 1);
13429 boardp->asc_n_io_port);
13430 if (!boardp->ioremap_addr) { 13429 if (!boardp->ioremap_addr) {
13431 shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) " 13430 shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) "
13432 "returned NULL\n", 13431 "returned NULL\n",
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 7c45d88a205b..ed0e3e55652a 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -22,7 +22,7 @@
22 * aha1740_makecode may still need even more work 22 * aha1740_makecode may still need even more work
23 * if it doesn't work for your devices, take a look. 23 * if it doesn't work for your devices, take a look.
24 * 24 *
25 * Reworked for new_eh and new locking by Alan Cox <alan@redhat.com> 25 * Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk>
26 * 26 *
27 * Converted to EISA and generic DMA APIs by Marc Zyngier 27 * Converted to EISA and generic DMA APIs by Marc Zyngier
28 * <maz@wild-wind.fr.eu.org>, 4/2003. 28 * <maz@wild-wind.fr.eu.org>, 4/2003.
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f91f79c8007d..106c04d2d793 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -235,7 +235,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
235 uint32_t intmask_org; 235 uint32_t intmask_org;
236 int i, j; 236 int i, j;
237 237
238 acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 238 acb->pmuA = pci_ioremap_bar(pdev, 0);
239 if (!acb->pmuA) { 239 if (!acb->pmuA) {
240 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", 240 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
241 acb->host->host_no); 241 acb->host->host_no);
@@ -329,13 +329,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
329 reg = (struct MessageUnit_B *)(dma_coherent + 329 reg = (struct MessageUnit_B *)(dma_coherent +
330 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); 330 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
331 acb->pmuB = reg; 331 acb->pmuB = reg;
332 mem_base0 = ioremap(pci_resource_start(pdev, 0), 332 mem_base0 = pci_ioremap_bar(pdev, 0);
333 pci_resource_len(pdev, 0));
334 if (!mem_base0) 333 if (!mem_base0)
335 goto out; 334 goto out;
336 335
337 mem_base1 = ioremap(pci_resource_start(pdev, 2), 336 mem_base1 = pci_ioremap_bar(pdev, 2);
338 pci_resource_len(pdev, 2));
339 if (!mem_base1) { 337 if (!mem_base1) {
340 iounmap(mem_base0); 338 iounmap(mem_base0);
341 goto out; 339 goto out;
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 7d311541c76c..20ca0a6374b5 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright (C) 1997 Wu Ching Chen 2 * Copyright (C) 1997 Wu Ching Chen
3 * 2.1.x update (C) 1998 Krzysztof G. Baranowski 3 * 2.1.x update (C) 1998 Krzysztof G. Baranowski
4 * 2.5.x update (C) 2002 Red Hat <alan@redhat.com> 4 * 2.5.x update (C) 2002 Red Hat
5 * 2.6.x update (C) 2004 Red Hat <alan@redhat.com> 5 * 2.6.x update (C) 2004 Red Hat
6 * 6 *
7 * Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes 7 * Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes
8 * 8 *
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 88ecf94ad979..af9725409f43 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -190,7 +190,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
190 190
191 result = scsi_execute_req(ch->device, cmd, direction, buffer, 191 result = scsi_execute_req(ch->device, cmd, direction, buffer,
192 buflength, &sshdr, timeout * HZ, 192 buflength, &sshdr, timeout * HZ,
193 MAX_RETRIES); 193 MAX_RETRIES, NULL);
194 194
195 dprintk("result: 0x%x\n",result); 195 dprintk("result: 0x%x\n",result);
196 if (driver_byte(result) & DRIVER_SENSE) { 196 if (driver_byte(result) & DRIVER_SENSE) {
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild
new file mode 100644
index 000000000000..ee7d6d2f9c3b
--- /dev/null
+++ b/drivers/scsi/cxgb3i/Kbuild
@@ -0,0 +1,4 @@
1EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3
2
3cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o
4obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i_ddp.o cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/Kconfig b/drivers/scsi/cxgb3i/Kconfig
new file mode 100644
index 000000000000..bfdcaf5c9c57
--- /dev/null
+++ b/drivers/scsi/cxgb3i/Kconfig
@@ -0,0 +1,7 @@
1config SCSI_CXGB3_ISCSI
2 tristate "Chelsio S3xx iSCSI support"
3 depends on CHELSIO_T3_DEPENDS
4 select CHELSIO_T3
5 select SCSI_ISCSI_ATTRS
6 ---help---
7 This driver supports iSCSI offload for the Chelsio S3 series devices.
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
new file mode 100644
index 000000000000..fde6e4c634e7
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -0,0 +1,139 @@
1/*
2 * cxgb3i.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_H__
14#define __CXGB3I_H__
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/list.h>
21#include <linux/netdevice.h>
22#include <linux/scatterlist.h>
23#include <scsi/libiscsi_tcp.h>
24
25/* from cxgb3 LLD */
26#include "common.h"
27#include "t3_cpl.h"
28#include "t3cdev.h"
29#include "cxgb3_ctl_defs.h"
30#include "cxgb3_offload.h"
31#include "firmware_exports.h"
32
33#include "cxgb3i_offload.h"
34#include "cxgb3i_ddp.h"
35
36#define CXGB3I_SCSI_QDEPTH_DFLT 128
37#define CXGB3I_MAX_TARGET CXGB3I_MAX_CONN
38#define CXGB3I_MAX_LUN 512
39#define ISCSI_PDU_NONPAYLOAD_MAX \
40 (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)
41
42struct cxgb3i_adapter;
43struct cxgb3i_hba;
44struct cxgb3i_endpoint;
45
46/**
47 * struct cxgb3i_hba - cxgb3i iscsi structure (per port)
48 *
49 * @snic: cxgb3i adapter containing this port
50 * @ndev: pointer to netdev structure
51 * @shost: pointer to scsi host structure
52 */
53struct cxgb3i_hba {
54 struct cxgb3i_adapter *snic;
55 struct net_device *ndev;
56 struct Scsi_Host *shost;
57};
58
59/**
60 * struct cxgb3i_adapter - cxgb3i adapter structure (per pci)
61 *
62 * @listhead: list head to link elements
63 * @lock: lock for this structure
64 * @tdev: pointer to t3cdev used by cxgb3 driver
65 * @pdev: pointer to pci dev
66 * @hba_cnt: # of hbas (the same as # of ports)
67 * @hba: all the hbas on this adapter
68 * @tx_max_size: max. tx packet size supported
69 * @rx_max_size: max. rx packet size supported
70 * @tag_format: ddp tag format settings
71 */
72struct cxgb3i_adapter {
73 struct list_head list_head;
74 spinlock_t lock;
75 struct t3cdev *tdev;
76 struct pci_dev *pdev;
77 unsigned char hba_cnt;
78 struct cxgb3i_hba *hba[MAX_NPORTS];
79
80 unsigned int tx_max_size;
81 unsigned int rx_max_size;
82
83 struct cxgb3i_tag_format tag_format;
84};
85
86/**
87 * struct cxgb3i_conn - cxgb3i iscsi connection
88 *
89 * @listhead: list head to link elements
90 * @cep: pointer to iscsi_endpoint structure
91 * @conn: pointer to iscsi_conn structure
92 * @hba: pointer to the hba this conn. is going through
93 * @task_idx_bits: # of bits needed for session->cmds_max
94 */
95struct cxgb3i_conn {
96 struct list_head list_head;
97 struct cxgb3i_endpoint *cep;
98 struct iscsi_conn *conn;
99 struct cxgb3i_hba *hba;
100 unsigned int task_idx_bits;
101};
102
103/**
104 * struct cxgb3i_endpoint - iscsi tcp endpoint
105 *
106 * @c3cn: the h/w tcp connection representation
107 * @hba: pointer to the hba this conn. is going through
108 * @cconn: pointer to the associated cxgb3i iscsi connection
109 */
110struct cxgb3i_endpoint {
111 struct s3_conn *c3cn;
112 struct cxgb3i_hba *hba;
113 struct cxgb3i_conn *cconn;
114};
115
116int cxgb3i_iscsi_init(void);
117void cxgb3i_iscsi_cleanup(void);
118
119struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *);
120void cxgb3i_adapter_remove(struct t3cdev *);
121int cxgb3i_adapter_ulp_init(struct cxgb3i_adapter *);
122void cxgb3i_adapter_ulp_cleanup(struct cxgb3i_adapter *);
123
124struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
125struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
126 struct net_device *);
127void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
128
129int cxgb3i_pdu_init(void);
130void cxgb3i_pdu_cleanup(void);
131void cxgb3i_conn_cleanup_task(struct iscsi_task *);
132int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8);
133int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
134int cxgb3i_conn_xmit_pdu(struct iscsi_task *);
135
136void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt);
137int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt);
138
139#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
new file mode 100644
index 000000000000..1a41f04264f7
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -0,0 +1,770 @@
1/*
2 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#include <linux/skbuff.h>
14
15/* from cxgb3 LLD */
16#include "common.h"
17#include "t3_cpl.h"
18#include "t3cdev.h"
19#include "cxgb3_ctl_defs.h"
20#include "cxgb3_offload.h"
21#include "firmware_exports.h"
22
23#include "cxgb3i_ddp.h"
24
25#define DRV_MODULE_NAME "cxgb3i_ddp"
26#define DRV_MODULE_VERSION "1.0.0"
27#define DRV_MODULE_RELDATE "Dec. 1, 2008"
28
29static char version[] =
30 "Chelsio S3xx iSCSI DDP " DRV_MODULE_NAME
31 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
32
33MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
34MODULE_DESCRIPTION("cxgb3i ddp pagepod manager");
35MODULE_LICENSE("GPL");
36MODULE_VERSION(DRV_MODULE_VERSION);
37
38#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
39#define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
40#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt)
41
42#ifdef __DEBUG_CXGB3I_DDP__
43#define ddp_log_debug(fmt, args...) \
44 printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
45#else
46#define ddp_log_debug(fmt...)
47#endif
48
49/*
50 * iSCSI Direct Data Placement
51 *
52 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
53 * pre-posted final destination host-memory buffers based on the Initiator
54 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
55 *
56 * The host memory address is programmed into h/w in the format of pagepod
57 * entries.
58 * The location of the pagepod entry is encoded into the ddp tag, which is
59 * then used as, or as the base of, the ITT/TTT.
60 */
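(A minimal sketch of the tag construction described above, mirroring what cxgb3i_ddp_tag_reserve() does later in this file; the helper name is illustrative only and not part of the patch, while cxgb3i_ddp_tag_base() and PPOD_IDX_SHIFT are the helpers this patch adds in cxgb3i_ddp.h.)

/* illustrative sketch, not driver code: build a ddp tag by packing the
 * initiator's s/w tag bits around the reserved field, then folding the
 * chosen pagepod index in above the 6 color bits so the h/w can locate
 * the pagepod straight from the ITT/TTT.
 */
static inline u32 example_build_ddp_tag(struct cxgb3i_tag_format *tformat,
					u32 sw_tag, unsigned int ppod_idx)
{
	u32 tag = cxgb3i_ddp_tag_base(tformat, sw_tag);	/* shift s/w bits */

	tag |= ppod_idx << PPOD_IDX_SHIFT;		/* pagepod location */
	return tag;
}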
61
62#define DDP_PGIDX_MAX 4
63#define DDP_THRESHOLD 2048
64static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
65static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
66static unsigned char page_idx = DDP_PGIDX_MAX;
67
68static LIST_HEAD(cxgb3i_ddp_list);
69static DEFINE_RWLOCK(cxgb3i_ddp_rwlock);
70
71/*
72 * functions to program the pagepod in h/w
73 */
74static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
75{
76 struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
77
78 req->wr.wr_lo = 0;
79 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
80 req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
81 V_ULPTX_CMD(ULP_MEM_WRITE));
82 req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
83 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
84}
85
86static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
87 unsigned int idx, unsigned int npods,
88 struct cxgb3i_gather_list *gl)
89{
90 unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
91 int i;
92
93 for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
94 struct sk_buff *skb = ddp->gl_skb[idx];
95 struct pagepod *ppod;
96 int j, pidx;
97
98 /* hold on to the skb until we clear the ddp mapping */
99 skb_get(skb);
100
101 ulp_mem_io_set_hdr(skb, pm_addr);
102 ppod = (struct pagepod *)
103 (skb->head + sizeof(struct ulp_mem_io));
104 memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
105 for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
106 ppod->addr[j] = pidx < gl->nelem ?
107 cpu_to_be64(gl->phys_addr[pidx]) : 0UL;
108
109 skb->priority = CPL_PRIORITY_CONTROL;
110 cxgb3_ofld_send(ddp->tdev, skb);
111 }
112 return 0;
113}
114
115static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
116 unsigned int npods)
117{
118 unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
119 int i;
120
121 for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
122 struct sk_buff *skb = ddp->gl_skb[idx];
123
124 ddp->gl_skb[idx] = NULL;
125 memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
126 ulp_mem_io_set_hdr(skb, pm_addr);
127 skb->priority = CPL_PRIORITY_CONTROL;
128 cxgb3_ofld_send(ddp->tdev, skb);
129 }
130 return 0;
131}
132
133static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
134 int start, int max, int count,
135 struct cxgb3i_gather_list *gl)
136{
137 unsigned int i, j;
138
139 spin_lock(&ddp->map_lock);
140 for (i = start; i <= max;) {
141 for (j = 0; j < count; j++) {
142 if (ddp->gl_map[i + j])
143 break;
144 }
145 if (j == count) {
146 for (j = 0; j < count; j++)
147 ddp->gl_map[i + j] = gl;
148 spin_unlock(&ddp->map_lock);
149 return i;
150 }
151 i += j + 1;
152 }
153 spin_unlock(&ddp->map_lock);
154 return -EBUSY;
155}
156
157static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
158 int start, int count)
159{
160 spin_lock(&ddp->map_lock);
161 memset(&ddp->gl_map[start], 0,
162 count * sizeof(struct cxgb3i_gather_list *));
163 spin_unlock(&ddp->map_lock);
164}
165
166static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
167 int idx, int count)
168{
169 int i;
170
171 for (i = 0; i < count; i++, idx++)
172 if (ddp->gl_skb[idx]) {
173 kfree_skb(ddp->gl_skb[idx]);
174 ddp->gl_skb[idx] = NULL;
175 }
176}
177
178static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
179 int count, gfp_t gfp)
180{
181 int i;
182
183 for (i = 0; i < count; i++) {
184 struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
185 PPOD_SIZE, gfp);
186 if (skb) {
187 ddp->gl_skb[idx + i] = skb;
188 skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
189 } else {
190 ddp_free_gl_skb(ddp, idx, i);
191 return -ENOMEM;
192 }
193 }
194 return 0;
195}
196
197/**
198 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size.
199 * @pgsz: page size
200 * return the ddp page index; if no match is found, return DDP_PGIDX_MAX.
201 */
202int cxgb3i_ddp_find_page_index(unsigned long pgsz)
203{
204 int i;
205
206 for (i = 0; i < DDP_PGIDX_MAX; i++) {
207 if (pgsz == (1UL << ddp_page_shift[i]))
208 return i;
209 }
210 ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
211 return DDP_PGIDX_MAX;
212}
213EXPORT_SYMBOL_GPL(cxgb3i_ddp_find_page_index);
214
215static inline void ddp_gl_unmap(struct pci_dev *pdev,
216 struct cxgb3i_gather_list *gl)
217{
218 int i;
219
220 for (i = 0; i < gl->nelem; i++)
221 pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
222 PCI_DMA_FROMDEVICE);
223}
224
225static inline int ddp_gl_map(struct pci_dev *pdev,
226 struct cxgb3i_gather_list *gl)
227{
228 int i;
229
230 for (i = 0; i < gl->nelem; i++) {
231 gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
232 PAGE_SIZE,
233 PCI_DMA_FROMDEVICE);
234 if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
235 goto unmap;
236 }
237
238 return i;
239
240unmap:
241 if (i) {
242 unsigned int nelem = gl->nelem;
243
244 gl->nelem = i;
245 ddp_gl_unmap(pdev, gl);
246 gl->nelem = nelem;
247 }
248 return -ENOMEM;
249}
250
251/**
252 * cxgb3i_ddp_make_gl - build ddp page buffer list
253 * @xferlen: total buffer length
254 * @sgl: page buffer scatter-gather list
255 * @sgcnt: # of page buffers
256 * @pdev: pci_dev, used for pci map
257 * @gfp: allocation mode
258 *
259 * construct a ddp page buffer list from the scsi scattergather list.
260 * coalesce buffers as much as possible, and obtain dma addresses for
261 * each page.
262 *
263 * Return the cxgb3i_gather_list constructed from the page buffers if the
264 * memory can be used for ddp. Return NULL otherwise.
265 */
266struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
267 struct scatterlist *sgl,
268 unsigned int sgcnt,
269 struct pci_dev *pdev,
270 gfp_t gfp)
271{
272 struct cxgb3i_gather_list *gl;
273 struct scatterlist *sg = sgl;
274 struct page *sgpage = sg_page(sg);
275 unsigned int sglen = sg->length;
276 unsigned int sgoffset = sg->offset;
277 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
278 PAGE_SHIFT;
279 int i = 1, j = 0;
280
281 if (xferlen < DDP_THRESHOLD) {
282 ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
283 xferlen, DDP_THRESHOLD);
284 return NULL;
285 }
286
287 gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
288 npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
289 gfp);
290 if (!gl)
291 return NULL;
292
293 gl->pages = (struct page **)&gl->phys_addr[npages];
294 gl->length = xferlen;
295 gl->offset = sgoffset;
296 gl->pages[0] = sgpage;
297
298 sg = sg_next(sg);
299 while (sg) {
300 struct page *page = sg_page(sg);
301
302 if (sgpage == page && sg->offset == sgoffset + sglen)
303 sglen += sg->length;
304 else {
305 /* make sure the sgl fits ddp:
306 * only the first chunk may start at a page offset, and
307 * all of the middle pages must be used completely
308 */
309 if ((j && sgoffset) ||
310 ((i != sgcnt - 1) &&
311 ((sglen + sgoffset) & ~PAGE_MASK)))
312 goto error_out;
313
314 j++;
315 if (j == gl->nelem || sg->offset)
316 goto error_out;
317 gl->pages[j] = page;
318 sglen = sg->length;
319 sgoffset = sg->offset;
320 sgpage = page;
321 }
322 i++;
323 sg = sg_next(sg);
324 }
325 gl->nelem = ++j;
326
327 if (ddp_gl_map(pdev, gl) < 0)
328 goto error_out;
329
330 return gl;
331
332error_out:
333 kfree(gl);
334 return NULL;
335}
336EXPORT_SYMBOL_GPL(cxgb3i_ddp_make_gl);
337
338/**
339 * cxgb3i_ddp_release_gl - release a page buffer list
340 * @gl: a ddp page buffer list
341 * @pdev: pci_dev used for pci_unmap
342 * free a ddp page buffer list returned by cxgb3i_ddp_make_gl().
343 */
344void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
345 struct pci_dev *pdev)
346{
347 ddp_gl_unmap(pdev, gl);
348 kfree(gl);
349}
350EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);
351
352/**
353 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
354 * @tdev: t3cdev adapter
355 * @tid: connection id
356 * @tformat: tag format
357 * @tagp: the s/w tag, if ddp setup is successful, it will be updated with
358 * ddp/hw tag
359 * @gl: the page memory list
360 * @gfp: allocation mode
361 *
362 * ddp setup for a given page buffer list and construct the ddp tag.
363 * return 0 on success, < 0 otherwise.
364 */
365int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
366 struct cxgb3i_tag_format *tformat, u32 *tagp,
367 struct cxgb3i_gather_list *gl, gfp_t gfp)
368{
369 struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
370 struct pagepod_hdr hdr;
371 unsigned int npods;
372 int idx = -1, idx_max;
373 int err = -ENOMEM;
374 u32 sw_tag = *tagp;
375 u32 tag;
376
377 if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
378 gl->length < DDP_THRESHOLD) {
379 ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
380 page_idx, gl->length, DDP_THRESHOLD);
381 return -EINVAL;
382 }
383
384 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
385 idx_max = ddp->nppods - npods + 1;
386
387 if (ddp->idx_last == ddp->nppods)
388 idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
389 else {
390 idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
391 idx_max, npods, gl);
392 if (idx < 0 && ddp->idx_last >= npods)
393 idx = ddp_find_unused_entries(ddp, 0,
394 ddp->idx_last - npods + 1,
395 npods, gl);
396 }
397 if (idx < 0) {
398 ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
399 gl->length, gl->nelem, npods);
400 return idx;
401 }
402
403 err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
404 if (err < 0)
405 goto unmark_entries;
406
407 tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
408 tag |= idx << PPOD_IDX_SHIFT;
409
410 hdr.rsvd = 0;
411 hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
412 hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
413 hdr.maxoffset = htonl(gl->length);
414 hdr.pgoffset = htonl(gl->offset);
415
416 err = set_ddp_map(ddp, &hdr, idx, npods, gl);
417 if (err < 0)
418 goto free_gl_skb;
419
420 ddp->idx_last = idx;
421 ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
422 gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
423 idx, npods);
424 *tagp = tag;
425 return 0;
426
427free_gl_skb:
428 ddp_free_gl_skb(ddp, idx, npods);
429unmark_entries:
430 ddp_unmark_entries(ddp, idx, npods);
431 return err;
432}
433EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_reserve);
434
435/**
436 * cxgb3i_ddp_tag_release - release a ddp tag
437 * @tdev: t3cdev adapter
438 * @tag: ddp tag
439 * ddp cleanup for a given ddp tag and release all the resources held
440 */
441void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
442{
443 struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
444 u32 idx;
445
446 if (!ddp) {
447 ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
448 return;
449 }
450
451 idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
452 if (idx < ddp->nppods) {
453 struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
454 unsigned int npods;
455
456 if (!gl) {
457 ddp_log_error("release ddp 0x%x, idx 0x%x, gl NULL.\n",
458 tag, idx);
459 return;
460 }
461 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
462 ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
463 tag, idx, npods);
464 clear_ddp_map(ddp, idx, npods);
465 ddp_unmark_entries(ddp, idx, npods);
466 cxgb3i_ddp_release_gl(gl, ddp->pdev);
467 } else
468 ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
469 tag, idx, ddp->nppods);
470}
471EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_release);
472
473static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
474 int reply)
475{
476 struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
477 GFP_KERNEL);
478 struct cpl_set_tcb_field *req;
479 u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
480
481 if (!skb)
482 return -ENOMEM;
483
484 /* set up ulp submode and page size */
485 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
486 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
487 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
488 req->reply = V_NO_REPLY(reply ? 0 : 1);
489 req->cpu_idx = 0;
490 req->word = htons(31);
491 req->mask = cpu_to_be64(0xF0000000);
492 req->val = cpu_to_be64(val << 28);
493 skb->priority = CPL_PRIORITY_CONTROL;
494
495 cxgb3_ofld_send(tdev, skb);
496 return 0;
497}
498
499/**
500 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
501 * @tdev: t3cdev adapter
502 * @tid: connection id
503 * @reply: request reply from h/w
504 * set up the ddp page size based on the host PAGE_SIZE for a connection
505 * identified by tid
506 */
507int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
508 int reply)
509{
510 return setup_conn_pgidx(tdev, tid, page_idx, reply);
511}
512EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_host_pagesize);
513
514/**
515 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
516 * @tdev: t3cdev adapter
517 * @tid: connection id
518 * @reply: request reply from h/w
519 * @pgsz: ddp page size
520 * set up the ddp page size for a connection identified by tid
521 */
522int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
523 int reply, unsigned long pgsz)
524{
525 int pgidx = cxgb3i_ddp_find_page_index(pgsz);
526
527 return setup_conn_pgidx(tdev, tid, pgidx, reply);
528}
529EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_pagesize);
530
531/**
532 * cxgb3i_setup_conn_digest - setup conn. digest setting
533 * @tdev: t3cdev adapter
534 * @tid: connection id
535 * @hcrc: header digest enabled
536 * @dcrc: data digest enabled
537 * @reply: request reply from h/w
538 * set up the iscsi digest settings for a connection identified by tid
539 */
540int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
541 int hcrc, int dcrc, int reply)
542{
543 struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
544 GFP_KERNEL);
545 struct cpl_set_tcb_field *req;
546 u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
547
548 if (!skb)
549 return -ENOMEM;
550
551 /* set up ulp submode and page size */
552 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
553 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
554 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
555 req->reply = V_NO_REPLY(reply ? 0 : 1);
556 req->cpu_idx = 0;
557 req->word = htons(31);
558 req->mask = cpu_to_be64(0x0F000000);
559 req->val = cpu_to_be64(val << 24);
560 skb->priority = CPL_PRIORITY_CONTROL;
561
562 cxgb3_ofld_send(tdev, skb);
563 return 0;
564}
565EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_digest);
566
567static int ddp_init(struct t3cdev *tdev)
568{
569 struct cxgb3i_ddp_info *ddp;
570 struct ulp_iscsi_info uinfo;
571 unsigned int ppmax, bits;
572 int i, err;
573 static int vers_printed;
574
575 if (!vers_printed) {
576 printk(KERN_INFO "%s", version);
577 vers_printed = 1;
578 }
579
580 err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
581 if (err < 0) {
582 ddp_log_error("%s, failed to get iscsi param err=%d.\n",
583 tdev->name, err);
584 return err;
585 }
586
587 ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
588 bits = __ilog2_u32(ppmax) + 1;
589 if (bits > PPOD_IDX_MAX_SIZE)
590 bits = PPOD_IDX_MAX_SIZE;
591 ppmax = (1 << (bits - 1)) - 1;
592
593 ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
594 ppmax *
595 (sizeof(struct cxgb3i_gather_list *) +
596 sizeof(struct sk_buff *)),
597 GFP_KERNEL);
598 if (!ddp) {
599 ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
600 tdev->name, ppmax);
601 return 0;
602 }
603 ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
604 ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
605 ppmax *
606 sizeof(struct cxgb3i_gather_list *));
607 spin_lock_init(&ddp->map_lock);
608
609 ddp->tdev = tdev;
610 ddp->pdev = uinfo.pdev;
611 ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
612 ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
613 ddp->llimit = uinfo.llimit;
614 ddp->ulimit = uinfo.ulimit;
615 ddp->nppods = ppmax;
616 ddp->idx_last = ppmax;
617 ddp->idx_bits = bits;
618 ddp->idx_mask = (1 << bits) - 1;
619 ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
620
621 uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
622 for (i = 0; i < DDP_PGIDX_MAX; i++)
623 uinfo.pgsz_factor[i] = ddp_page_order[i];
624 uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);
625
626 err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
627 if (err < 0) {
628 ddp_log_warn("%s unable to set iscsi param err=%d, "
629 "ddp disabled.\n", tdev->name, err);
630 goto free_ddp_map;
631 }
632
633 tdev->ulp_iscsi = ddp;
634
635 /* add to the list */
636 write_lock(&cxgb3i_ddp_rwlock);
637 list_add_tail(&ddp->list, &cxgb3i_ddp_list);
638 write_unlock(&cxgb3i_ddp_rwlock);
639
640 ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
641 "pkt %u,%u.\n",
642 ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
643 ddp->idx_mask, ddp->rsvd_tag_mask,
644 ddp->max_txsz, ddp->max_rxsz);
645 return 0;
646
647free_ddp_map:
648 cxgb3i_free_big_mem(ddp);
649 return err;
650}
651
652/**
653 * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
654 * @tdev: t3cdev adapter
655 * @tformat: tag format
656 * @txsz: max tx pkt size, filled in by this func.
657 * @rxsz: max rx pkt size, filled in by this func.
658 * initialize the ddp pagepod manager for a given adapter if needed and
659 * setup the tag format for a given iscsi entity
660 */
661int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
662 struct cxgb3i_tag_format *tformat,
663 unsigned int *txsz, unsigned int *rxsz)
664{
665 struct cxgb3i_ddp_info *ddp;
666 unsigned char idx_bits;
667
668 if (!tformat)
669 return -EINVAL;
670
671 if (!tdev->ulp_iscsi) {
672 int err = ddp_init(tdev);
673 if (err < 0)
674 return err;
675 }
676 ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
677
678 idx_bits = 32 - tformat->sw_bits;
679 tformat->rsvd_bits = ddp->idx_bits;
680 tformat->rsvd_shift = PPOD_IDX_SHIFT;
681 tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;
682
683 ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
684 tformat->sw_bits, tformat->rsvd_bits,
685 tformat->rsvd_shift, tformat->rsvd_mask);
686
687 *txsz = ddp->max_txsz;
688 *rxsz = ddp->max_rxsz;
689 ddp_log_info("ddp max pkt size: %u, %u.\n",
690 ddp->max_txsz, ddp->max_rxsz);
691 return 0;
692}
693EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
694
695static void ddp_release(struct cxgb3i_ddp_info *ddp)
696{
697 int i = 0;
698 struct t3cdev *tdev = ddp->tdev;
699
700 tdev->ulp_iscsi = NULL;
701 while (i < ddp->nppods) {
702 struct cxgb3i_gather_list *gl = ddp->gl_map[i];
703 if (gl) {
704 int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
705 >> PPOD_PAGES_SHIFT;
706
707 kfree(gl);
708 ddp_free_gl_skb(ddp, i, npods);
709 } else
710 i++;
711 }
712 cxgb3i_free_big_mem(ddp);
713}
714
715/**
716 * cxgb3i_adapter_ddp_cleanup - release the adapter's ddp resource
717 * @tdev: t3cdev adapter
718 * release all the resources held by the ddp pagepod manager for a given
719 * adapter if needed
720 */
721void cxgb3i_adapter_ddp_cleanup(struct t3cdev *tdev)
722{
723 struct cxgb3i_ddp_info *ddp;
724
725 /* remove from the list */
726 write_lock(&cxgb3i_ddp_rwlock);
727 list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
728 if (ddp->tdev == tdev) {
729 list_del(&ddp->list);
730 break;
731 }
732 }
733 write_unlock(&cxgb3i_ddp_rwlock);
734
735 if (ddp)
736 ddp_release(ddp);
737}
738EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_cleanup);
739
740/**
741 * cxgb3i_ddp_init_module - module init entry point
742 * initialize any driver wide global data structures
743 */
744static int __init cxgb3i_ddp_init_module(void)
745{
746 page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
747 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
748 PAGE_SIZE, page_idx);
749 return 0;
750}
751
752/**
753 * cxgb3i_ddp_exit_module - module cleanup/exit entry point
754 * go through the ddp list and release any resource held.
755 */
756static void __exit cxgb3i_ddp_exit_module(void)
757{
758 struct cxgb3i_ddp_info *ddp;
759
760 /* release all ddp manager if there is any */
761 write_lock(&cxgb3i_ddp_rwlock);
762 list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
763 list_del(&ddp->list);
764 ddp_release(ddp);
765 }
766 write_unlock(&cxgb3i_ddp_rwlock);
767}
768
769module_init(cxgb3i_ddp_init_module);
770module_exit(cxgb3i_ddp_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
new file mode 100644
index 000000000000..5c7c4d95c493
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -0,0 +1,306 @@
1/*
2 * cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_ULP2_DDP_H__
14#define __CXGB3I_ULP2_DDP_H__
15
16/**
17 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
18 *
19 * @sw_bits: # of bits used by iscsi software layer
20 * @rsvd_bits: # of bits used by h/w
21 * @rsvd_shift: h/w bits shift left
22 * @rsvd_mask: reserved bit mask
23 */
24struct cxgb3i_tag_format {
25 unsigned char sw_bits;
26 unsigned char rsvd_bits;
27 unsigned char rsvd_shift;
28 unsigned char filler[1];
29 u32 rsvd_mask;
30};
31
32/**
33 * struct cxgb3i_gather_list - cxgb3i direct data placement memory
34 *
35 * @tag: ddp tag
36 * @length: total data buffer length
37 * @offset: initial offset to the 1st page
38 * @nelem: # of pages
39 * @pages: page pointers
40 * @phys_addr: physical address
41 */
42struct cxgb3i_gather_list {
43 u32 tag;
44 unsigned int length;
45 unsigned int offset;
46 unsigned int nelem;
47 struct page **pages;
48 dma_addr_t phys_addr[0];
49};
50
51/**
52 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
53 *
54 * @list: list head to link elements
55 * @tdev: pointer to t3cdev used by cxgb3 driver
56 * @max_txsz: max tx packet size for ddp
57 * @max_rxsz: max rx packet size for ddp
58 * @llimit: lower bound of the page pod memory
59 * @ulimit: upper bound of the page pod memory
60 * @nppods: # of page pod entries
61 * @idx_last: page pod entry last used
62 * @idx_bits: # of bits the pagepod index would take
63 * @idx_mask: pagepod index mask
64 * @rsvd_tag_mask: tag mask
65 * @map_lock: lock to synchronize access to the page pod map
66 * @gl_map: ddp memory gather list
67 * @gl_skb: skb used to program the pagepod
68 */
69struct cxgb3i_ddp_info {
70 struct list_head list;
71 struct t3cdev *tdev;
72 struct pci_dev *pdev;
73 unsigned int max_txsz;
74 unsigned int max_rxsz;
75 unsigned int llimit;
76 unsigned int ulimit;
77 unsigned int nppods;
78 unsigned int idx_last;
79 unsigned char idx_bits;
80 unsigned char filler[3];
81 u32 idx_mask;
82 u32 rsvd_tag_mask;
83 spinlock_t map_lock;
84 struct cxgb3i_gather_list **gl_map;
85 struct sk_buff **gl_skb;
86};
87
88#define ULP2_MAX_PKT_SIZE 16224
89#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX)
90#define PPOD_PAGES_MAX 4
91#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
92
93/*
94 * struct pagepod_hdr, pagepod - pagepod format
95 */
96struct pagepod_hdr {
97 u32 vld_tid;
98 u32 pgsz_tag_clr;
99 u32 maxoffset;
100 u32 pgoffset;
101 u64 rsvd;
102};
103
104struct pagepod {
105 struct pagepod_hdr hdr;
106 u64 addr[PPOD_PAGES_MAX + 1];
107};
108
109#define PPOD_SIZE sizeof(struct pagepod) /* 64 */
110#define PPOD_SIZE_SHIFT 6
111
112#define PPOD_COLOR_SHIFT 0
113#define PPOD_COLOR_SIZE 6
114#define PPOD_COLOR_MASK ((1 << PPOD_COLOR_SIZE) - 1)
115
116#define PPOD_IDX_SHIFT PPOD_COLOR_SIZE
117#define PPOD_IDX_MAX_SIZE 24
118
119#define S_PPOD_TID 0
120#define M_PPOD_TID 0xFFFFFF
121#define V_PPOD_TID(x) ((x) << S_PPOD_TID)
122
123#define S_PPOD_VALID 24
124#define V_PPOD_VALID(x) ((x) << S_PPOD_VALID)
125#define F_PPOD_VALID V_PPOD_VALID(1U)
126
127#define S_PPOD_COLOR 0
128#define M_PPOD_COLOR 0x3F
129#define V_PPOD_COLOR(x) ((x) << S_PPOD_COLOR)
130
131#define S_PPOD_TAG 6
132#define M_PPOD_TAG 0xFFFFFF
133#define V_PPOD_TAG(x) ((x) << S_PPOD_TAG)
134
135#define S_PPOD_PGSZ 30
136#define M_PPOD_PGSZ 0x3
137#define V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ)
138
139/*
140 * large memory chunk allocation/release
141 * use vmalloc() if kmalloc() fails
142 */
143static inline void *cxgb3i_alloc_big_mem(unsigned int size,
144 gfp_t gfp)
145{
146 void *p = kmalloc(size, gfp);
147 if (!p)
148 p = vmalloc(size);
149 if (p)
150 memset(p, 0, size);
151 return p;
152}
153
154static inline void cxgb3i_free_big_mem(void *addr)
155{
156 if (is_vmalloc_addr(addr))
157 vfree(addr);
158 else
159 kfree(addr);
160}
161
162/*
163 * cxgb3i ddp tags are 32 bits; each consists of reserved bits used by the h/w
164 * and non-reserved bits that can be used by the iscsi s/w.
165 * The reserved bits are identified by the rsvd_bits and rsvd_shift fields
166 * in struct cxgb3i_tag_format.
167 *
168 * The uppermost reserved bit can be used to check whether a tag is a ddp tag:
169 * if the bit is 0, the tag is a valid ddp tag.
170 */
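(A worked illustration of that layout; the 12-bit index width below is only an assumed example, since the real width is the adapter-dependent rsvd_bits, while the 6-bit shift is PPOD_IDX_SHIFT from this header.)

/* illustrative layout only, assuming rsvd_bits = 12 and rsvd_shift = 6:
 *
 *   bits 31..18  upper s/w tag bits
 *   bits 17..6   reserved: pagepod index
 *   bits  5..0   lower s/w tag bits (the pagepod "color")
 *
 * Valid pagepod indices never set the top reserved bit (ddp_init() in
 * cxgb3i_ddp.c caps ppmax below 1 << (idx_bits - 1)), so
 * cxgb3i_is_ddp_tag() only has to test that single bit.
 */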
171
172/**
173 * cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag
174 * @tformat: tag format information
175 * @tag: tag to be checked
176 *
177 * return true if the tag is a ddp tag, false otherwise.
178 */
179static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag)
180{
181 return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
182}
183
184/**
185 * cxgb3i_sw_tag_usable - check if a given s/w tag has enough bits left for
186 * the reserved/hw bits
187 * @tformat: tag format information
188 * @sw_tag: s/w tag to be checked
189 *
190 * return true if the s/w tag leaves room for the reserved/h/w bits, false otherwise.
191 */
192static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat,
193 u32 sw_tag)
194{
195 sw_tag >>= (32 - tformat->rsvd_bits);
196 return !sw_tag;
197}
198
199/**
200 * cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
201 * @tformat: tag format information
202 * @sw_tag: s/w tag to be checked
203 *
204 * insert a 1 at the uppermost reserved bit to mark it as an invalid ddp tag.
205 */
206static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
207 u32 sw_tag)
208{
209 unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
210 u32 mask = (1 << shift) - 1;
211
212 if (sw_tag && (sw_tag & ~mask)) {
213 u32 v1 = sw_tag & ((1 << shift) - 1);
214 u32 v2 = (sw_tag >> (shift - 1)) << shift;
215
216 return v2 | v1 | 1 << shift;
217 }
218 return sw_tag | 1 << shift;
219}
220
221/**
222 * cxgb3i_ddp_tag_base - shift the s/w tag bits so that reserved bits are not
223 * used.
224 * @tformat: tag format information
225 * @sw_tag: s/w tag to be checked
226 */
227static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat,
228 u32 sw_tag)
229{
230 u32 mask = (1 << tformat->rsvd_shift) - 1;
231
232 if (sw_tag && (sw_tag & ~mask)) {
233 u32 v1 = sw_tag & mask;
234 u32 v2 = sw_tag >> tformat->rsvd_shift;
235
236 v2 <<= tformat->rsvd_shift + tformat->rsvd_bits;
237 return v2 | v1;
238 }
239 return sw_tag;
240}
241
242/**
243 * cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w
244 * @tformat: tag format information
245 * @tag: tag to be checked
246 *
247 * return the reserved bits in the tag
248 */
249static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat,
250 u32 tag)
251{
252 if (cxgb3i_is_ddp_tag(tformat, tag))
253 return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
254 return 0;
255}
256
257/**
258 * cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w
259 * @tformat: tag format information
260 * @tag: tag to be checked
261 *
262 * return the non-reserved bits in the tag.
263 */
264static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat,
265 u32 tag)
266{
267 unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
268 u32 v1, v2;
269
270 if (cxgb3i_is_ddp_tag(tformat, tag)) {
271 v1 = tag & ((1 << tformat->rsvd_shift) - 1);
272 v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
273 } else {
274 u32 mask = (1 << shift) - 1;
275
276 tag &= ~(1 << shift);
277 v1 = tag & mask;
278 v2 = (tag >> 1) & ~mask;
279 }
280 return v1 | v2;
281}
282
283int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid,
284 struct cxgb3i_tag_format *, u32 *tag,
285 struct cxgb3i_gather_list *, gfp_t gfp);
286void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag);
287
288struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
289 struct scatterlist *sgl,
290 unsigned int sgcnt,
291 struct pci_dev *pdev,
292 gfp_t gfp);
293void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
294 struct pci_dev *pdev);
295
296int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid,
297 int reply);
298int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
299 unsigned long pgsz);
300int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
301 int hcrc, int dcrc, int reply);
302int cxgb3i_ddp_find_page_index(unsigned long pgsz);
303int cxgb3i_adapter_ddp_init(struct t3cdev *, struct cxgb3i_tag_format *,
304 unsigned int *txsz, unsigned int *rxsz);
305void cxgb3i_adapter_ddp_cleanup(struct t3cdev *);
306#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
new file mode 100644
index 000000000000..091ecb4d9f3d
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -0,0 +1,107 @@
1/* cxgb3i_init.c: Chelsio S3xx iSCSI driver.
2 *
3 * Copyright (c) 2008 Chelsio Communications, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Karen Xie (kxie@chelsio.com)
10 */
11
12#include "cxgb3i.h"
13
14#define DRV_MODULE_NAME "cxgb3i"
15#define DRV_MODULE_VERSION "1.0.0"
16#define DRV_MODULE_RELDATE "Jun. 1, 2008"
17
18static char version[] =
19 "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
20 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
21
22MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
23MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver");
24MODULE_LICENSE("GPL");
25MODULE_VERSION(DRV_MODULE_VERSION);
26
27static void open_s3_dev(struct t3cdev *);
28static void close_s3_dev(struct t3cdev *);
29
30static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
31static struct cxgb3_client t3c_client = {
32 .name = "iscsi_cxgb3",
33 .handlers = cxgb3i_cpl_handlers,
34 .add = open_s3_dev,
35 .remove = close_s3_dev,
36};
37
38/**
39 * open_s3_dev - register with cxgb3 LLD
40 * @t3dev: cxgb3 adapter instance
41 */
42static void open_s3_dev(struct t3cdev *t3dev)
43{
44 static int vers_printed;
45
46 if (!vers_printed) {
47 printk(KERN_INFO "%s", version);
48 vers_printed = 1;
49 }
50
51 cxgb3i_sdev_add(t3dev, &t3c_client);
52 cxgb3i_adapter_add(t3dev);
53}
54
55/**
56 * close_s3_dev - de-register with cxgb3 LLD
57 * @t3dev: cxgb3 adapter instance
58 */
59static void close_s3_dev(struct t3cdev *t3dev)
60{
61 cxgb3i_adapter_remove(t3dev);
62 cxgb3i_sdev_remove(t3dev);
63}
64
65/**
66 * cxgb3i_init_module - module init entry point
67 *
68 * initialize any driver wide global data structures and register itself
69 * with the cxgb3 module
70 */
71static int __init cxgb3i_init_module(void)
72{
73 int err;
74
75 err = cxgb3i_sdev_init(cxgb3i_cpl_handlers);
76 if (err < 0)
77 return err;
78
79 err = cxgb3i_iscsi_init();
80 if (err < 0)
81 return err;
82
83 err = cxgb3i_pdu_init();
84 if (err < 0)
85 return err;
86
87 cxgb3_register_client(&t3c_client);
88
89 return 0;
90}
91
92/**
93 * cxgb3i_exit_module - module cleanup/exit entry point
94 *
95 * go through the driver hba list and, for each hba, release any resources held,
96 * then unregister the iscsi transport and de-register from the cxgb3 module
97 */
98static void __exit cxgb3i_exit_module(void)
99{
100 cxgb3_unregister_client(&t3c_client);
101 cxgb3i_pdu_cleanup();
102 cxgb3i_iscsi_cleanup();
103 cxgb3i_sdev_cleanup();
104}
105
106module_init(cxgb3i_init_module);
107module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
new file mode 100644
index 000000000000..d83464b9b3f9
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -0,0 +1,951 @@
1/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver.
2 *
3 * Copyright (c) 2008 Chelsio Communications, Inc.
4 * Copyright (c) 2008 Mike Christie
5 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Karen Xie (kxie@chelsio.com)
12 */
13
14#include <linux/inet.h>
15#include <linux/crypto.h>
16#include <net/tcp.h>
17#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_device.h>
19#include <scsi/scsi_eh.h>
20#include <scsi/scsi_host.h>
21#include <scsi/scsi.h>
22#include <scsi/iscsi_proto.h>
23#include <scsi/libiscsi.h>
24#include <scsi/scsi_transport_iscsi.h>
25
26#include "cxgb3i.h"
27#include "cxgb3i_pdu.h"
28
29#ifdef __DEBUG_CXGB3I_TAG__
30#define cxgb3i_tag_debug cxgb3i_log_debug
31#else
32#define cxgb3i_tag_debug(fmt...)
33#endif
34
35#ifdef __DEBUG_CXGB3I_API__
36#define cxgb3i_api_debug cxgb3i_log_debug
37#else
38#define cxgb3i_api_debug(fmt...)
39#endif
40
41/*
42 * align pdu size to multiple of 512 for better performance
43 */
44#define align_pdu_size(n) do { n = (n) & (~511); } while (0)
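A quick worked example of the rounding above (an illustrative note, not part of the patch): align_pdu_size() applied to 8600 gives 8600 & ~511 = 8192, i.e. the negotiated payload size is rounded down to the nearest multiple of 512.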
45
46static struct scsi_transport_template *cxgb3i_scsi_transport;
47static struct scsi_host_template cxgb3i_host_template;
48static struct iscsi_transport cxgb3i_iscsi_transport;
49static unsigned char sw_tag_idx_bits;
50static unsigned char sw_tag_age_bits;
51
52static LIST_HEAD(cxgb3i_snic_list);
53static DEFINE_RWLOCK(cxgb3i_snic_rwlock);
54
55/**
56 * cxgb3i_adapter_add - init a s3 adapter structure and any h/w settings
57 * @t3dev: t3cdev adapter
58 * return the resulting cxgb3i_adapter struct
59 */
60struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *t3dev)
61{
62 struct cxgb3i_adapter *snic;
63 struct adapter *adapter = tdev2adap(t3dev);
64 int i;
65
66 snic = kzalloc(sizeof(*snic), GFP_KERNEL);
67 if (!snic) {
68 cxgb3i_api_debug("cxgb3 %s, OOM.\n", t3dev->name);
69 return NULL;
70 }
71 spin_lock_init(&snic->lock);
72
73 snic->tdev = t3dev;
74 snic->pdev = adapter->pdev;
75 snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
76
77 if (cxgb3i_adapter_ddp_init(t3dev, &snic->tag_format,
78 &snic->tx_max_size,
79 &snic->rx_max_size) < 0)
80 goto free_snic;
81
82 for_each_port(adapter, i) {
83 snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]);
84 if (!snic->hba[i])
85 goto ulp_cleanup;
86 }
87 snic->hba_cnt = adapter->params.nports;
88
89 /* add to the list */
90 write_lock(&cxgb3i_snic_rwlock);
91 list_add_tail(&snic->list_head, &cxgb3i_snic_list);
92 write_unlock(&cxgb3i_snic_rwlock);
93
94 return snic;
95
96ulp_cleanup:
97 cxgb3i_adapter_ddp_cleanup(t3dev);
98free_snic:
99 kfree(snic);
100 return NULL;
101}
102
103/**
104 * cxgb3i_adapter_remove - release all the resources held and cleanup any
105 * h/w settings
106 * @t3dev: t3cdev adapter
107 */
108void cxgb3i_adapter_remove(struct t3cdev *t3dev)
109{
110 int i;
111 struct cxgb3i_adapter *snic;
112
113 /* remove from the list */
114 write_lock(&cxgb3i_snic_rwlock);
115 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
116 if (snic->tdev == t3dev) {
117 list_del(&snic->list_head);
118 break;
119 }
120 }
121 write_unlock(&cxgb3i_snic_rwlock);
122
123 if (snic) {
124 for (i = 0; i < snic->hba_cnt; i++) {
125 if (snic->hba[i]) {
126 cxgb3i_hba_host_remove(snic->hba[i]);
127 snic->hba[i] = NULL;
128 }
129 }
130
131 /* release ddp resources */
132 cxgb3i_adapter_ddp_cleanup(snic->tdev);
133 kfree(snic);
134 }
135}
136
137/**
138 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure with a given
139 * net_device
140 * @ndev: the net_device to search for
141 */
142struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
143{
144 struct cxgb3i_adapter *snic;
145 int i;
146
147 read_lock(&cxgb3i_snic_rwlock);
148 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
149 for (i = 0; i < snic->hba_cnt; i++) {
150 if (snic->hba[i]->ndev == ndev) {
151 read_unlock(&cxgb3i_snic_rwlock);
152 return snic->hba[i];
153 }
154 }
155 }
156 read_unlock(&cxgb3i_snic_rwlock);
157 return NULL;
158}
159
160/**
161 * cxgb3i_hba_host_add - register a new host with scsi/iscsi
162 * @snic: the cxgb3i adapter
163 * @ndev: associated net_device
164 */
165struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
166 struct net_device *ndev)
167{
168 struct cxgb3i_hba *hba;
169 struct Scsi_Host *shost;
170 int err;
171
172 shost = iscsi_host_alloc(&cxgb3i_host_template,
173 sizeof(struct cxgb3i_hba),
174 CXGB3I_SCSI_QDEPTH_DFLT);
175 if (!shost) {
176 cxgb3i_log_info("iscsi_host_alloc failed.\n");
177 return NULL;
178 }
179
180 shost->transportt = cxgb3i_scsi_transport;
181 shost->max_lun = CXGB3I_MAX_LUN;
182 shost->max_id = CXGB3I_MAX_TARGET;
183 shost->max_channel = 0;
184 shost->max_cmd_len = 16;
185
186 hba = iscsi_host_priv(shost);
187 hba->snic = snic;
188 hba->ndev = ndev;
189 hba->shost = shost;
190
191 pci_dev_get(snic->pdev);
192 err = iscsi_host_add(shost, &snic->pdev->dev);
193 if (err) {
194 cxgb3i_log_info("iscsi_host_add failed.\n");
195 goto pci_dev_put;
196 }
197
198 cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
199 shost, hba, shost->host_no);
200
201 return hba;
202
203pci_dev_put:
204 pci_dev_put(snic->pdev);
205 scsi_host_put(shost);
206 return NULL;
207}
208
209/**
210 * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi
211 * @hba: the cxgb3i hba
212 */
213void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
214{
215 cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
216 hba->shost, hba, hba->shost->host_no);
217 iscsi_host_remove(hba->shost);
218 pci_dev_put(hba->snic->pdev);
219 iscsi_host_free(hba->shost);
220}
221
222/**
223 * cxgb3i_ep_connect - establish TCP connection to target portal
224 * @dst_addr: target IP address
225 * @non_blocking: blocking or non-blocking call
226 *
227 * Initiates a TCP/IP connection to the dst_addr
228 */
229static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
230 int non_blocking)
231{
232 struct iscsi_endpoint *ep;
233 struct cxgb3i_endpoint *cep;
234 struct cxgb3i_hba *hba;
235 struct s3_conn *c3cn = NULL;
236 int err = 0;
237
238 c3cn = cxgb3i_c3cn_create();
239 if (!c3cn) {
240 cxgb3i_log_info("ep connect OOM.\n");
241 err = -ENOMEM;
242 goto release_conn;
243 }
244
245 err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
246 if (err < 0) {
247 cxgb3i_log_info("ep connect failed.\n");
248 goto release_conn;
249 }
250 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
251 if (!hba) {
252 err = -ENOSPC;
253 cxgb3i_log_info("NOT going through cxgbi device.\n");
254 goto release_conn;
255 }
256 if (c3cn_is_closing(c3cn)) {
257 err = -ENOSPC;
258 cxgb3i_log_info("ep connect unable to connect.\n");
259 goto release_conn;
260 }
261
262 ep = iscsi_create_endpoint(sizeof(*cep));
263 if (!ep) {
264 err = -ENOMEM;
265 cxgb3i_log_info("iscsi alloc ep, OOM.\n");
266 goto release_conn;
267 }
268 cep = ep->dd_data;
269 cep->c3cn = c3cn;
270 cep->hba = hba;
271
272 cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
273 ep, cep, c3cn, hba);
274 return ep;
275
276release_conn:
277 cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
278 if (c3cn)
279 cxgb3i_c3cn_release(c3cn);
280 return ERR_PTR(err);
281}
282
283/**
284 * cxgb3i_ep_poll - polls for TCP connection establishment
285 * @ep: TCP connection (endpoint) handle
286 * @timeout_ms: timeout value in milliseconds
287 *
288 * polls for the TCP connect request to complete
289 */
290static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
291{
292 struct cxgb3i_endpoint *cep = ep->dd_data;
293 struct s3_conn *c3cn = cep->c3cn;
294
295 if (!c3cn_is_established(c3cn))
296 return 0;
297 cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
298 return 1;
299}
300
301/**
302 * cxgb3i_ep_disconnect - teardown TCP connection
303 * @ep: TCP connection (endpoint) handle
304 *
305 * teardown TCP connection
306 */
307static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
308{
309 struct cxgb3i_endpoint *cep = ep->dd_data;
310 struct cxgb3i_conn *cconn = cep->cconn;
311
312 cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);
313
314 if (cconn && cconn->conn) {
315 /*
316 * stop the xmit path so the xmit_pdu function is
317 * not being called
318 */
319 iscsi_suspend_tx(cconn->conn);
320
321 write_lock_bh(&cep->c3cn->callback_lock);
322 cep->c3cn->user_data = NULL;
323 cconn->cep = NULL;
324 write_unlock_bh(&cep->c3cn->callback_lock);
325 }
326
327 cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
328 ep, cep, cep->c3cn);
329 cxgb3i_c3cn_release(cep->c3cn);
330 iscsi_destroy_endpoint(ep);
331}
332
333/**
334 * cxgb3i_session_create - create a new iscsi session
335 * @cmds_max: max # of commands
336 * @qdepth: scsi queue depth
337 * @initial_cmdsn: initial iscsi CMDSN for this session
338 * @host_no: pointer to return host no
339 *
340 * Creates a new iSCSI session
341 */
342static struct iscsi_cls_session *
343cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
344 u32 initial_cmdsn, u32 *host_no)
345{
346 struct cxgb3i_endpoint *cep;
347 struct cxgb3i_hba *hba;
348 struct Scsi_Host *shost;
349 struct iscsi_cls_session *cls_session;
350 struct iscsi_session *session;
351
352 if (!ep) {
353 cxgb3i_log_error("%s, missing endpoint.\n", __func__);
354 return NULL;
355 }
356
357 cep = ep->dd_data;
358 hba = cep->hba;
359 shost = hba->shost;
360 cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
361 BUG_ON(hba != iscsi_host_priv(shost));
362
363 *host_no = shost->host_no;
364
365 cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
366 cmds_max,
367 sizeof(struct iscsi_tcp_task),
368 initial_cmdsn, ISCSI_MAX_TARGET);
369 if (!cls_session)
370 return NULL;
371 session = cls_session->dd_data;
372 if (iscsi_tcp_r2tpool_alloc(session))
373 goto remove_session;
374
375 return cls_session;
376
377remove_session:
378 iscsi_session_teardown(cls_session);
379 return NULL;
380}
381
382/**
383 * cxgb3i_session_destroy - destroys iscsi session
384 * @cls_session: pointer to iscsi cls session
385 *
386 * Destroys an iSCSI session instance and releases all the resources held
387 */
388static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
389{
390 cxgb3i_api_debug("sess 0x%p.\n", cls_session);
391 iscsi_tcp_r2tpool_free(cls_session->dd_data);
392 iscsi_session_teardown(cls_session);
393}
394
395/**
396 * cxgb3i_conn_max_xmit_dlength -- check the max. xmit pdu segment size,
397 * reduce it to be within the hardware limit if needed
398 * @conn: iscsi connection
399 */
400static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
401
402{
403 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
404 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
405 unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
406 cconn->hba->snic->tx_max_size -
407 ISCSI_PDU_NONPAYLOAD_MAX);
408
409 if (conn->max_xmit_dlength)
410 conn->max_xmit_dlength = min_t(unsigned int,
411 conn->max_xmit_dlength, max);
412 else
413 conn->max_xmit_dlength = max;
414 align_pdu_size(conn->max_xmit_dlength);
415 cxgb3i_log_info("conn 0x%p, max xmit %u.\n",
416 conn, conn->max_xmit_dlength);
417 return 0;
418}
419
420/**
421 * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size against
422 * the hardware limit
423 * @conn: iscsi connection
424 * return 0 if the value is valid, < 0 otherwise.
425 */
426static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
427{
428 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
429 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
430 unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
431 cconn->hba->snic->rx_max_size -
432 ISCSI_PDU_NONPAYLOAD_MAX);
433
434 align_pdu_size(max);
435 if (conn->max_recv_dlength) {
436 if (conn->max_recv_dlength > max) {
437 cxgb3i_log_error("MaxRecvDataSegmentLength %u too big."
438 " Need to be <= %u.\n",
439 conn->max_recv_dlength, max);
440 return -EINVAL;
441 }
442 conn->max_recv_dlength = min_t(unsigned int,
443 conn->max_recv_dlength, max);
444 align_pdu_size(conn->max_recv_dlength);
445 } else
446 conn->max_recv_dlength = max;
447 cxgb3i_api_debug("conn 0x%p, max recv %u.\n",
448 conn, conn->max_recv_dlength);
449 return 0;
450}
451
452/**
453 * cxgb3i_conn_create - create iscsi connection instance
454 * @cls_session: pointer to iscsi cls session
455 * @cid: iscsi cid
456 *
457 * Creates a new iSCSI connection instance for a given session
458 */
459static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session
460 *cls_session, u32 cid)
461{
462 struct iscsi_cls_conn *cls_conn;
463 struct iscsi_conn *conn;
464 struct iscsi_tcp_conn *tcp_conn;
465 struct cxgb3i_conn *cconn;
466
467 cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid);
468
469 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
470 if (!cls_conn)
471 return NULL;
472 conn = cls_conn->dd_data;
473 tcp_conn = conn->dd_data;
474 cconn = tcp_conn->dd_data;
475
476 cconn->conn = conn;
477 return cls_conn;
478}
479
480/**
481 * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together
482 * @cls_session: pointer to iscsi cls session
483 * @cls_conn: pointer to iscsi cls conn
484 * @transport_eph: 64-bit EP handle
485 * @is_leading: leading connection on this session?
486 *
487 * Binds together an iSCSI session, an iSCSI connection and a
488 * TCP connection. This routine returns an error code if the TCP
489 * connection does not belong to the device the iSCSI sess/conn is bound to.
490 */
491
492static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
493 struct iscsi_cls_conn *cls_conn,
494 u64 transport_eph, int is_leading)
495{
496 struct iscsi_conn *conn = cls_conn->dd_data;
497 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
498 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
499 struct cxgb3i_adapter *snic;
500 struct iscsi_endpoint *ep;
501 struct cxgb3i_endpoint *cep;
502 struct s3_conn *c3cn;
503 int err;
504
505 ep = iscsi_lookup_endpoint(transport_eph);
506 if (!ep)
507 return -EINVAL;
508
509 /* setup ddp pagesize */
510 cep = ep->dd_data;
511 c3cn = cep->c3cn;
512 snic = cep->hba->snic;
513 err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0);
514 if (err < 0)
515 return err;
516
517 cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
518 ep, cls_session, cls_conn);
519
520 err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
521 if (err)
522 return -EINVAL;
523
524 /* calculate the tag idx bits needed for this conn based on cmds_max */
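	/* e.g. cmds_max == 128 needs __ilog2_u32(127) + 1 == 7 task index bits */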
525 cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
526 cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n",
527 conn->session->cmds_max, cconn->task_idx_bits);
528
529 read_lock(&c3cn->callback_lock);
530 c3cn->user_data = conn;
531 cconn->hba = cep->hba;
532 cconn->cep = cep;
533 cep->cconn = cconn;
534 read_unlock(&c3cn->callback_lock);
535
536 cxgb3i_conn_max_xmit_dlength(conn);
537 cxgb3i_conn_max_recv_dlength(conn);
538
539 spin_lock_bh(&conn->session->lock);
540 sprintf(conn->portal_address, NIPQUAD_FMT,
541 NIPQUAD(c3cn->daddr.sin_addr.s_addr));
542 conn->portal_port = ntohs(c3cn->daddr.sin_port);
543 spin_unlock_bh(&conn->session->lock);
544
545 /* init recv engine */
546 iscsi_tcp_hdr_recv_prep(tcp_conn);
547
548 return 0;
549}
550
551/**
552 * cxgb3i_conn_get_param - return iscsi connection parameter to caller
553 * @cls_conn: pointer to iscsi cls conn
554 * @param: parameter type identifier
555 * @buf: buffer pointer
556 *
557 * returns iSCSI connection parameters
558 */
559static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn,
560 enum iscsi_param param, char *buf)
561{
562 struct iscsi_conn *conn = cls_conn->dd_data;
563 int len;
564
565 cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
566
567 switch (param) {
568 case ISCSI_PARAM_CONN_PORT:
569 spin_lock_bh(&conn->session->lock);
570 len = sprintf(buf, "%hu\n", conn->portal_port);
571 spin_unlock_bh(&conn->session->lock);
572 break;
573 case ISCSI_PARAM_CONN_ADDRESS:
574 spin_lock_bh(&conn->session->lock);
575 len = sprintf(buf, "%s\n", conn->portal_address);
576 spin_unlock_bh(&conn->session->lock);
577 break;
578 default:
579 return iscsi_conn_get_param(cls_conn, param, buf);
580 }
581
582 return len;
583}
584
585/**
586 * cxgb3i_conn_set_param - set iscsi connection parameter
587 * @cls_conn: pointer to iscsi cls conn
588 * @param: parameter type identifier
589 * @buf: buffer pointer
590 * @buflen: buffer length
591 *
592 * set iSCSI connection parameters
593 */
594static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn,
595 enum iscsi_param param, char *buf, int buflen)
596{
597 struct iscsi_conn *conn = cls_conn->dd_data;
598 struct iscsi_session *session = conn->session;
599 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
600 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
601 struct cxgb3i_adapter *snic = cconn->hba->snic;
602 struct s3_conn *c3cn = cconn->cep->c3cn;
603 int value, err = 0;
604
605 switch (param) {
606 case ISCSI_PARAM_HDRDGST_EN:
607 err = iscsi_set_param(cls_conn, param, buf, buflen);
608 if (!err && conn->hdrdgst_en)
609 err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
610 conn->hdrdgst_en,
611 conn->datadgst_en, 0);
612 break;
613 case ISCSI_PARAM_DATADGST_EN:
614 err = iscsi_set_param(cls_conn, param, buf, buflen);
615 if (!err && conn->datadgst_en)
616 err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
617 conn->hdrdgst_en,
618 conn->datadgst_en, 0);
619 break;
620 case ISCSI_PARAM_MAX_R2T:
621 sscanf(buf, "%d", &value);
622 if (value <= 0 || !is_power_of_2(value))
623 return -EINVAL;
624 if (session->max_r2t == value)
625 break;
626 iscsi_tcp_r2tpool_free(session);
627 err = iscsi_set_param(cls_conn, param, buf, buflen);
628 if (!err && iscsi_tcp_r2tpool_alloc(session))
629			return -ENOMEM;
		break;
630 case ISCSI_PARAM_MAX_RECV_DLENGTH:
631 err = iscsi_set_param(cls_conn, param, buf, buflen);
632 if (!err)
633 err = cxgb3i_conn_max_recv_dlength(conn);
634 break;
635 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
636 err = iscsi_set_param(cls_conn, param, buf, buflen);
637 if (!err)
638 err = cxgb3i_conn_max_xmit_dlength(conn);
639 break;
640 default:
641 return iscsi_set_param(cls_conn, param, buf, buflen);
642 }
643 return err;
644}
645
646/**
647 * cxgb3i_host_set_param - configure host (adapter) related parameters
648 * @shost: scsi host pointer
649 * @param: parameter type identifier
650 * @buf: buffer pointer
651 */
652static int cxgb3i_host_set_param(struct Scsi_Host *shost,
653 enum iscsi_host_param param,
654 char *buf, int buflen)
655{
656 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
657
658 cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
659
660 switch (param) {
661 case ISCSI_HOST_PARAM_IPADDRESS:
662 {
663 __be32 addr = in_aton(buf);
664 cxgb3i_set_private_ipv4addr(hba->ndev, addr);
665 return 0;
666 }
667 case ISCSI_HOST_PARAM_HWADDRESS:
668 case ISCSI_HOST_PARAM_NETDEV_NAME:
669 /* ignore */
670 return 0;
671 default:
672 return iscsi_host_set_param(shost, param, buf, buflen);
673 }
674}
675
676/**
677 * cxgb3i_host_get_param - returns host (adapter) related parameters
678 * @shost: scsi host pointer
679 * @param: parameter type identifier
680 * @buf: buffer pointer
681 */
682static int cxgb3i_host_get_param(struct Scsi_Host *shost,
683 enum iscsi_host_param param, char *buf)
684{
685 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
686 int len = 0;
687
688 cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
689
690 switch (param) {
691 case ISCSI_HOST_PARAM_HWADDRESS:
692 len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6);
693 break;
694 case ISCSI_HOST_PARAM_NETDEV_NAME:
695 len = sprintf(buf, "%s\n", hba->ndev->name);
696 break;
697 case ISCSI_HOST_PARAM_IPADDRESS:
698 {
699 __be32 addr;
700
701 addr = cxgb3i_get_private_ipv4addr(hba->ndev);
702 len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
703 break;
704 }
705 default:
706 return iscsi_host_get_param(shost, param, buf);
707 }
708 return len;
709}
710
711/**
712 * cxgb3i_conn_get_stats - returns iSCSI stats
713 * @cls_conn: pointer to iscsi cls conn
714 * @stats: pointer to iscsi statistic struct
715 */
716static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
717 struct iscsi_stats *stats)
718{
719 struct iscsi_conn *conn = cls_conn->dd_data;
720
721 stats->txdata_octets = conn->txdata_octets;
722 stats->rxdata_octets = conn->rxdata_octets;
723 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
724 stats->dataout_pdus = conn->dataout_pdus_cnt;
725 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
726 stats->datain_pdus = conn->datain_pdus_cnt;
727 stats->r2t_pdus = conn->r2t_pdus_cnt;
728 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
729 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
730 stats->digest_err = 0;
731 stats->timeout_err = 0;
732 stats->custom_length = 1;
733 strcpy(stats->custom[0].desc, "eh_abort_cnt");
734 stats->custom[0].value = conn->eh_abort_cnt;
735}
736
737/**
738 * cxgb3i_parse_itt - get the idx and age bits from a given tag
739 * @conn: iscsi connection
740 * @itt: itt tag
741 * @idx: task index, filled in by this function
742 * @age: session age, filled in by this function
743 */
744static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt,
745 int *idx, int *age)
746{
747 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
748 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
749 struct cxgb3i_adapter *snic = cconn->hba->snic;
750 u32 tag = ntohl((__force u32) itt);
751 u32 sw_bits;
752
753 sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag);
754 if (idx)
755 *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
756 if (age)
757 *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
758
759 cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n",
760 tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
761 age ? *age : 0xFF);
762}
763
764/**
765 * cxgb3i_reserve_itt - generate tag for a given task
766 * Try to set up ddp for a scsi read task.
767 * @task: iscsi task
768 * @hdr_itt: tag, filled in by this function
769 */
770int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
771{
772 struct scsi_cmnd *sc = task->sc;
773 struct iscsi_conn *conn = task->conn;
774 struct iscsi_session *sess = conn->session;
775 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
776 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
777 struct cxgb3i_adapter *snic = cconn->hba->snic;
778 struct cxgb3i_tag_format *tformat = &snic->tag_format;
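	/*
	 * software tag layout: the low task_idx_bits carry the task itt and
	 * the bits above carry the session age (see cxgb3i_parse_itt()).
	 */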
779 u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
780 u32 tag;
781 int err = -EINVAL;
782
783 if (sc &&
784 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
785 cxgb3i_sw_tag_usable(tformat, sw_tag)) {
786 struct s3_conn *c3cn = cconn->cep->c3cn;
787 struct cxgb3i_gather_list *gl;
788
789 gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length,
790 scsi_in(sc)->table.sgl,
791 scsi_in(sc)->table.nents,
792 snic->pdev,
793 GFP_ATOMIC);
794 if (gl) {
795 tag = sw_tag;
796 err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid,
797 tformat, &tag,
798 gl, GFP_ATOMIC);
799 if (err < 0)
800 cxgb3i_ddp_release_gl(gl, snic->pdev);
801 }
802 }
803
804 if (err < 0)
805 tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
806	/* the itt needs to be sent in big-endian order */
807 *hdr_itt = (__force itt_t)htonl(tag);
808
809 cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
810 tag, *hdr_itt, task->itt, sess->age);
811 return 0;
812}
813
814/**
815 * cxgb3i_release_itt - release the tag for a given task
816 * if the tag is a ddp tag, release the ddp setup
817 * @task: iscsi task
818 * @hdr_itt: tag
819 */
820void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt)
821{
822 struct scsi_cmnd *sc = task->sc;
823 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
824 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
825 struct cxgb3i_adapter *snic = cconn->hba->snic;
826 struct cxgb3i_tag_format *tformat = &snic->tag_format;
827 u32 tag = ntohl((__force u32)hdr_itt);
828
829 cxgb3i_tag_debug("release tag 0x%x.\n", tag);
830
831 if (sc &&
832 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
833 cxgb3i_is_ddp_tag(tformat, tag))
834 cxgb3i_ddp_tag_release(snic->tdev, tag);
835}
836
837/**
838 * cxgb3i_host_template -- Scsi_Host_Template structure
839 * used when registering with the scsi mid layer
840 */
841static struct scsi_host_template cxgb3i_host_template = {
842 .module = THIS_MODULE,
843 .name = "Chelsio S3xx iSCSI Initiator",
844 .proc_name = "cxgb3i",
845 .queuecommand = iscsi_queuecommand,
846 .change_queue_depth = iscsi_change_queue_depth,
847 .can_queue = 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1),
848 .sg_tablesize = SG_ALL,
849 .max_sectors = 0xFFFF,
850 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
851 .eh_abort_handler = iscsi_eh_abort,
852 .eh_device_reset_handler = iscsi_eh_device_reset,
853 .eh_target_reset_handler = iscsi_eh_target_reset,
854 .use_clustering = DISABLE_CLUSTERING,
855 .this_id = -1,
856};
857
858static struct iscsi_transport cxgb3i_iscsi_transport = {
859 .owner = THIS_MODULE,
860 .name = "cxgb3i",
861 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
862 | CAP_DATADGST | CAP_DIGEST_OFFLOAD |
863 CAP_PADDING_OFFLOAD,
864 .param_mask = ISCSI_MAX_RECV_DLENGTH |
865 ISCSI_MAX_XMIT_DLENGTH |
866 ISCSI_HDRDGST_EN |
867 ISCSI_DATADGST_EN |
868 ISCSI_INITIAL_R2T_EN |
869 ISCSI_MAX_R2T |
870 ISCSI_IMM_DATA_EN |
871 ISCSI_FIRST_BURST |
872 ISCSI_MAX_BURST |
873 ISCSI_PDU_INORDER_EN |
874 ISCSI_DATASEQ_INORDER_EN |
875 ISCSI_ERL |
876 ISCSI_CONN_PORT |
877 ISCSI_CONN_ADDRESS |
878 ISCSI_EXP_STATSN |
879 ISCSI_PERSISTENT_PORT |
880 ISCSI_PERSISTENT_ADDRESS |
881 ISCSI_TARGET_NAME | ISCSI_TPGT |
882 ISCSI_USERNAME | ISCSI_PASSWORD |
883 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
884 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
885 ISCSI_LU_RESET_TMO |
886 ISCSI_PING_TMO | ISCSI_RECV_TMO |
887 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
888 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
889 ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME,
890 .get_host_param = cxgb3i_host_get_param,
891 .set_host_param = cxgb3i_host_set_param,
892 /* session management */
893 .create_session = cxgb3i_session_create,
894 .destroy_session = cxgb3i_session_destroy,
895 .get_session_param = iscsi_session_get_param,
896 /* connection management */
897 .create_conn = cxgb3i_conn_create,
898 .bind_conn = cxgb3i_conn_bind,
899 .destroy_conn = iscsi_tcp_conn_teardown,
900 .start_conn = iscsi_conn_start,
901 .stop_conn = iscsi_conn_stop,
902 .get_conn_param = cxgb3i_conn_get_param,
903 .set_param = cxgb3i_conn_set_param,
904 .get_stats = cxgb3i_conn_get_stats,
905 /* pdu xmit req. from user space */
906 .send_pdu = iscsi_conn_send_pdu,
907 /* task */
908 .init_task = iscsi_tcp_task_init,
909 .xmit_task = iscsi_tcp_task_xmit,
910 .cleanup_task = cxgb3i_conn_cleanup_task,
911
912 /* pdu */
913 .alloc_pdu = cxgb3i_conn_alloc_pdu,
914 .init_pdu = cxgb3i_conn_init_pdu,
915 .xmit_pdu = cxgb3i_conn_xmit_pdu,
916 .parse_pdu_itt = cxgb3i_parse_itt,
917
918 /* TCP connect/disconnect */
919 .ep_connect = cxgb3i_ep_connect,
920 .ep_poll = cxgb3i_ep_poll,
921 .ep_disconnect = cxgb3i_ep_disconnect,
922 /* Error recovery timeout call */
923 .session_recovery_timedout = iscsi_session_recovery_timedout,
924};
925
926int cxgb3i_iscsi_init(void)
927{
928 sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
929 sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
930 cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
931 ISCSI_ITT_MASK, sw_tag_idx_bits,
932 ISCSI_AGE_MASK, sw_tag_age_bits);
933
934 cxgb3i_scsi_transport =
935 iscsi_register_transport(&cxgb3i_iscsi_transport);
936 if (!cxgb3i_scsi_transport) {
937 cxgb3i_log_error("Could not register cxgb3i transport.\n");
938 return -ENODEV;
939 }
940 cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
941 return 0;
942}
943
944void cxgb3i_iscsi_cleanup(void)
945{
946 if (cxgb3i_scsi_transport) {
947 cxgb3i_api_debug("cxgb3i transport 0x%p.\n",
948 cxgb3i_scsi_transport);
949 iscsi_unregister_transport(&cxgb3i_iscsi_transport);
950 }
951}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
new file mode 100644
index 000000000000..a865f1fefe8b
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -0,0 +1,1810 @@
1/*
2 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#include <linux/if_vlan.h>
16#include <linux/version.h>
17
18#include "cxgb3_defs.h"
19#include "cxgb3_ctl_defs.h"
20#include "firmware_exports.h"
21#include "cxgb3i_offload.h"
22#include "cxgb3i_pdu.h"
23#include "cxgb3i_ddp.h"
24
25#ifdef __DEBUG_C3CN_CONN__
26#define c3cn_conn_debug cxgb3i_log_info
27#else
28#define c3cn_conn_debug(fmt...)
29#endif
30
31#ifdef __DEBUG_C3CN_TX__
32#define c3cn_tx_debug cxgb3i_log_debug
33#else
34#define c3cn_tx_debug(fmt...)
35#endif
36
37#ifdef __DEBUG_C3CN_RX__
38#define c3cn_rx_debug cxgb3i_log_debug
39#else
40#define c3cn_rx_debug(fmt...)
41#endif
42
43/*
44 * module parameters releated to offloaded iscsi connection
45 */
46static int cxgb3_rcv_win = 256 * 1024;
47module_param(cxgb3_rcv_win, int, 0644);
48MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
49
50static int cxgb3_snd_win = 64 * 1024;
51module_param(cxgb3_snd_win, int, 0644);
52MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)");
53
54static int cxgb3_rx_credit_thres = 10 * 1024;
55module_param(cxgb3_rx_credit_thres, int, 0644);
56MODULE_PARM_DESC(cxgb3_rx_credit_thres,
57 "RX credits return threshold in bytes (default=10KB)");
58
59static unsigned int cxgb3_max_connect = 8 * 1024;
60module_param(cxgb3_max_connect, uint, 0644);
61MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8192)");
62
63static unsigned int cxgb3_sport_base = 20000;
64module_param(cxgb3_sport_base, uint, 0644);
65MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)");
66
67/*
68 * cxgb3i tcp connection data (per adapter) list
69 */
70static LIST_HEAD(cdata_list);
71static DEFINE_RWLOCK(cdata_rwlock);
72
73static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
74static void c3cn_release_offload_resources(struct s3_conn *c3cn);
75
76/*
77 * iscsi source port management
78 *
79 * Find a free source port in the port allocation map. We use a very simple
80 * rotor scheme to look for the next free port.
81 *
82 * If a source port has been specified make sure that it doesn't collide with
83 * our normal source port allocation map. If it's outside the range of our
84 * allocation/deallocation scheme just let them use it.
85 *
86 * If the source port is outside our allocation range, the caller is
87 * responsible for keeping track of their port usage.
88 */
89static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
90{
91 unsigned int start;
92 int idx;
93
94 if (!cdata)
95 goto error_out;
96
97 if (c3cn->saddr.sin_port != 0) {
98 idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
99 if (idx < 0 || idx >= cxgb3_max_connect)
100 return 0;
101 if (!test_and_set_bit(idx, cdata->sport_map))
102 return -EADDRINUSE;
103 }
104
105 /* the sport_map_next may not be accurate but that is okay, sport_map
106 should be */
107 start = idx = cdata->sport_map_next;
108 do {
109 if (++idx >= cxgb3_max_connect)
110 idx = 0;
111 if (!(test_and_set_bit(idx, cdata->sport_map))) {
112 c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
113 cdata->sport_map_next = idx;
114 c3cn_conn_debug("%s reserve port %u.\n",
115 cdata->cdev->name,
116 cxgb3_sport_base + idx);
117 return 0;
118 }
119 } while (idx != start);
120
121error_out:
122 return -EADDRNOTAVAIL;
123}
124
125static void c3cn_put_port(struct s3_conn *c3cn)
126{
127 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
128
129 if (c3cn->saddr.sin_port) {
130 int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
131
132 c3cn->saddr.sin_port = 0;
133 if (idx < 0 || idx >= cxgb3_max_connect)
134 return;
135 clear_bit(idx, cdata->sport_map);
136 c3cn_conn_debug("%s, release port %u.\n",
137 cdata->cdev->name, cxgb3_sport_base + idx);
138 }
139}
140
141static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
142{
143 __set_bit(flag, &c3cn->flags);
144 c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
145 c3cn, flag, c3cn->state, c3cn->flags);
146}
147
148static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
149{
150 __clear_bit(flag, &c3cn->flags);
151 c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
152 c3cn, flag, c3cn->state, c3cn->flags);
153}
154
155static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
156{
157 if (c3cn == NULL)
158 return 0;
159 return test_bit(flag, &c3cn->flags);
160}
161
162static void c3cn_set_state(struct s3_conn *c3cn, int state)
163{
164 c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
165 c3cn->state = state;
166}
167
168static inline void c3cn_hold(struct s3_conn *c3cn)
169{
170 atomic_inc(&c3cn->refcnt);
171}
172
173static inline void c3cn_put(struct s3_conn *c3cn)
174{
175 if (atomic_dec_and_test(&c3cn->refcnt)) {
176 c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
177 c3cn, c3cn->state, c3cn->flags);
178 kfree(c3cn);
179 }
180}
181
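/*
 * Common teardown path: release the source port and the offload resources,
 * mark the connection CLOSED and notify the upper layer.
 */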
182static void c3cn_closed(struct s3_conn *c3cn)
183{
184 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
185 c3cn, c3cn->state, c3cn->flags);
186
187 c3cn_put_port(c3cn);
188 c3cn_release_offload_resources(c3cn);
189 c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
190 cxgb3i_conn_closing(c3cn);
191}
192
193/*
194 * CPL (Chelsio Protocol Language) defines a message passing interface between
195 * the host driver and T3 asic.
196 * The section below implements the CPLs related to iscsi tcp connection
197 * open/close/abort and data send/receive.
198 */
199
200/*
201 * CPL connection active open request: host ->
202 */
203static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
204{
205 int i = 0;
206
207 while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
208 ++i;
209 return i;
210}
211
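/*
 * Pick the MTU-table index to advertise: clamp the advertised MSS to the
 * path MTU and to the smallest entry in the adapter's MTU table (the 40
 * bytes account for the IP and TCP headers).
 */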
212static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
213{
214 unsigned int idx;
215 struct dst_entry *dst = c3cn->dst_cache;
216 struct t3cdev *cdev = c3cn->cdev;
217 const struct t3c_data *td = T3C_DATA(cdev);
218 u16 advmss = dst_metric(dst, RTAX_ADVMSS);
219
220 if (advmss > pmtu - 40)
221 advmss = pmtu - 40;
222 if (advmss < td->mtus[0] - 40)
223 advmss = td->mtus[0] - 40;
224 idx = find_best_mtu(td, advmss + 40);
225 return idx;
226}
227
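/*
 * Compute the TCP window-scale shift needed for the receive window to fit
 * into the 16-bit TCP window field; the shift is capped at 14 (RFC 1323).
 */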
228static inline int compute_wscale(int win)
229{
230 int wscale = 0;
231 while (wscale < 14 && (65535<<wscale) < win)
232 wscale++;
233 return wscale;
234}
235
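/*
 * opt0 carries the per-connection TCP settings for the active open request:
 * the high word enables keep-alive and TCAM bypass and sets the window scale
 * and MSS index, the low word selects the iSCSI ULP mode and the receive
 * buffer size.
 */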
236static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
237{
238 int wscale = compute_wscale(cxgb3_rcv_win);
239 return V_KEEP_ALIVE(1) |
240 F_TCAM_BYPASS |
241 V_WND_SCALE(wscale) |
242 V_MSS_IDX(c3cn->mss_idx);
243}
244
245static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
246{
247 return V_ULP_MODE(ULP_MODE_ISCSI) |
248 V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
249}
250
251static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
252 unsigned int atid, const struct l2t_entry *e)
253{
254 struct cpl_act_open_req *req;
255
256 c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);
257
258 skb->priority = CPL_PRIORITY_SETUP;
259 req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
260 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
261 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
262 req->local_port = c3cn->saddr.sin_port;
263 req->peer_port = c3cn->daddr.sin_port;
264 req->local_ip = c3cn->saddr.sin_addr.s_addr;
265 req->peer_ip = c3cn->daddr.sin_addr.s_addr;
266 req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
267 V_TX_CHANNEL(e->smt_idx));
268 req->opt0l = htonl(calc_opt0l(c3cn));
269 req->params = 0;
270}
271
272static void fail_act_open(struct s3_conn *c3cn, int errno)
273{
274 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
275 c3cn, c3cn->state, c3cn->flags);
276 c3cn->err = errno;
277 c3cn_closed(c3cn);
278}
279
280static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
281{
282 struct s3_conn *c3cn = (struct s3_conn *)skb->sk;
283
284 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
285
286 c3cn_hold(c3cn);
287 spin_lock_bh(&c3cn->lock);
288 if (c3cn->state == C3CN_STATE_CONNECTING)
289 fail_act_open(c3cn, EHOSTUNREACH);
290 spin_unlock_bh(&c3cn->lock);
291 c3cn_put(c3cn);
292 __kfree_skb(skb);
293}
294
295/*
296 * CPL connection close request: host ->
297 *
298 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
299 * the write queue (i.e., after any unsent tx data).
300 */
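/* queue an skb at the tail of the connection's write queue, tagged with the
 * current write sequence number and the given control flags.
 */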
301static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
302 int flags)
303{
304 CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
305 CXGB3_SKB_CB(skb)->flags = flags;
306 __skb_queue_tail(&c3cn->write_queue, skb);
307}
308
309static void send_close_req(struct s3_conn *c3cn)
310{
311 struct sk_buff *skb = c3cn->cpl_close;
312 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
313 unsigned int tid = c3cn->tid;
314
315 c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
316 c3cn, c3cn->state, c3cn->flags);
317
318 c3cn->cpl_close = NULL;
319
320 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
321 req->wr.wr_lo = htonl(V_WR_TID(tid));
322 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
323 req->rsvd = htonl(c3cn->write_seq);
324
325 skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
326 if (c3cn->state != C3CN_STATE_CONNECTING)
327 c3cn_push_tx_frames(c3cn, 1);
328}
329
330/*
331 * CPL connection abort request: host ->
332 *
333 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
334 * for the same connection and also that we do not try to send a message
335 * after the connection has closed.
336 */
337static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
338{
339 struct cpl_abort_req *req = cplhdr(skb);
340
341 c3cn_conn_debug("tdev 0x%p.\n", cdev);
342
343 req->cmd = CPL_ABORT_NO_RST;
344 cxgb3_ofld_send(cdev, skb);
345}
346
347static inline void c3cn_purge_write_queue(struct s3_conn *c3cn)
348{
349 struct sk_buff *skb;
350
351 while ((skb = __skb_dequeue(&c3cn->write_queue)))
352 __kfree_skb(skb);
353}
354
355static void send_abort_req(struct s3_conn *c3cn)
356{
357 struct sk_buff *skb = c3cn->cpl_abort_req;
358 struct cpl_abort_req *req;
359 unsigned int tid = c3cn->tid;
360
361 if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
362 !c3cn->cdev)
363 return;
364
365 c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
366
367 c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);
368
369 c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);
370
371 /* Purge the send queue so we don't send anything after an abort. */
372 c3cn_purge_write_queue(c3cn);
373
374 c3cn->cpl_abort_req = NULL;
375 req = (struct cpl_abort_req *)skb->head;
376
377 skb->priority = CPL_PRIORITY_DATA;
378 set_arp_failure_handler(skb, abort_arp_failure);
379
380 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
381 req->wr.wr_lo = htonl(V_WR_TID(tid));
382 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
383 req->rsvd0 = htonl(c3cn->snd_nxt);
384 req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
385 req->cmd = CPL_ABORT_SEND_RST;
386
387 l2t_send(c3cn->cdev, skb, c3cn->l2t);
388}
389
390/*
391 * CPL connection abort reply: host ->
392 *
393 * Send an ABORT_RPL message in response to the ABORT_REQ received.
394 */
395static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
396{
397 struct sk_buff *skb = c3cn->cpl_abort_rpl;
398 struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
399
400 c3cn->cpl_abort_rpl = NULL;
401
402 skb->priority = CPL_PRIORITY_DATA;
403 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
404 rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
405 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
406 rpl->cmd = rst_status;
407
408 cxgb3_ofld_send(c3cn->cdev, skb);
409}
410
411/*
412 * CPL connection rx data ack: host ->
413 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
414 * credits sent.
415 */
416static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
417{
418 struct sk_buff *skb;
419 struct cpl_rx_data_ack *req;
420
421 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
422 if (!skb)
423 return 0;
424
425 req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
426 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
427 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
428 req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
429 skb->priority = CPL_PRIORITY_ACK;
430 cxgb3_ofld_send(c3cn->cdev, skb);
431 return credits;
432}
433
434/*
435 * CPL connection tx data: host ->
436 *
437 * Send iscsi PDU via TX_DATA CPL message. Returns the number of
438 * credits sent.
439 * Each TX_DATA consumes work request credits (WRs), so we need to keep track of
440 * how many we've used so far and how many are pending (i.e., not yet acked by T3).
441 */
442
443/*
444 * For ULP connections the HW may insert digest bytes into the pdu. Those digest
445 * bytes are not sent by the host but are part of the TCP payload and therefore
446 * consume TCP sequence space.
447 */
448static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };
449static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
450{
451 return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
452}
453
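/* max. work request length in bytes, set up by s3_init_wr_tab() */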
454static unsigned int wrlen __read_mostly;
455
456/*
457 * The number of WRs needed for an skb depends on the number of fragments
458 * in the skb and whether it has any payload in its main body. This maps the
459 * length of the gather list represented by an skb into the # of necessary WRs.
460 *
461 * The max. length of an skb is controlled by the max pdu size which is ~16K.
462 * Also, assume the min. fragment length is the sector size (512), then add
463 * extra fragment counts for iscsi bhs and payload padding.
464 */
465#define SKB_WR_LIST_SIZE (16384/512 + 3)
466static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
467
468static void s3_init_wr_tab(unsigned int wr_len)
469{
470 int i;
471
472 if (skb_wrs[1]) /* already initialized */
473 return;
474
475 for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
476 int sgl_len = (3 * i) / 2 + (i & 1);
477
478 sgl_len += 3;
479 skb_wrs[i] = (sgl_len <= wr_len
480 ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
481 }
482
483 wrlen = wr_len * 8;
484}
485
486static inline void reset_wr_list(struct s3_conn *c3cn)
487{
488 c3cn->wr_pending_head = NULL;
489}
490
491/*
492 * Add a WR to a connection's list of pending WRs. This is a singly-linked
493 * list of sk_buffs operating as a FIFO. The head is kept in wr_pending_head
494 * and the tail in wr_pending_tail.
495 */
496static inline void enqueue_wr(struct s3_conn *c3cn,
497 struct sk_buff *skb)
498{
499 skb_wr_data(skb) = NULL;
500
501 /*
502 * We want to take an extra reference since both us and the driver
503 * need to free the packet before it's really freed. We know there's
504 * just one user currently so we use atomic_set rather than skb_get
505 * to avoid the atomic op.
506 */
507 atomic_set(&skb->users, 2);
508
509 if (!c3cn->wr_pending_head)
510 c3cn->wr_pending_head = skb;
511 else
512 skb_wr_data(skb) = skb;
513 c3cn->wr_pending_tail = skb;
514}
515
516static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
517{
518 return c3cn->wr_pending_head;
519}
520
521static inline void free_wr_skb(struct sk_buff *skb)
522{
523 kfree_skb(skb);
524}
525
526static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
527{
528 struct sk_buff *skb = c3cn->wr_pending_head;
529
530 if (likely(skb)) {
531 /* Don't bother clearing the tail */
532 c3cn->wr_pending_head = skb_wr_data(skb);
533 skb_wr_data(skb) = NULL;
534 }
535 return skb;
536}
537
538static void purge_wr_queue(struct s3_conn *c3cn)
539{
540 struct sk_buff *skb;
541 while ((skb = dequeue_wr(c3cn)) != NULL)
542 free_wr_skb(skb);
543}
544
545static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
546 int len)
547{
548 struct tx_data_wr *req;
549
550 skb_reset_transport_header(skb);
551 req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
552 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
553 req->wr_lo = htonl(V_WR_TID(c3cn->tid));
554 req->sndseq = htonl(c3cn->snd_nxt);
555 /* len includes the length of any HW ULP additions */
556 req->len = htonl(len);
557 req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx));
558 /* V_TX_ULP_SUBMODE sets both the mode and submode */
559 req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) |
560 V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1)));
561
562 if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
563 req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
564 V_TX_CPU_IDX(c3cn->qset));
565 /* Sendbuffer is in units of 32KB. */
566 req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
567 c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
568 }
569}
570
571static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
572{
573	kfree_skb(skb);
574}
575
576/**
577 * c3cn_push_tx_frames -- start transmit
578 * @c3cn: the offloaded connection
579 * @req_completion: request wr_ack or not
580 *
581 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
582 * connection's send queue and sends them on to T3. Must be called with the
583 * connection's lock held. Returns the amount of send buffer space that was
584 * freed as a result of sending queued data to T3.
585 */
586static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
587{
588 int total_size = 0;
589 struct sk_buff *skb;
590 struct t3cdev *cdev;
591 struct cxgb3i_sdev_data *cdata;
592
593 if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
594 c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
595 c3cn->state == C3CN_STATE_ABORTING)) {
596 c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
597 c3cn, c3cn->state);
598 return 0;
599 }
600
601 cdev = c3cn->cdev;
602 cdata = CXGB3_SDEV_DATA(cdev);
603
604 while (c3cn->wr_avail
605 && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
606 int len = skb->len; /* length before skb_push */
607 int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
608 int wrs_needed = skb_wrs[frags];
609
610 if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
611 wrs_needed = 1;
612
613 WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
614
615 if (c3cn->wr_avail < wrs_needed) {
616 c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
617 "wr %d < %u.\n",
618				c3cn, skb->len, skb->data_len, frags,
619 wrs_needed, c3cn->wr_avail);
620 break;
621 }
622
623 __skb_unlink(skb, &c3cn->write_queue);
624 skb->priority = CPL_PRIORITY_DATA;
625 skb->csum = wrs_needed; /* remember this until the WR_ACK */
626 c3cn->wr_avail -= wrs_needed;
627 c3cn->wr_unacked += wrs_needed;
628 enqueue_wr(c3cn, skb);
629
630 if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) {
631 len += ulp_extra_len(skb);
632 make_tx_data_wr(c3cn, skb, len);
633 c3cn->snd_nxt += len;
634 if ((req_completion
635 && c3cn->wr_unacked == wrs_needed)
636 || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL)
637 || c3cn->wr_unacked >= c3cn->wr_max / 2) {
638 struct work_request_hdr *wr = cplhdr(skb);
639
640 wr->wr_hi |= htonl(F_WR_COMPL);
641 c3cn->wr_unacked = 0;
642 }
643 CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR;
644 }
645
646 total_size += skb->truesize;
647 set_arp_failure_handler(skb, arp_failure_discard);
648 l2t_send(cdev, skb, c3cn->l2t);
649 }
650 return total_size;
651}
652
653/*
654 * process_cpl_msg: -> host
655 * Top-level CPL message processing used by most CPL messages that
656 * pertain to connections.
657 */
658static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
659 struct sk_buff *),
660 struct s3_conn *c3cn,
661 struct sk_buff *skb)
662{
663 spin_lock_bh(&c3cn->lock);
664 fn(c3cn, skb);
665 spin_unlock_bh(&c3cn->lock);
666}
667
668/*
669 * process_cpl_msg_ref: -> host
670 * Similar to process_cpl_msg() but takes an extra connection reference around
671 * the call to the handler. Should be used if the handler may drop a
672 * connection reference.
673 */
674static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
675 struct sk_buff *),
676 struct s3_conn *c3cn,
677 struct sk_buff *skb)
678{
679 c3cn_hold(c3cn);
680 process_cpl_msg(fn, c3cn, skb);
681 c3cn_put(c3cn);
682}
683
684/*
685 * Process a CPL_ACT_ESTABLISH message: -> host
686 * Updates connection state from an active establish CPL message. Runs with
687 * the connection lock held.
688 */
689
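/* release the adapter-assigned atid and drop the associated connection reference */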
690static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
691{
692 struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);
693 if (c3cn)
694 c3cn_put(c3cn);
695}
696
697static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
698 unsigned int opt)
699{
700 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
701
702 c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;
703
704 /*
705 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
706 * pass through opt0.
707 */
708 if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
709 c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);
710
711 dst_confirm(c3cn->dst_cache);
712
713 smp_mb();
714
715 c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
716}
717
718static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
719{
720 struct cpl_act_establish *req = cplhdr(skb);
721 u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */
722
723 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
724 c3cn, c3cn->state, c3cn->flags);
725
726 if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
727 cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
728 c3cn->tid, c3cn->state);
729
730 c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
731 c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));
732
733 __kfree_skb(skb);
734
735 if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
736 /* upper layer has requested closing */
737 send_abort_req(c3cn);
738 else if (c3cn_push_tx_frames(c3cn, 1))
739 cxgb3i_conn_tx_open(c3cn);
740}
741
742static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
743 void *ctx)
744{
745 struct cpl_act_establish *req = cplhdr(skb);
746 unsigned int tid = GET_TID(req);
747 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
748 struct s3_conn *c3cn = ctx;
749 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
750
751 c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
752 tid, c3cn, c3cn->state, c3cn->flags);
753
754 c3cn->tid = tid;
755 c3cn_hold(c3cn);
756 cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
757 s3_free_atid(cdev, atid);
758
759 c3cn->qset = G_QNUM(ntohl(skb->csum));
760
761 process_cpl_msg(process_act_establish, c3cn, skb);
762 return 0;
763}
764
765/*
766 * Process a CPL_ACT_OPEN_RPL message: -> host
767 * Handle active open failures.
768 */
769static int act_open_rpl_status_to_errno(int status)
770{
771 switch (status) {
772 case CPL_ERR_CONN_RESET:
773 return ECONNREFUSED;
774 case CPL_ERR_ARP_MISS:
775 return EHOSTUNREACH;
776 case CPL_ERR_CONN_TIMEDOUT:
777 return ETIMEDOUT;
778 case CPL_ERR_TCAM_FULL:
779 return ENOMEM;
780 case CPL_ERR_CONN_EXIST:
781 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
782 return EADDRINUSE;
783 default:
784 return EIO;
785 }
786}
787
788static void act_open_retry_timer(unsigned long data)
789{
790 struct sk_buff *skb;
791 struct s3_conn *c3cn = (struct s3_conn *)data;
792
793 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
794
795 spin_lock_bh(&c3cn->lock);
796 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
797 if (!skb)
798 fail_act_open(c3cn, ENOMEM);
799 else {
800 skb->sk = (struct sock *)c3cn;
801 set_arp_failure_handler(skb, act_open_req_arp_failure);
802 make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
803 l2t_send(c3cn->cdev, skb, c3cn->l2t);
804 }
805 spin_unlock_bh(&c3cn->lock);
806 c3cn_put(c3cn);
807}
808
809static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
810{
811 struct cpl_act_open_rpl *rpl = cplhdr(skb);
812
813 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
814 c3cn, c3cn->state, c3cn->flags);
815
816 if (rpl->status == CPL_ERR_CONN_EXIST &&
817 c3cn->retry_timer.function != act_open_retry_timer) {
818 c3cn->retry_timer.function = act_open_retry_timer;
819 if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
820 c3cn_hold(c3cn);
821 } else
822 fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
823 __kfree_skb(skb);
824}
825
826static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
827{
828 struct s3_conn *c3cn = ctx;
829 struct cpl_act_open_rpl *rpl = cplhdr(skb);
830
831 c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
832 rpl->status, c3cn, c3cn->state, c3cn->flags);
833
834 if (rpl->status != CPL_ERR_TCAM_FULL &&
835 rpl->status != CPL_ERR_CONN_EXIST &&
836 rpl->status != CPL_ERR_ARP_MISS)
837 cxgb3_queue_tid_release(cdev, GET_TID(rpl));
838
839 process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
840 return 0;
841}
842
843/*
844 * Process PEER_CLOSE CPL messages: -> host
845 * Handle peer FIN.
846 */
847static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
848{
849 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
850 c3cn, c3cn->state, c3cn->flags);
851
852 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
853 goto out;
854
855 switch (c3cn->state) {
856 case C3CN_STATE_ESTABLISHED:
857 c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
858 break;
859 case C3CN_STATE_ACTIVE_CLOSE:
860 c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
861 break;
862 case C3CN_STATE_CLOSE_WAIT_1:
863 c3cn_closed(c3cn);
864 break;
865 case C3CN_STATE_ABORTING:
866 break;
867 default:
868 cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
869 c3cn->cdev->name, c3cn->tid, c3cn->state);
870 }
871
872 cxgb3i_conn_closing(c3cn);
873out:
874 __kfree_skb(skb);
875}
876
877static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
878{
879 struct s3_conn *c3cn = ctx;
880
881 c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
882 c3cn, c3cn->state, c3cn->flags);
883 process_cpl_msg_ref(process_peer_close, c3cn, skb);
884 return 0;
885}
886
887/*
888 * Process CLOSE_CONN_RPL CPL message: -> host
889 * Process a peer ACK to our FIN.
890 */
891static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
892{
893 struct cpl_close_con_rpl *rpl = cplhdr(skb);
894
895 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
896 c3cn, c3cn->state, c3cn->flags);
897
898 c3cn->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
899
900 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
901 goto out;
902
903 switch (c3cn->state) {
904 case C3CN_STATE_ACTIVE_CLOSE:
905 c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
906 break;
907 case C3CN_STATE_CLOSE_WAIT_1:
908 case C3CN_STATE_CLOSE_WAIT_2:
909 c3cn_closed(c3cn);
910 break;
911 case C3CN_STATE_ABORTING:
912 break;
913 default:
914 cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
915 c3cn->cdev->name, c3cn->tid, c3cn->state);
916 }
917
918out:
919 kfree_skb(skb);
920}
921
922static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
923 void *ctx)
924{
925 struct s3_conn *c3cn = ctx;
926
927 c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
928 c3cn, c3cn->state, c3cn->flags);
929
930 process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
931 return 0;
932}
933
934/*
935 * Process ABORT_REQ_RSS CPL message: -> host
936 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
937 * request except that we need to reply to it.
938 */
939
940static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
941 int *need_rst)
942{
943 switch (abort_reason) {
944 case CPL_ERR_BAD_SYN: /* fall through */
945 case CPL_ERR_CONN_RESET:
946 return c3cn->state > C3CN_STATE_ESTABLISHED ?
947 EPIPE : ECONNRESET;
948 case CPL_ERR_XMIT_TIMEDOUT:
949 case CPL_ERR_PERSIST_TIMEDOUT:
950 case CPL_ERR_FINWAIT2_TIMEDOUT:
951 case CPL_ERR_KEEPALIVE_TIMEDOUT:
952 return ETIMEDOUT;
953 default:
954 return EIO;
955 }
956}
957
958static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
959{
960 int rst_status = CPL_ABORT_NO_RST;
961 const struct cpl_abort_req_rss *req = cplhdr(skb);
962
963 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
964 c3cn, c3cn->state, c3cn->flags);
965
966 if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
967 c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
968 c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
969 __kfree_skb(skb);
970 return;
971 }
972
973 c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
974 send_abort_rpl(c3cn, rst_status);
975
976 if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
977 c3cn->err =
978 abort_status_to_errno(c3cn, req->status, &rst_status);
979 c3cn_closed(c3cn);
980 }
981}
982
983static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
984{
985 const struct cpl_abort_req_rss *req = cplhdr(skb);
986 struct s3_conn *c3cn = ctx;
987
988 c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n",
989 c3cn, c3cn->state, c3cn->flags);
990
991 if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
992 req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
993 __kfree_skb(skb);
994 return 0;
995 }
996
997 process_cpl_msg_ref(process_abort_req, c3cn, skb);
998 return 0;
999}
1000
1001/*
1002 * Process ABORT_RPL_RSS CPL message: -> host
1003 * Process abort replies. We only process these messages if we anticipate
1004 * them, as the coordination between SW and HW in this area is somewhat lacking
1005 * and we sometimes get ABORT_RPLs after we are done with the connection that
1006 * originated the ABORT_REQ.
1007 */
1008static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
1009{
1010 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1011 c3cn, c3cn->state, c3cn->flags);
1012
1013 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
1014 if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
1015 c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
1016 else {
1017 c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
1018 c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
1019 if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
1020 cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
1021 c3cn->cdev->name, c3cn->tid);
1022 c3cn_closed(c3cn);
1023 }
1024 }
1025 __kfree_skb(skb);
1026}
1027
1028static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1029{
1030 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1031 struct s3_conn *c3cn = ctx;
1032
1033 c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
1034 rpl->status, c3cn, c3cn ? c3cn->state : 0,
1035 c3cn ? c3cn->flags : 0UL);
1036
1037 /*
1038 * Ignore replies to post-close aborts indicating that the abort was
1039 * requested too late. These connections are terminated when we get
1040 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
1041 * arrives the TID is either no longer used or it has been recycled.
1042 */
1043 if (rpl->status == CPL_ERR_ABORT_FAILED)
1044 goto discard;
1045
1046 /*
1047 * Sometimes we've already closed the connection, e.g., a post-close
1048 * abort races with ABORT_REQ_RSS, the latter frees the connection
1049 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
1050 * but FW turns the ABORT_REQ into a regular one and so we get
1051 * ABORT_RPL_RSS with status 0 and no connection.
1052 */
1053 if (!c3cn)
1054 goto discard;
1055
1056 process_cpl_msg_ref(process_abort_rpl, c3cn, skb);
1057 return 0;
1058
1059discard:
1060 __kfree_skb(skb);
1061 return 0;
1062}
1063
1064/*
1065 * Process RX_ISCSI_HDR CPL message: -> host
1066 * Handle received PDUs; the payload could be DDP'ed. If not, the payload
1067 * follows after the bhs.
1068 */
1069static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
1070{
1071 struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
1072 struct cpl_iscsi_hdr_norss data_cpl;
1073 struct cpl_rx_data_ddp_norss ddp_cpl;
1074 unsigned int hdr_len, data_len, status;
1075 unsigned int len;
1076 int err;
1077
1078 if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
1079 if (c3cn->state != C3CN_STATE_ABORTING)
1080 send_abort_req(c3cn);
1081 __kfree_skb(skb);
1082 return;
1083 }
1084
1085 CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq);
1086 CXGB3_SKB_CB(skb)->flags = 0;
1087
1088 skb_reset_transport_header(skb);
1089 __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
1090
1091 len = hdr_len = ntohs(hdr_cpl->len);
1092 /* msg coalesce is off or not enough data received */
1093 if (skb->len <= hdr_len) {
1094 cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
1095 c3cn->cdev->name, c3cn->tid,
1096 skb->len, hdr_len);
1097 goto abort_conn;
1098 }
1099
1100 err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
1101 sizeof(ddp_cpl));
1102 if (err < 0)
1103 goto abort_conn;
1104
1105 skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
1106 skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len);
1107 skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
1108 status = ntohl(ddp_cpl.ddp_status);
1109
1110 c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
1111 skb, skb->len, skb_ulp_pdulen(skb), status);
1112
1113 if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
1114 skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
1115 if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
1116 skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
1117 if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
1118 skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;
1119
1120 if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
1121 err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
1122 if (err < 0)
1123 goto abort_conn;
1124 data_len = ntohs(data_cpl.len);
1125 len += sizeof(data_cpl) + data_len;
1126 } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
1127 skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
1128
1129 c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb);
1130 __pskb_trim(skb, len);
1131 __skb_queue_tail(&c3cn->receive_queue, skb);
1132 cxgb3i_conn_pdu_ready(c3cn);
1133
1134 return;
1135
1136abort_conn:
1137 send_abort_req(c3cn);
1138 __kfree_skb(skb);
1139}
1140
1141static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
1142{
1143 struct s3_conn *c3cn = ctx;
1144
1145 process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
1146 return 0;
1147}
1148
1149/*
1150 * Process TX_DATA_ACK CPL messages: -> host
1151 * Process an acknowledgment of WR completion. Advance snd_una and send the
1152 * next batch of work requests from the write queue.
1153 */
1154static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
1155{
1156 struct cpl_wr_ack *hdr = cplhdr(skb);
1157 unsigned int credits = ntohs(hdr->credits);
1158 u32 snd_una = ntohl(hdr->snd_una);
1159
1160 c3cn->wr_avail += credits;
1161 if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
1162 c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;
1163
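	/*
	 * Each pending skb's ->csum field caches the number of WR credits it
	 * consumed (set in c3cn_push_tx_frames()): retire fully acked skbs and
	 * leave a partially acked one at the head of the list.
	 */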
1164 while (credits) {
1165 struct sk_buff *p = peek_wr(c3cn);
1166
1167 if (unlikely(!p)) {
1168 cxgb3i_log_error("%u WR_ACK credits for TID %u with "
1169 "nothing pending, state %u\n",
1170 credits, c3cn->tid, c3cn->state);
1171 break;
1172 }
1173 if (unlikely(credits < p->csum)) {
1174 p->csum -= credits;
1175 break;
1176 } else {
1177 dequeue_wr(c3cn);
1178 credits -= p->csum;
1179 free_wr_skb(p);
1180 }
1181 }
1182
1183 if (unlikely(before(snd_una, c3cn->snd_una)))
1184 goto out_free;
1185
1186 if (c3cn->snd_una != snd_una) {
1187 c3cn->snd_una = snd_una;
1188 dst_confirm(c3cn->dst_cache);
1189 }
1190
1191 if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0))
1192 cxgb3i_conn_tx_open(c3cn);
1193out_free:
1194 __kfree_skb(skb);
1195}
1196
1197static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1198{
1199 struct s3_conn *c3cn = ctx;
1200
1201 process_cpl_msg(process_wr_ack, c3cn, skb);
1202 return 0;
1203}
1204
1205/*
1206 * For each connection, pre-allocate the skbs needed for close/abort requests,
1207 * so that those requests can be serviced right away.
1208 */
1209static void c3cn_free_cpl_skbs(struct s3_conn *c3cn)
1210{
1211 if (c3cn->cpl_close)
1212 kfree_skb(c3cn->cpl_close);
1213 if (c3cn->cpl_abort_req)
1214 kfree_skb(c3cn->cpl_abort_req);
1215 if (c3cn->cpl_abort_rpl)
1216 kfree_skb(c3cn->cpl_abort_rpl);
1217}
1218
1219static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn)
1220{
1221 c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
1222 GFP_KERNEL);
1223 if (!c3cn->cpl_close)
1224 return -ENOMEM;
1225 skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req));
1226
1227 c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
1228 GFP_KERNEL);
1229 if (!c3cn->cpl_abort_req)
1230 goto free_cpl_skbs;
1231 skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req));
1232
1233 c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
1234 GFP_KERNEL);
1235 if (!c3cn->cpl_abort_rpl)
1236 goto free_cpl_skbs;
1237 skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));
1238
1239 return 0;
1240
1241free_cpl_skbs:
1242 c3cn_free_cpl_skbs(c3cn);
1243 return -ENOMEM;
1244}
1245
1246/**
1247 * c3cn_release_offload_resources - release offload resources
1248 * @c3cn: the offloaded iscsi tcp connection.
1249 * Release resources held by an offload connection (TID, L2T entry, etc.)
1250 */
1251static void c3cn_release_offload_resources(struct s3_conn *c3cn)
1252{
1253 struct t3cdev *cdev = c3cn->cdev;
1254 unsigned int tid = c3cn->tid;
1255
1256 if (!cdev)
1257 return;
1258
1259 c3cn->qset = 0;
1260
1261 c3cn_free_cpl_skbs(c3cn);
1262
1263 if (c3cn->wr_avail != c3cn->wr_max) {
1264 purge_wr_queue(c3cn);
1265 reset_wr_list(c3cn);
1266 }
1267
1268 if (c3cn->l2t) {
1269 l2t_release(L2DATA(cdev), c3cn->l2t);
1270 c3cn->l2t = NULL;
1271 }
1272
1273 if (c3cn->state == C3CN_STATE_CONNECTING) /* we have ATID */
1274 s3_free_atid(cdev, tid);
1275 else { /* we have TID */
1276 cxgb3_remove_tid(cdev, (void *)c3cn, tid);
1277 c3cn_put(c3cn);
1278 }
1279
1280 c3cn->cdev = NULL;
1281}
1282
1283/**
1284 * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure
1285 * Returns the allocated s3_conn structure, or NULL on failure.
1286 */
1287struct s3_conn *cxgb3i_c3cn_create(void)
1288{
1289 struct s3_conn *c3cn;
1290
1291 c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
1292 if (!c3cn)
1293 return NULL;
1294
1295 /* pre-allocate close/abort cpl, so we don't need to wait for memory
1296 when close/abort is requested. */
1297 if (c3cn_alloc_cpl_skbs(c3cn) < 0)
1298 goto free_c3cn;
1299
1300 c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);
1301
1302 c3cn->flags = 0;
1303 spin_lock_init(&c3cn->lock);
1304 atomic_set(&c3cn->refcnt, 1);
1305 skb_queue_head_init(&c3cn->receive_queue);
1306 skb_queue_head_init(&c3cn->write_queue);
1307 setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
1308 rwlock_init(&c3cn->callback_lock);
1309
1310 return c3cn;
1311
1312free_c3cn:
1313 kfree(c3cn);
1314 return NULL;
1315}
1316
1317static void c3cn_active_close(struct s3_conn *c3cn)
1318{
1319 int data_lost;
1320 int close_req = 0;
1321
1322 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1323 c3cn, c3cn->state, c3cn->flags);
1324
1325 dst_confirm(c3cn->dst_cache);
1326
1327 c3cn_hold(c3cn);
1328 spin_lock_bh(&c3cn->lock);
1329
1330 data_lost = skb_queue_len(&c3cn->receive_queue);
1331 __skb_queue_purge(&c3cn->receive_queue);
1332
1333 switch (c3cn->state) {
1334 case C3CN_STATE_CLOSED:
1335 case C3CN_STATE_ACTIVE_CLOSE:
1336 case C3CN_STATE_CLOSE_WAIT_1:
1337 case C3CN_STATE_CLOSE_WAIT_2:
1338 case C3CN_STATE_ABORTING:
1339		/* nothing needs to be done */
1340 break;
1341 case C3CN_STATE_CONNECTING:
1342 /* defer until cpl_act_open_rpl or cpl_act_establish */
1343 c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
1344 break;
1345 case C3CN_STATE_ESTABLISHED:
1346 close_req = 1;
1347 c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
1348 break;
1349 case C3CN_STATE_PASSIVE_CLOSE:
1350 close_req = 1;
1351 c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
1352 break;
1353 }
1354
1355 if (close_req) {
1356 if (data_lost)
1357 /* Unread data was tossed, zap the connection. */
1358 send_abort_req(c3cn);
1359 else
1360 send_close_req(c3cn);
1361 }
1362
1363 spin_unlock_bh(&c3cn->lock);
1364 c3cn_put(c3cn);
1365}
1366
1367/**
1368 * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
1369 * resources held
1370 * @c3cn: the iscsi tcp connection
1371 */
1372void cxgb3i_c3cn_release(struct s3_conn *c3cn)
1373{
1374 c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
1375 c3cn, c3cn->state, c3cn->flags);
1376 if (likely(c3cn->state != C3CN_STATE_CONNECTING))
1377 c3cn_active_close(c3cn);
1378 else
1379 c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
1380 c3cn_put(c3cn);
1381}
1382
1383static int is_cxgb3_dev(struct net_device *dev)
1384{
1385 struct cxgb3i_sdev_data *cdata;
1386
1387 write_lock(&cdata_rwlock);
1388 list_for_each_entry(cdata, &cdata_list, list) {
1389 struct adap_ports *ports = &cdata->ports;
1390 int i;
1391
1392 for (i = 0; i < ports->nports; i++)
1393 if (dev == ports->lldevs[i]) {
1394 write_unlock(&cdata_rwlock);
1395 return 1;
1396 }
1397 }
1398 write_unlock(&cdata_rwlock);
1399 return 0;
1400}
1401
1402/**
1403 * cxgb3_egress_dev - return the cxgb3 egress device
1404 * @root_dev: the root device anchoring the search
1405 * @c3cn: the connection used to determine egress port in bonding mode
1406 * @context: in bonding mode, indicates a connection set up or failover
1407 *
1408 * Return egress device or NULL if the egress device isn't one of our ports.
1409 */
1410static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1411 struct s3_conn *c3cn,
1412 int context)
1413{
1414 while (root_dev) {
1415 if (root_dev->priv_flags & IFF_802_1Q_VLAN)
1416 root_dev = vlan_dev_real_dev(root_dev);
1417 else if (is_cxgb3_dev(root_dev))
1418 return root_dev;
1419 else
1420 return NULL;
1421 }
1422 return NULL;
1423}
1424
1425static struct rtable *find_route(__be32 saddr, __be32 daddr,
1426 __be16 sport, __be16 dport)
1427{
1428 struct rtable *rt;
1429 struct flowi fl = {
1430 .oif = 0,
1431 .nl_u = {
1432 .ip4_u = {
1433 .daddr = daddr,
1434 .saddr = saddr,
1435 .tos = 0 } },
1436 .proto = IPPROTO_TCP,
1437 .uli_u = {
1438 .ports = {
1439 .sport = sport,
1440 .dport = dport } } };
1441
1442 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
1443 return NULL;
1444 return rt;
1445}
1446
1447/*
1448 * Assign offload parameters to some connection fields.
1449 */
1450static void init_offload_conn(struct s3_conn *c3cn,
1451 struct t3cdev *cdev,
1452 struct dst_entry *dst)
1453{
1454 BUG_ON(c3cn->cdev != cdev);
1455 c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs;
1456 c3cn->wr_unacked = 0;
1457 c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
1458
1459 reset_wr_list(c3cn);
1460}
1461
1462static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
1463{
1464 struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
1465 struct t3cdev *cdev = cdata->cdev;
1466 struct dst_entry *dst = c3cn->dst_cache;
1467 struct sk_buff *skb;
1468
1469 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1470 c3cn, c3cn->state, c3cn->flags);
1471 /*
1472 * Initialize connection data. Note that the flags and ULP mode are
1473 * initialized higher up ...
1474 */
1475 c3cn->dev = dev;
1476 c3cn->cdev = cdev;
1477 c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
1478 if (c3cn->tid < 0)
1479 goto out_err;
1480
1481 c3cn->qset = 0;
1482 c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
1483 if (!c3cn->l2t)
1484 goto free_tid;
1485
1486 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
1487 if (!skb)
1488 goto free_l2t;
1489
1490 skb->sk = (struct sock *)c3cn;
1491 set_arp_failure_handler(skb, act_open_req_arp_failure);
1492
1493 c3cn_hold(c3cn);
1494
1495 init_offload_conn(c3cn, cdev, dst);
1496 c3cn->err = 0;
1497
1498 make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
1499 l2t_send(cdev, skb, c3cn->l2t);
1500 return 0;
1501
1502free_l2t:
1503 l2t_release(L2DATA(cdev), c3cn->l2t);
1504free_tid:
1505 s3_free_atid(cdev, c3cn->tid);
1506 c3cn->tid = 0;
1507out_err:
1508 return -1;
1509}
1510
1511
1512/**
1513 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
1514 * @c3cn: the iscsi tcp connection
1515 * @usin: destination address
1516 *
1517 * Returns 0 if the active open request is sent, < 0 otherwise.
1518 */
1519int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
1520{
1521 struct rtable *rt;
1522 struct net_device *dev;
1523 struct cxgb3i_sdev_data *cdata;
1524 struct t3cdev *cdev;
1525 __be32 sipv4;
1526 int err;
1527
1528 if (usin->sin_family != AF_INET)
1529 return -EAFNOSUPPORT;
1530
1531 c3cn->daddr.sin_port = usin->sin_port;
1532 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1533
1534 rt = find_route(c3cn->saddr.sin_addr.s_addr,
1535 c3cn->daddr.sin_addr.s_addr,
1536 c3cn->saddr.sin_port,
1537 c3cn->daddr.sin_port);
1538 if (rt == NULL) {
1539 c3cn_conn_debug("NO route to 0x%x, port %u.\n",
1540 c3cn->daddr.sin_addr.s_addr,
1541 ntohs(c3cn->daddr.sin_port));
1542 return -ENETUNREACH;
1543 }
1544
1545 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1546 c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
1547 c3cn->daddr.sin_addr.s_addr,
1548 ntohs(c3cn->daddr.sin_port));
1549 ip_rt_put(rt);
1550 return -ENETUNREACH;
1551 }
1552
1553 if (!c3cn->saddr.sin_addr.s_addr)
1554 c3cn->saddr.sin_addr.s_addr = rt->rt_src;
1555
1556 /* now commit destination to connection */
1557 c3cn->dst_cache = &rt->u.dst;
1558
1559 /* try to establish an offloaded connection */
1560 dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
1561 if (dev == NULL) {
1562 c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
1563 return -ENETUNREACH;
1564 }
1565 cdata = NDEV2CDATA(dev);
1566 cdev = cdata->cdev;
1567
1568 /* get a source port if one hasn't been provided */
1569 err = c3cn_get_port(c3cn, cdata);
1570 if (err)
1571 return err;
1572
1573 c3cn_conn_debug("c3cn 0x%p get port %u.\n",
1574 c3cn, ntohs(c3cn->saddr.sin_port));
1575
1576 sipv4 = cxgb3i_get_private_ipv4addr(dev);
1577 if (!sipv4) {
1578 c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
1579 sipv4 = c3cn->saddr.sin_addr.s_addr;
1580 cxgb3i_set_private_ipv4addr(dev, sipv4);
1581 } else
1582 c3cn->saddr.sin_addr.s_addr = sipv4;
1583
1584 c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
1585 c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
1586 ntohs(c3cn->saddr.sin_port),
1587 NIPQUAD(c3cn->daddr.sin_addr.s_addr),
1588 ntohs(c3cn->daddr.sin_port));
1589
1590 c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
1591 if (!initiate_act_open(c3cn, dev))
1592 return 0;
1593
1594 /*
1595 * If we get here, we don't have an offload connection so simply
1596 * return a failure.
1597 */
1598 err = -ENOTSUPP;
1599
1600 /*
1601 * This trashes the connection and releases the local port,
1602 * if necessary.
1603 */
1604 c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
1605 c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
1606 ip_rt_put(rt);
1607 c3cn_put_port(c3cn);
1608 c3cn->daddr.sin_port = 0;
1609 return err;
1610}
1611
1612/**
1613 * cxgb3i_c3cn_rx_credits - ack received tcp data.
1614 * @c3cn: iscsi tcp connection
1615 * @copied: # of bytes processed
1616 *
1617 * Called after some received data has been read. It returns RX credits
1618 * to the HW for the amount of data processed.
1619 */
1620void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
1621{
1622 struct t3cdev *cdev;
1623 int must_send;
1624 u32 credits, dack = 0;
1625
1626 if (c3cn->state != C3CN_STATE_ESTABLISHED)
1627 return;
1628
1629 credits = c3cn->copied_seq - c3cn->rcv_wup;
1630 if (unlikely(!credits))
1631 return;
1632
1633 cdev = c3cn->cdev;
1634
1635 if (unlikely(cxgb3_rx_credit_thres == 0))
1636 return;
1637
1638 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
1639
1640 /*
1641 * For coalescing to work effectively ensure the receive window has
1642 * at least 16KB left.
1643 */
1644 must_send = credits + 16384 >= cxgb3_rcv_win;
1645
1646 if (must_send || credits >= cxgb3_rx_credit_thres)
1647 c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
1648}
1649
1650/**
1651 * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
1652 * @c3cn: iscsi tcp connection
1653 * @skb: skb containing the iscsi pdu
1654 *
1655 * Add a list of skbs to a connection send queue. The skbs must comply with
1656 * the max size limit of the device and have a headroom of at least
1657 * TX_HEADER_LEN bytes.
1658 * Return # of bytes queued.
1659 */
1660int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
1661{
1662 struct sk_buff *next;
1663 int err, copied = 0;
1664
1665 spin_lock_bh(&c3cn->lock);
1666
1667 if (c3cn->state != C3CN_STATE_ESTABLISHED) {
1668 c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
1669 c3cn, c3cn->state);
1670 err = -EAGAIN;
1671 goto out_err;
1672 }
1673
1674 err = -EPIPE;
1675 if (c3cn->err) {
1676 c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
1677 goto out_err;
1678 }
1679
1680 while (skb) {
1681 int frags = skb_shinfo(skb)->nr_frags +
1682 (skb->len != skb->data_len);
1683
1684 if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
1685 c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
1686 err = -EINVAL;
1687 goto out_err;
1688 }
1689
1690 if (frags >= SKB_WR_LIST_SIZE) {
1691 cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
1692 c3cn, skb_shinfo(skb)->nr_frags,
1693 skb->len, skb->data_len);
1694 err = -EINVAL;
1695 goto out_err;
1696 }
1697
1698 next = skb->next;
1699 skb->next = NULL;
1700 skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
1701 copied += skb->len;
1702 c3cn->write_seq += skb->len + ulp_extra_len(skb);
1703 skb = next;
1704 }
1705done:
1706 if (likely(skb_queue_len(&c3cn->write_queue)))
1707 c3cn_push_tx_frames(c3cn, 1);
1708 spin_unlock_bh(&c3cn->lock);
1709 return copied;
1710
1711out_err:
1712 if (copied == 0 && err == -EPIPE)
1713 copied = c3cn->err ? c3cn->err : -EPIPE;
1714 goto done;
1715}
1716
1717static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
1718{
1719 struct adap_ports *ports = &cdata->ports;
1720 int i;
1721
1722 for (i = 0; i < ports->nports; i++)
1723 NDEV2CDATA(ports->lldevs[i]) = NULL;
1724 cxgb3i_free_big_mem(cdata);
1725}
1726
1727void cxgb3i_sdev_cleanup(void)
1728{
1729 struct cxgb3i_sdev_data *cdata;
1730
1731 write_lock(&cdata_rwlock);
1732 list_for_each_entry(cdata, &cdata_list, list) {
1733 list_del(&cdata->list);
1734 sdev_data_cleanup(cdata);
1735 }
1736 write_unlock(&cdata_rwlock);
1737}
1738
1739int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
1740{
1741 cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
1742 cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
1743 cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
1744 cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
1745 cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
1746 cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
1747 cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
1748 cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;
1749
1750 if (cxgb3_max_connect > CXGB3I_MAX_CONN)
1751 cxgb3_max_connect = CXGB3I_MAX_CONN;
1752 return 0;
1753}
1754
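cxgb3i_sdev_init() above only fills in a caller-supplied CPL handler table; the table itself and the cxgb3_client registration live in the driver's init code outside this file. A rough, hedged sketch of what such a caller might look like is below; the array size NUM_CPL_CMDS, the cxgb3_client field names and cxgb3_register_client() are assumptions about the cxgb3 offload API and are not shown in this patch.

	/* illustrative caller, not part of this patch */
	static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
	static struct cxgb3_client t3c_client;

	static void sdev_add_wrapper(struct t3cdev *t3dev)
	{
		/* cxgb3i_sdev_add() also needs the client pointer */
		cxgb3i_sdev_add(t3dev, &t3c_client);
	}

	static struct cxgb3_client t3c_client = {
		.name     = "iscsi_cxgb3",
		.handlers = cxgb3i_cpl_handlers,	/* dispatched by the cxgb3 LLD */
		.add      = sdev_add_wrapper,
		.remove   = cxgb3i_sdev_remove,
	};

	static int __init cxgb3i_offload_example_init(void)
	{
		int err = cxgb3i_sdev_init(cxgb3i_cpl_handlers);

		if (err < 0)
			return err;
		cxgb3_register_client(&t3c_client);
		return 0;
	}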
1755/**
1756 * cxgb3i_sdev_add - allocate and initialize resources for each adapter found
1757 * @cdev: t3cdev adapter
1758 * @client: cxgb3 driver client
1759 */
1760void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
1761{
1762 struct cxgb3i_sdev_data *cdata;
1763 struct ofld_page_info rx_page_info;
1764 unsigned int wr_len;
1765 int mapsize = DIV_ROUND_UP(cxgb3_max_connect,
1766 8 * sizeof(unsigned long));
1767 int i;
1768
1769 cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
1770 if (!cdata)
1771 return;
1772
1773 if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
1774 cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
1775 cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0)
1776 goto free_cdata;
1777
1778 s3_init_wr_tab(wr_len);
1779
1780 INIT_LIST_HEAD(&cdata->list);
1781 cdata->cdev = cdev;
1782 cdata->client = client;
1783
1784 for (i = 0; i < cdata->ports.nports; i++)
1785 NDEV2CDATA(cdata->ports.lldevs[i]) = cdata;
1786
1787 write_lock(&cdata_rwlock);
1788 list_add_tail(&cdata->list, &cdata_list);
1789 write_unlock(&cdata_rwlock);
1790
1791 return;
1792
1793free_cdata:
1794 cxgb3i_free_big_mem(cdata);
1795}
1796
1797/**
1798 * cxgb3i_sdev_remove - free the allocated resources for the adapter
1799 * @cdev: t3cdev adapter
1800 */
1801void cxgb3i_sdev_remove(struct t3cdev *cdev)
1802{
1803 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
1804
1805 write_lock(&cdata_rwlock);
1806 list_del(&cdata->list);
1807 write_unlock(&cdata_rwlock);
1808
1809 sdev_data_cleanup(cdata);
1810}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
new file mode 100644
index 000000000000..d23156907ffd
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -0,0 +1,231 @@
1/*
2 * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#ifndef _CXGB3I_OFFLOAD_H
16#define _CXGB3I_OFFLOAD_H
17
18#include <linux/skbuff.h>
19#include <net/tcp.h>
20
21#include "common.h"
22#include "adapter.h"
23#include "t3cdev.h"
24#include "cxgb3_offload.h"
25
26#define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt)
27#define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt)
28#define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt)
29#define cxgb3i_log_debug(fmt, args...) \
30 printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args)
31
32/**
33 * struct s3_conn - an iscsi tcp connection structure
34 *
 35 * @dev: net device of the connection
36 * @cdev: adapter t3cdev for net device
37 * @flags: see c3cn_flags below
38 * @tid: connection id assigned by the h/w
39 * @qset: queue set used by connection
40 * @mss_idx: Maximum Segment Size table index
41 * @l2t: ARP resolution entry for offload packets
42 * @wr_max: maximum in-flight writes
43 * @wr_avail: number of writes available
44 * @wr_unacked: writes since last request for completion notification
45 * @wr_pending_head: head of pending write queue
46 * @wr_pending_tail: tail of pending write queue
47 * @cpl_close: skb for cpl_close_req
48 * @cpl_abort_req: skb for cpl_abort_req
49 * @cpl_abort_rpl: skb for cpl_abort_rpl
50 * @lock: connection status lock
51 * @refcnt: reference count on connection
52 * @state: connection state
53 * @saddr: source ip/port address
54 * @daddr: destination ip/port address
55 * @dst_cache: reference to destination route
56 * @receive_queue: received PDUs
57 * @write_queue: un-pushed pending writes
58 * @retry_timer: retry timer for various operations
59 * @err: connection error status
60 * @callback_lock: lock for opaque user context
61 * @user_data: opaque user context
62 * @rcv_nxt: next receive seq. #
63 * @copied_seq: head of yet unread data
64 * @rcv_wup: rcv_nxt on last window update sent
65 * @snd_nxt: next sequence we send
66 * @snd_una: first byte we want an ack for
67 * @write_seq: tail+1 of data held in send buffer
68 */
69struct s3_conn {
70 struct net_device *dev;
71 struct t3cdev *cdev;
72 unsigned long flags;
73 int tid;
74 int qset;
75 int mss_idx;
76 struct l2t_entry *l2t;
77 int wr_max;
78 int wr_avail;
79 int wr_unacked;
80 struct sk_buff *wr_pending_head;
81 struct sk_buff *wr_pending_tail;
82 struct sk_buff *cpl_close;
83 struct sk_buff *cpl_abort_req;
84 struct sk_buff *cpl_abort_rpl;
85 spinlock_t lock;
86 atomic_t refcnt;
87 volatile unsigned int state;
88 struct sockaddr_in saddr;
89 struct sockaddr_in daddr;
90 struct dst_entry *dst_cache;
91 struct sk_buff_head receive_queue;
92 struct sk_buff_head write_queue;
93 struct timer_list retry_timer;
94 int err;
95 rwlock_t callback_lock;
96 void *user_data;
97
98 u32 rcv_nxt;
99 u32 copied_seq;
100 u32 rcv_wup;
101 u32 snd_nxt;
102 u32 snd_una;
103 u32 write_seq;
104};
105
106/*
107 * connection state
108 */
109enum conn_states {
110 C3CN_STATE_CONNECTING = 1,
111 C3CN_STATE_ESTABLISHED,
112 C3CN_STATE_ACTIVE_CLOSE,
113 C3CN_STATE_PASSIVE_CLOSE,
114 C3CN_STATE_CLOSE_WAIT_1,
115 C3CN_STATE_CLOSE_WAIT_2,
116 C3CN_STATE_ABORTING,
117 C3CN_STATE_CLOSED,
118};
119
120static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
121{
122 return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
123}
124static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
125{
126 return c3cn->state == C3CN_STATE_ESTABLISHED;
127}
128
129/*
130 * Connection flags -- most are used to track close-related events.
131 */
132enum c3cn_flags {
133 C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */
134 C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */
135 C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */
136 C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */
137 C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */
138};
139
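The .c file above uses c3cn_set_flag() and related helpers against this enum, but their definitions are not part of this header. A minimal sketch of the assumed shape, treating @flags as a bitmap, is shown below; the helper bodies are assumptions, not copied from the driver.

	/* assumed shape of the flag helpers over c3cn->flags */
	static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
	{
		__set_bit(flag, &c3cn->flags);
	}

	static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
	{
		__clear_bit(flag, &c3cn->flags);
	}

	static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
	{
		return test_bit(flag, &c3cn->flags);
	}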
140/**
141 * cxgb3i_sdev_data - Per adapter data.
142 * Linked off of each Ethernet device port on the adapter.
143 * Also available via the t3cdev structure since we have pointers to our port
144 * net_device's there ...
145 *
146 * @list: list head to link elements
147 * @cdev: t3cdev adapter
148 * @client: CPL client pointer
149 * @ports: array of adapter ports
150 * @sport_map_next: next index into the port map
151 * @sport_map: source port map
152 */
153struct cxgb3i_sdev_data {
154 struct list_head list;
155 struct t3cdev *cdev;
156 struct cxgb3_client *client;
157 struct adap_ports ports;
158 unsigned int sport_map_next;
159 unsigned long sport_map[0];
160};
161#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
162#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
163
164void cxgb3i_sdev_cleanup(void);
165int cxgb3i_sdev_init(cxgb3_cpl_handler_func *);
166void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
167void cxgb3i_sdev_remove(struct t3cdev *);
168
169struct s3_conn *cxgb3i_c3cn_create(void);
170int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
171void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
172int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
173void cxgb3i_c3cn_release(struct s3_conn *);
174
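The five exported calls above form the connection API that the iSCSI glue code drives. A rough, hedged sketch of the expected call order follows; the address setup, error codes and the pdu skb are illustrative only, and cxgb3i_c3cn_connect() merely sends the active open, so pdus can only be queued once the connection reaches the established state.

	/* illustrative lifecycle of an offloaded connection (not from the patch) */
	static int example_open_and_send(struct sockaddr_in *dst,
					 struct sk_buff *pdu_skb)
	{
		struct s3_conn *c3cn = cxgb3i_c3cn_create();

		if (!c3cn)
			return -ENOMEM;
		if (cxgb3i_c3cn_connect(c3cn, dst) < 0) {	/* sends the active open */
			cxgb3i_c3cn_release(c3cn);
			return -ENOTCONN;
		}
		/* once established: queue pdu skbs (each needs TX_HEADER_LEN headroom) */
		cxgb3i_c3cn_send_pdus(c3cn, pdu_skb);
		/* RX side: call cxgb3i_c3cn_rx_credits(c3cn, bytes_read) after
		 * received data has been consumed, to return credits to the hw */
		/* tear down when the session goes away */
		cxgb3i_c3cn_release(c3cn);
		return 0;
	}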
175/**
176 * cxgb3_skb_cb - control block for received pdu state and ULP mode management.
177 *
 178 * @flags: see C3CB_FLAG_* below
179 * @ulp_mode: ULP mode/submode of sk_buff
180 * @seq: tcp sequence number
181 * @ddigest: pdu data digest
182 * @pdulen: recovered pdu length
183 * @wr_data: scratch area for tx wr
184 */
185struct cxgb3_skb_cb {
186 __u8 flags;
187 __u8 ulp_mode;
188 __u32 seq;
189 __u32 ddigest;
190 __u32 pdulen;
191 struct sk_buff *wr_data;
192};
193
194#define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0]))
195
196#define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode)
197#define skb_ulp_ddigest(skb) (CXGB3_SKB_CB(skb)->ddigest)
198#define skb_ulp_pdulen(skb) (CXGB3_SKB_CB(skb)->pdulen)
199#define skb_wr_data(skb) (CXGB3_SKB_CB(skb)->wr_data)
200
201enum c3cb_flags {
202 C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */
203 C3CB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */
204 C3CB_FLAG_COMPL = 1 << 2, /* request WR completion */
205};
206
207/**
208 * sge_opaque_hdr -
209 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
210 * and for which we must reserve space.
211 */
212struct sge_opaque_hdr {
213 void *dev;
214 dma_addr_t addr[MAX_SKB_FRAGS + 1];
215};
216
217/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
218#define TX_HEADER_LEN \
219 (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
220
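cxgb3i_c3cn_send_pdus() rejects skbs whose headroom is smaller than this. A minimal hedged sketch of satisfying the requirement when building a pdu skb (sizes and GFP flags are illustrative):

	/* illustrative: reserve headroom for the TX_DATA_WR + SGE opaque header */
	struct sk_buff *skb = alloc_skb(TX_HEADER_LEN + sizeof(struct iscsi_hdr),
					GFP_ATOMIC);

	if (skb) {
		skb_reserve(skb, TX_HEADER_LEN);
		/* the iSCSI pdu header/payload is then built starting at skb->data */
	}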
221/*
222 * get and set private ip for iscsi traffic
223 */
224#define cxgb3i_get_private_ipv4addr(ndev) \
225 (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
226#define cxgb3i_set_private_ipv4addr(ndev, addr) \
227 (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
228
229/* max. connections per adapter */
230#define CXGB3I_MAX_CONN 16384
231#endif /* _CXGB3I_OFFLOAD_H */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
new file mode 100644
index 000000000000..ce7ce8c6094c
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -0,0 +1,402 @@
1/*
2 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 * Copyright (c) 2008 Mike Christie
6 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Karen Xie (kxie@chelsio.com)
13 */
14
15#include <linux/skbuff.h>
16#include <linux/crypto.h>
17#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_host.h>
19
20#include "cxgb3i.h"
21#include "cxgb3i_pdu.h"
22
23#ifdef __DEBUG_CXGB3I_RX__
24#define cxgb3i_rx_debug cxgb3i_log_debug
25#else
26#define cxgb3i_rx_debug(fmt...)
27#endif
28
29#ifdef __DEBUG_CXGB3I_TX__
30#define cxgb3i_tx_debug cxgb3i_log_debug
31#else
32#define cxgb3i_tx_debug(fmt...)
33#endif
34
35static struct page *pad_page;
36
37/*
38 * pdu receive, interact with libiscsi_tcp
39 */
40static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
41 unsigned int offset, int offloaded)
42{
43 int status = 0;
44 int bytes_read;
45
46 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
47 switch (status) {
48 case ISCSI_TCP_CONN_ERR:
49 return -EIO;
50 case ISCSI_TCP_SUSPENDED:
51 /* no transfer - just have caller flush queue */
52 return bytes_read;
53 case ISCSI_TCP_SKB_DONE:
54 /*
55 * pdus should always fit in the skb and we should get
 56		 * segment done notification.
57 */
58 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
59 return -EFAULT;
60 case ISCSI_TCP_SEGMENT_DONE:
61 return bytes_read;
62 default:
63 iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
64 "status %d\n", status);
65 return -EINVAL;
66 }
67}
68
69static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
70 struct sk_buff *skb)
71{
72 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
73 bool offloaded = 0;
74 unsigned int offset;
75 int rc;
76
77 cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
78 conn, skb, skb->len, skb_ulp_mode(skb));
79
80 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
81 iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
82 return -EIO;
83 }
84
85 if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
86 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
87 return -EIO;
88 }
89
90 if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
91 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
92 return -EIO;
93 }
94
95 /* iscsi hdr */
96 rc = read_pdu_skb(conn, skb, 0, 0);
97 if (rc <= 0)
98 return rc;
99
100 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
101 return 0;
102
103 offset = rc;
104 if (conn->hdrdgst_en)
105 offset += ISCSI_DIGEST_SIZE;
106
107 /* iscsi data */
108 if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
109 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
110 "itt 0x%x.\n",
111 skb,
112 tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
113 tcp_conn->in.datalen,
114 ntohl(tcp_conn->in.hdr->itt));
115 offloaded = 1;
116 } else {
117 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
118 "itt 0x%x.\n",
119 skb,
120 tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
121 tcp_conn->in.datalen,
122 ntohl(tcp_conn->in.hdr->itt));
123 offset += sizeof(struct cpl_iscsi_hdr_norss);
124 }
125
126 rc = read_pdu_skb(conn, skb, offset, offloaded);
127 if (rc < 0)
128 return rc;
129 else
130 return 0;
131}
132
133/*
134 * pdu transmit, interact with libiscsi_tcp
135 */
136static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
137{
138 u8 submode = 0;
139
140 if (hcrc)
141 submode |= 1;
142 if (dcrc)
143 submode |= 2;
144 skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
145}
146
147void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
148{
149 struct iscsi_tcp_task *tcp_task = task->dd_data;
150
151 /* never reached the xmit task callout */
152 if (tcp_task->dd_data)
153 kfree_skb(tcp_task->dd_data);
154 tcp_task->dd_data = NULL;
155
156 /* MNC - Do we need a check in case this is called but
157 * cxgb3i_conn_alloc_pdu has never been called on the task */
158 cxgb3i_release_itt(task, task->hdr_itt);
159 iscsi_tcp_cleanup_task(task);
160}
161
162/*
163 * We do not support ahs yet
164 */
165int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
166{
167 struct iscsi_tcp_task *tcp_task = task->dd_data;
168 struct sk_buff *skb;
169
170 task->hdr = NULL;
171	/* always allocate room for AHS */
172 skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE +
173 TX_HEADER_LEN, GFP_ATOMIC);
174 if (!skb)
175 return -ENOMEM;
176
177 cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
178 task, opcode, skb);
179
180 tcp_task->dd_data = skb;
181 skb_reserve(skb, TX_HEADER_LEN);
182 task->hdr = (struct iscsi_hdr *)skb->data;
183 task->hdr_max = sizeof(struct iscsi_hdr);
184
185 /* data_out uses scsi_cmd's itt */
186 if (opcode != ISCSI_OP_SCSI_DATA_OUT)
187 cxgb3i_reserve_itt(task, &task->hdr->itt);
188
189 return 0;
190}
191
192int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
193 unsigned int count)
194{
195 struct iscsi_tcp_task *tcp_task = task->dd_data;
196 struct sk_buff *skb = tcp_task->dd_data;
197 struct iscsi_conn *conn = task->conn;
198 struct page *pg;
199 unsigned int datalen = count;
200 int i, padlen = iscsi_padding(count);
201 skb_frag_t *frag;
202
203 cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
204 task, task->sc, offset, count, skb);
205
206 skb_put(skb, task->hdr_len);
207 tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
208 if (!count)
209 return 0;
210
211 if (task->sc) {
212 struct scatterlist *sg;
213 struct scsi_data_buffer *sdb;
214 unsigned int sgoffset = offset;
215 struct page *sgpg;
216 unsigned int sglen;
217
218 sdb = scsi_out(task->sc);
219 sg = sdb->table.sgl;
220
221 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
222 cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n",
223 i, sg_page(sg), sg->length, sg->offset);
224
225 if (sgoffset < sg->length)
226 break;
227 sgoffset -= sg->length;
228 }
229 sgpg = sg_page(sg);
230 sglen = sg->length - sgoffset;
231
232 do {
233 int j = skb_shinfo(skb)->nr_frags;
234 unsigned int copy;
235
236 if (!sglen) {
237 sg = sg_next(sg);
238 sgpg = sg_page(sg);
239 sgoffset = 0;
240 sglen = sg->length;
241 ++i;
242 }
243 copy = min(sglen, datalen);
244 if (j && skb_can_coalesce(skb, j, sgpg,
245 sg->offset + sgoffset)) {
246 skb_shinfo(skb)->frags[j - 1].size += copy;
247 } else {
248 get_page(sgpg);
249 skb_fill_page_desc(skb, j, sgpg,
250 sg->offset + sgoffset, copy);
251 }
252 sgoffset += copy;
253 sglen -= copy;
254 datalen -= copy;
255 } while (datalen);
256 } else {
257 pg = virt_to_page(task->data);
258
259 while (datalen) {
260 i = skb_shinfo(skb)->nr_frags;
261 frag = &skb_shinfo(skb)->frags[i];
262
263 get_page(pg);
264 frag->page = pg;
265 frag->page_offset = 0;
266 frag->size = min((unsigned int)PAGE_SIZE, datalen);
267
268 skb_shinfo(skb)->nr_frags++;
269 datalen -= frag->size;
270 pg++;
271 }
272 }
273
274 if (padlen) {
275 i = skb_shinfo(skb)->nr_frags;
276 frag = &skb_shinfo(skb)->frags[i];
277 frag->page = pad_page;
278 frag->page_offset = 0;
279 frag->size = padlen;
280 skb_shinfo(skb)->nr_frags++;
281 }
282
283 datalen = count + padlen;
284 skb->data_len += datalen;
285 skb->truesize += datalen;
286 skb->len += datalen;
287 return 0;
288}
289
290int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
291{
292 struct iscsi_tcp_task *tcp_task = task->dd_data;
293 struct sk_buff *skb = tcp_task->dd_data;
294 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
295 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
296 unsigned int datalen;
297 int err;
298
299 if (!skb)
300 return 0;
301
302 datalen = skb->data_len;
303 tcp_task->dd_data = NULL;
304 err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
305 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
306 task, skb, skb->len, skb->data_len, err);
307 if (err > 0) {
308 int pdulen = err;
309
310 if (task->conn->hdrdgst_en)
311 pdulen += ISCSI_DIGEST_SIZE;
312 if (datalen && task->conn->datadgst_en)
313 pdulen += ISCSI_DIGEST_SIZE;
314
315 task->conn->txdata_octets += pdulen;
316 return 0;
317 }
318
319 if (err < 0 && err != -EAGAIN) {
320 kfree_skb(skb);
321 cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
322 task->itt, skb, skb->len, skb->data_len, err);
323 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
324 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
325 return err;
326 }
327 /* reset skb to send when we are called again */
328 tcp_task->dd_data = skb;
329 return -EAGAIN;
330}
331
332int cxgb3i_pdu_init(void)
333{
334 pad_page = alloc_page(GFP_KERNEL);
335 if (!pad_page)
336 return -ENOMEM;
337 memset(page_address(pad_page), 0, PAGE_SIZE);
338 return 0;
339}
340
341void cxgb3i_pdu_cleanup(void)
342{
343 if (pad_page) {
344 __free_page(pad_page);
345 pad_page = NULL;
346 }
347}
348
349void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
350{
351 struct sk_buff *skb;
352 unsigned int read = 0;
353 struct iscsi_conn *conn = c3cn->user_data;
354 int err = 0;
355
356 cxgb3i_rx_debug("cn 0x%p.\n", c3cn);
357
358 read_lock(&c3cn->callback_lock);
359 if (unlikely(!conn || conn->suspend_rx)) {
360 cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
361 conn, conn ? conn->id : 0xFF,
362 conn ? conn->suspend_rx : 0xFF);
363 read_unlock(&c3cn->callback_lock);
364 return;
365 }
366 skb = skb_peek(&c3cn->receive_queue);
367 while (!err && skb) {
368 __skb_unlink(skb, &c3cn->receive_queue);
369 read += skb_ulp_pdulen(skb);
370 err = cxgb3i_conn_read_pdu_skb(conn, skb);
371 __kfree_skb(skb);
372 skb = skb_peek(&c3cn->receive_queue);
373 }
374 read_unlock(&c3cn->callback_lock);
375 if (c3cn) {
376 c3cn->copied_seq += read;
377 cxgb3i_c3cn_rx_credits(c3cn, read);
378 }
379 conn->rxdata_octets += read;
380}
381
382void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
383{
384 struct iscsi_conn *conn = c3cn->user_data;
385
386 cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
387 if (conn) {
388 cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
389 scsi_queue_work(conn->session->host, &conn->xmitwork);
390 }
391}
392
393void cxgb3i_conn_closing(struct s3_conn *c3cn)
394{
395 struct iscsi_conn *conn;
396
397 read_lock(&c3cn->callback_lock);
398 conn = c3cn->user_data;
399 if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
400 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
401 read_unlock(&c3cn->callback_lock);
402}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
new file mode 100644
index 000000000000..a3f685cc2362
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
@@ -0,0 +1,59 @@
1/*
 2 * cxgb3i_pdu.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_ULP2_PDU_H__
14#define __CXGB3I_ULP2_PDU_H__
15
16struct cpl_iscsi_hdr_norss {
17 union opcode_tid ot;
18 u16 pdu_len_ddp;
19 u16 len;
20 u32 seq;
21 u16 urg;
22 u8 rsvd;
23 u8 status;
24};
25
26struct cpl_rx_data_ddp_norss {
27 union opcode_tid ot;
28 u16 urg;
29 u16 len;
30 u32 seq;
31 u32 nxt_seq;
32 u32 ulp_crc;
33 u32 ddp_status;
34};
35
36#define RX_DDP_STATUS_IPP_SHIFT 27 /* invalid pagepod */
37#define RX_DDP_STATUS_TID_SHIFT 26 /* tid mismatch */
38#define RX_DDP_STATUS_COLOR_SHIFT 25 /* color mismatch */
39#define RX_DDP_STATUS_OFFSET_SHIFT 24 /* offset mismatch */
40#define RX_DDP_STATUS_ULIMIT_SHIFT 23 /* ulimit error */
41#define RX_DDP_STATUS_TAG_SHIFT 22 /* tag mismatch */
42#define RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
43#define RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
44#define RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
45#define RX_DDP_STATUS_PPP_SHIFT 18 /* pagepod parity error */
46#define RX_DDP_STATUS_LLIMIT_SHIFT 17 /* llimit error */
47#define RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
48#define RX_DDP_STATUS_PMM_SHIFT 15 /* pagepod mismatch */
49
50#define ULP2_FLAG_DATA_READY 0x1
51#define ULP2_FLAG_DATA_DDPED 0x2
52#define ULP2_FLAG_HCRC_ERROR 0x10
53#define ULP2_FLAG_DCRC_ERROR 0x20
54#define ULP2_FLAG_PAD_ERROR 0x40
55
56void cxgb3i_conn_closing(struct s3_conn *);
57void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
58void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
59#endif
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 3d50cabca7ee..53664765570a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -24,6 +24,7 @@
24#include <scsi/scsi_dh.h> 24#include <scsi/scsi_dh.h>
25 25
26#define RDAC_NAME "rdac" 26#define RDAC_NAME "rdac"
27#define RDAC_RETRY_COUNT 5
27 28
28/* 29/*
29 * LSI mode page stuff 30 * LSI mode page stuff
@@ -386,6 +387,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
386 struct c9_inquiry *inqp; 387 struct c9_inquiry *inqp;
387 388
388 h->lun_state = RDAC_LUN_UNOWNED; 389 h->lun_state = RDAC_LUN_UNOWNED;
390 h->state = RDAC_STATE_ACTIVE;
389 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); 391 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
390 if (err == SCSI_DH_OK) { 392 if (err == SCSI_DH_OK) {
391 inqp = &h->inq.c9; 393 inqp = &h->inq.c9;
@@ -477,21 +479,27 @@ static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
477{ 479{
478 struct request *rq; 480 struct request *rq;
479 struct request_queue *q = sdev->request_queue; 481 struct request_queue *q = sdev->request_queue;
480 int err = SCSI_DH_RES_TEMP_UNAVAIL; 482 int err, retry_cnt = RDAC_RETRY_COUNT;
481 483
484retry:
485 err = SCSI_DH_RES_TEMP_UNAVAIL;
482 rq = rdac_failover_get(sdev, h); 486 rq = rdac_failover_get(sdev, h);
483 if (!rq) 487 if (!rq)
484 goto done; 488 goto done;
485 489
486 sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n"); 490 sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
491 (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
487 492
488 err = blk_execute_rq(q, NULL, rq, 1); 493 err = blk_execute_rq(q, NULL, rq, 1);
489 if (err != SCSI_DH_OK) 494 blk_put_request(rq);
495 if (err != SCSI_DH_OK) {
490 err = mode_select_handle_sense(sdev, h->sense); 496 err = mode_select_handle_sense(sdev, h->sense);
497 if (err == SCSI_DH_RETRY && retry_cnt--)
498 goto retry;
499 }
491 if (err == SCSI_DH_OK) 500 if (err == SCSI_DH_OK)
492 h->state = RDAC_STATE_ACTIVE; 501 h->state = RDAC_STATE_ACTIVE;
493 502
494 blk_put_request(rq);
495done: 503done:
496 return err; 504 return err;
497} 505}
@@ -594,6 +602,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
594 {"SUN", "LCSM100_F"}, 602 {"SUN", "LCSM100_F"},
595 {"DELL", "MD3000"}, 603 {"DELL", "MD3000"},
596 {"DELL", "MD3000i"}, 604 {"DELL", "MD3000i"},
605 {"LSI", "INF-01-00"},
606 {"ENGENIO", "INF-01-00"},
597 {NULL, NULL}, 607 {NULL, NULL},
598}; 608};
599 609
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index a73a6bbb1b2b..976cdd5c94ef 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1626,8 +1626,15 @@ static void map_dma(unsigned int i, struct hostdata *ha)
1626 1626
1627 cpp->sense_len = SCSI_SENSE_BUFFERSIZE; 1627 cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
1628 1628
1629 count = scsi_dma_map(SCpnt); 1629 if (!scsi_sg_count(SCpnt)) {
1630 BUG_ON(count < 0); 1630 cpp->data_len = 0;
1631 return;
1632 }
1633
1634 count = pci_map_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
1635 pci_dir);
1636 BUG_ON(!count);
1637
1631 scsi_for_each_sg(SCpnt, sg, count, k) { 1638 scsi_for_each_sg(SCpnt, sg, count, k) {
1632 cpp->sglist[k].address = H2DEV(sg_dma_address(sg)); 1639 cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
1633 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg)); 1640 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
@@ -1655,7 +1662,9 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
1655 pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr), 1662 pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
1656 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); 1663 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1657 1664
1658 scsi_dma_unmap(SCpnt); 1665 if (scsi_sg_count(SCpnt))
1666 pci_unmap_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
1667 pci_dir);
1659 1668
1660 if (!DEV2H(cpp->data_len)) 1669 if (!DEV2H(cpp->data_len))
1661 pci_dir = PCI_DMA_BIDIRECTIONAL; 1670 pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 952505c006df..152dd15db276 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -14,8 +14,8 @@
14 * neuffer@goofy.zdv.uni-mainz.de * 14 * neuffer@goofy.zdv.uni-mainz.de *
15 * a.arnold@kfa-juelich.de * 15 * a.arnold@kfa-juelich.de *
16 * * 16 * *
17 * Updated 2002 by Alan Cox <alan@redhat.com> for Linux * 17 * Updated 2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> for *
18 * 2.5.x and the newer locking and error handling * 18 * Linux 2.5.x and the newer locking and error handling *
19 * * 19 * *
20 * This program is free software; you can redistribute it * 20 * This program is free software; you can redistribute it *
21 * and/or modify it under the terms of the GNU General * 21 * and/or modify it under the terms of the GNU General *
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 62a4618530d0..a680e18b5f3b 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1453,7 +1453,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1453 offset = 0; 1453 offset = 0;
1454 1454
1455 if (offset) { 1455 if (offset) {
1456 int rounded_up, one_clock; 1456 int one_clock;
1457 1457
1458 if (period > esp->max_period) { 1458 if (period > esp->max_period) {
1459 period = offset = 0; 1459 period = offset = 0;
@@ -1463,9 +1463,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1463 goto do_reject; 1463 goto do_reject;
1464 1464
1465 one_clock = esp->ccycle / 1000; 1465 one_clock = esp->ccycle / 1000;
1466 rounded_up = (period << 2); 1466 stp = DIV_ROUND_UP(period << 2, one_clock);
1467 rounded_up = (rounded_up + one_clock - 1) / one_clock;
1468 stp = rounded_up;
1469 if (stp && esp->rev >= FAS236) { 1467 if (stp && esp->rev >= FAS236) {
1470 if (stp >= 50) 1468 if (stp >= 50)
1471 stp--; 1469 stp--;
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
new file mode 100644
index 000000000000..b78da06d7c0e
--- /dev/null
+++ b/drivers/scsi/fcoe/Makefile
@@ -0,0 +1,8 @@
1# $Id: Makefile
2
3obj-$(CONFIG_FCOE) += fcoe.o
4
5fcoe-y := \
6 libfcoe.o \
7 fcoe_sw.o \
8 fc_transport_fcoe.o
diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
new file mode 100644
index 000000000000..bf7fe6fc0820
--- /dev/null
+++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
@@ -0,0 +1,446 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/pci.h>
21#include <scsi/libfcoe.h>
22#include <scsi/fc_transport_fcoe.h>
23
24/* internal fcoe transport */
25struct fcoe_transport_internal {
26 struct fcoe_transport *t;
27 struct net_device *netdev;
28 struct list_head list;
29};
30
31/* fcoe transports list and its lock */
32static LIST_HEAD(fcoe_transports);
33static DEFINE_MUTEX(fcoe_transports_lock);
34
35/**
36 * fcoe_transport_default - returns ptr to the default transport fcoe_sw
37 **/
38struct fcoe_transport *fcoe_transport_default(void)
39{
40 return &fcoe_sw_transport;
41}
42
43/**
 44 * fcoe_transport_pcidev - get the pci dev from a netdev
 45 * @netdev: the netdev that the pci dev will be retrieved from
 46 *
 47 * Returns: NULL or the corresponding pci_dev
48 **/
49struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev)
50{
51 if (!netdev->dev.parent)
52 return NULL;
53 return to_pci_dev(netdev->dev.parent);
54}
55
56/**
 57 * fcoe_transport_device_lookup - check whether a netdev is managed by the
 58 * given transport
 59 * @t: the fcoe transport whose device list is searched
 60 * @netdev: the netdev to look up
 61 *
 62 * Walks the transport's device list looking for an entry that references
 63 * the given netdev.
 64 *
 65 * Returns: the matching fcoe_transport_internal, or NULL if not found
66 **/
67static struct fcoe_transport_internal *fcoe_transport_device_lookup(
68 struct fcoe_transport *t, struct net_device *netdev)
69{
70 struct fcoe_transport_internal *ti;
71
 72	/* look up the netdev in this transport's device list */
73 mutex_lock(&t->devlock);
74 list_for_each_entry(ti, &t->devlist, list) {
75 if (ti->netdev == netdev) {
76 mutex_unlock(&t->devlock);
77 return ti;
78 }
79 }
80 mutex_unlock(&t->devlock);
81 return NULL;
82}
83/**
84 * fcoe_transport_device_add - assign a transport to a device
 85 * @t: the fcoe transport the device is assigned to
 86 * @netdev: the netdev to be attached to the transport
 87 *
 88 * Adds the netdev to the transport's device list if it is not already there.
89 *
90 * Returns: 0 for success
91 **/
92static int fcoe_transport_device_add(struct fcoe_transport *t,
93 struct net_device *netdev)
94{
95 struct fcoe_transport_internal *ti;
96
97 ti = fcoe_transport_device_lookup(t, netdev);
98 if (ti) {
99 printk(KERN_DEBUG "fcoe_transport_device_add:"
100 "device %s is already added to transport %s\n",
101 netdev->name, t->name);
102 return -EEXIST;
103 }
104 /* allocate an internal struct to host the netdev and the list */
105 ti = kzalloc(sizeof(*ti), GFP_KERNEL);
106 if (!ti)
107 return -ENOMEM;
108
109 ti->t = t;
110 ti->netdev = netdev;
111 INIT_LIST_HEAD(&ti->list);
112 dev_hold(ti->netdev);
113
114 mutex_lock(&t->devlock);
115 list_add(&ti->list, &t->devlist);
116 mutex_unlock(&t->devlock);
117
118 printk(KERN_DEBUG "fcoe_transport_device_add:"
119 "device %s added to transport %s\n",
120 netdev->name, t->name);
121
122 return 0;
123}
124
125/**
126 * fcoe_transport_device_remove - remove a device from its transport
 127 * @netdev: the netdev to be detached from the transport
128 *
129 * this removes the device from the transport so the given transport will
130 * not manage this device any more
131 *
132 * Returns: 0 for success
133 **/
134static int fcoe_transport_device_remove(struct fcoe_transport *t,
135 struct net_device *netdev)
136{
137 struct fcoe_transport_internal *ti;
138
139 ti = fcoe_transport_device_lookup(t, netdev);
140 if (!ti) {
141 printk(KERN_DEBUG "fcoe_transport_device_remove:"
142 "device %s is not managed by transport %s\n",
143 netdev->name, t->name);
144 return -ENODEV;
145 }
146 mutex_lock(&t->devlock);
147 list_del(&ti->list);
148 mutex_unlock(&t->devlock);
149 printk(KERN_DEBUG "fcoe_transport_device_remove:"
150 "device %s removed from transport %s\n",
151 netdev->name, t->name);
152 dev_put(ti->netdev);
153 kfree(ti);
154 return 0;
155}
156
157/**
158 * fcoe_transport_device_remove_all - remove all from transport devlist
159 *
 160 * This removes every device from the transport's device list, so the
 161 * transport no longer manages any of them.
 162 *
 163 * Returns: none
164 **/
165static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
166{
167 struct fcoe_transport_internal *ti, *tmp;
168
169 mutex_lock(&t->devlock);
170 list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
171 list_del(&ti->list);
172 kfree(ti);
173 }
174 mutex_unlock(&t->devlock);
175}
176
177/**
178 * fcoe_transport_match - use the bus device match function to match the hw
179 * @t: the fcoe transport
 180 * @netdev: the net device to match against
 181 *
 182 * This function checks whether the given transport wants to manage the
 183 * input netdev. If the transport implements a match function, it is called;
 184 * otherwise the pci vendor and device ids are compared.
 185 *
 186 * Returns: true on a match
187 **/
188static bool fcoe_transport_match(struct fcoe_transport *t,
189 struct net_device *netdev)
190{
191 /* match transport by vendor and device id */
192 struct pci_dev *pci;
193
194 pci = fcoe_transport_pcidev(netdev);
195
196 if (pci) {
197 printk(KERN_DEBUG "fcoe_transport_match:"
198 "%s:%x:%x -- %s:%x:%x\n",
199 t->name, t->vendor, t->device,
200 netdev->name, pci->vendor, pci->device);
201
202 /* if transport supports match */
203 if (t->match)
204 return t->match(netdev);
205
206 /* else just compare the vendor and device id: pci only */
207 return (t->vendor == pci->vendor) && (t->device == pci->device);
208 }
209 return false;
210}
211
212/**
213 * fcoe_transport_lookup - check if the transport is already registered
 214 * @netdev: the net device used to look up a registered transport
 215 *
 216 * This compares the parent device (pci) vendor and device id
 217 *
 218 * Returns: the matching transport, or the default sw transport if none matches
219 *
220 * TODO - return default sw transport if no other transport is found
221 **/
222static struct fcoe_transport *fcoe_transport_lookup(
223 struct net_device *netdev)
224{
225 struct fcoe_transport *t;
226
227 mutex_lock(&fcoe_transports_lock);
228 list_for_each_entry(t, &fcoe_transports, list) {
229 if (fcoe_transport_match(t, netdev)) {
230 mutex_unlock(&fcoe_transports_lock);
231 return t;
232 }
233 }
234 mutex_unlock(&fcoe_transports_lock);
235
236 printk(KERN_DEBUG "fcoe_transport_lookup:"
237 "use default transport for %s\n", netdev->name);
238 return fcoe_transport_default();
239}
240
241/**
242 * fcoe_transport_register - adds a fcoe transport to the fcoe transports list
243 * @t: ptr to the fcoe transport to be added
244 *
245 * Returns: 0 for success
246 **/
247int fcoe_transport_register(struct fcoe_transport *t)
248{
249 struct fcoe_transport *tt;
250
251 /* TODO - add fcoe_transport specific initialization here */
252 mutex_lock(&fcoe_transports_lock);
253 list_for_each_entry(tt, &fcoe_transports, list) {
254 if (tt == t) {
255 mutex_unlock(&fcoe_transports_lock);
256 return -EEXIST;
257 }
258 }
259 list_add_tail(&t->list, &fcoe_transports);
260 mutex_unlock(&fcoe_transports_lock);
261
262 mutex_init(&t->devlock);
263 INIT_LIST_HEAD(&t->devlist);
264
265 printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
266
267 return 0;
268}
269EXPORT_SYMBOL_GPL(fcoe_transport_register);
270
271/**
 272 * fcoe_transport_unregister - remove the transport from the fcoe transports list
273 * @t: ptr to the fcoe transport to be removed
274 *
275 * Returns: 0 for success
276 **/
277int fcoe_transport_unregister(struct fcoe_transport *t)
278{
279 struct fcoe_transport *tt, *tmp;
280
281 mutex_lock(&fcoe_transports_lock);
282 list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
283 if (tt == t) {
284 list_del(&t->list);
285 mutex_unlock(&fcoe_transports_lock);
286 fcoe_transport_device_remove_all(t);
287 printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
288 t->name);
289 return 0;
290 }
291 }
292 mutex_unlock(&fcoe_transports_lock);
293 return -ENODEV;
294}
295EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
296
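fcoe_transport_register()/fcoe_transport_unregister() above are the hooks an offload module uses to plug into this list. A hedged sketch of a hypothetical offload driver using them follows; the transport name, pci ids and callback bodies are invented for illustration, only fcoe_transport fields referenced in this file are used, and the create/destroy return type is assumed to be int.

	/* hypothetical offload module registering its own fcoe transport */
	static int my_fcoe_create(struct net_device *netdev)
	{
		/* bring up an FCoE instance on this port */
		return 0;
	}

	static int my_fcoe_destroy(struct net_device *netdev)
	{
		/* tear the instance back down */
		return 0;
	}

	static struct fcoe_transport my_fcoe_transport = {
		.name    = "my_fcoe",
		.vendor  = 0x8086,	/* matched against the parent pci_dev */
		.device  = 0x10fb,	/* illustrative device id */
		.create  = my_fcoe_create,
		.destroy = my_fcoe_destroy,
		/* a .match callback may be supplied instead of vendor/device ids */
	};

	static int __init my_fcoe_init(void)
	{
		return fcoe_transport_register(&my_fcoe_transport);
	}

	static void __exit my_fcoe_exit(void)
	{
		fcoe_transport_unregister(&my_fcoe_transport);
	}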
297/*
298 * fcoe_load_transport_driver - load an offload driver by alias name
299 * @netdev: the target net device
300 *
 301 * Requests an offload driver module as the fcoe transport; if that fails, it
 302 * falls back to the SW HBA (fcoe_sw) as its transport.
 303 *
 304 * TODO -
 305 * 1. supports only PCI devices
 306 * 2. needs fixes for VLAN and bonding
307 * 3. pure hw fcoe hba may not have netdev
308 *
309 * Returns: 0 for success
310 **/
311int fcoe_load_transport_driver(struct net_device *netdev)
312{
313 struct pci_dev *pci;
314 struct device *dev = netdev->dev.parent;
315
316 if (fcoe_transport_lookup(netdev)) {
 317		/* a transport (possibly the default) is already available */
318 printk(KERN_DEBUG "fcoe: already loaded transport for %s\n",
319 netdev->name);
320 return -EEXIST;
321 }
322
323 pci = to_pci_dev(dev);
324 if (dev->bus != &pci_bus_type) {
 325		printk(KERN_DEBUG "fcoe: supports only PCI devices\n");
326 return -ENODEV;
327 }
328 printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n",
329 pci->vendor, pci->device);
330
331 return request_module("fcoe-pci-0x%04x-0x%04x",
332 pci->vendor, pci->device);
333
334}
335EXPORT_SYMBOL_GPL(fcoe_load_transport_driver);
336
337/**
338 * fcoe_transport_attach - load transport to fcoe
339 * @netdev: the netdev the transport to be attached to
340 *
 341 * This will look for an existing offload driver; if none is found, it falls
 342 * back to the default sw hba (fcoe_sw) as the fcoe transport.
343 *
344 * Returns: 0 for success
345 **/
346int fcoe_transport_attach(struct net_device *netdev)
347{
348 struct fcoe_transport *t;
349
350 /* find the corresponding transport */
351 t = fcoe_transport_lookup(netdev);
352 if (!t) {
353 printk(KERN_DEBUG "fcoe_transport_attach"
354 ":no transport for %s:use %s\n",
355 netdev->name, t->name);
356 return -ENODEV;
357 }
358 /* add to the transport */
359 if (fcoe_transport_device_add(t, netdev)) {
360 printk(KERN_DEBUG "fcoe_transport_attach"
 361		       ":failed to add %s to transport %s\n",
362 netdev->name, t->name);
363 return -EIO;
364 }
365 /* transport create function */
366 if (t->create)
367 t->create(netdev);
368
369 printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
370 t->name, netdev->name);
371 return 0;
372}
373EXPORT_SYMBOL_GPL(fcoe_transport_attach);
374
375/**
376 * fcoe_transport_release - unload transport from fcoe
377 * @netdev: the net device on which fcoe is to be released
378 *
379 * Returns: 0 for success
380 **/
381int fcoe_transport_release(struct net_device *netdev)
382{
383 struct fcoe_transport *t;
384
385 /* find the corresponding transport */
386 t = fcoe_transport_lookup(netdev);
387 if (!t) {
388 printk(KERN_DEBUG "fcoe_transport_release:"
389 "no transport for %s:use %s\n",
390 netdev->name, t->name);
391 return -ENODEV;
392 }
393 /* remove the device from the transport */
394 if (fcoe_transport_device_remove(t, netdev)) {
395 printk(KERN_DEBUG "fcoe_transport_release:"
 396		       "failed to remove %s from transport %s\n",
397 netdev->name, t->name);
398 return -EIO;
399 }
400 /* transport destroy function */
401 if (t->destroy)
402 t->destroy(netdev);
403
404 printk(KERN_DEBUG "fcoe_transport_release:"
 405	       "device %s detached from transport %s\n",
406 netdev->name, t->name);
407
408 return 0;
409}
410EXPORT_SYMBOL_GPL(fcoe_transport_release);
411
412/**
413 * fcoe_transport_init - initializes fcoe transport layer
414 *
 415 * This prepares the fcoe transport layer.
 416 *
 417 * Returns: 0
418 **/
419int __init fcoe_transport_init(void)
420{
421 INIT_LIST_HEAD(&fcoe_transports);
422 mutex_init(&fcoe_transports_lock);
423 return 0;
424}
425
426/**
427 * fcoe_transport_exit - cleans up the fcoe transport layer
 428 * This cleans up the fcoe transport layer, removing every transport on the
 429 * list; note that the transport destroy function is not called here.
 430 *
 431 * Returns: 0
432 **/
433int __exit fcoe_transport_exit(void)
434{
435 struct fcoe_transport *t, *tmp;
436
437 mutex_lock(&fcoe_transports_lock);
438 list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
439 list_del(&t->list);
440 mutex_unlock(&fcoe_transports_lock);
441 fcoe_transport_device_remove_all(t);
442 mutex_lock(&fcoe_transports_lock);
443 }
444 mutex_unlock(&fcoe_transports_lock);
445 return 0;
446}
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
new file mode 100644
index 000000000000..dc4cd5e25760
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sw.c
@@ -0,0 +1,494 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/kernel.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/if_vlan.h>
29#include <net/rtnetlink.h>
30
31#include <scsi/fc/fc_els.h>
32#include <scsi/fc/fc_encaps.h>
33#include <scsi/fc/fc_fs.h>
34#include <scsi/scsi_transport.h>
35#include <scsi/scsi_transport_fc.h>
36
37#include <scsi/libfc.h>
38#include <scsi/libfcoe.h>
39#include <scsi/fc_transport_fcoe.h>
40
41#define FCOE_SW_VERSION "0.1"
42#define FCOE_SW_NAME "fcoesw"
43#define FCOE_SW_VENDOR "Open-FCoE.org"
44
45#define FCOE_MAX_LUN 255
46#define FCOE_MAX_FCP_TARGET 256
47
48#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
49
50#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
51#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
52
53static struct scsi_transport_template *scsi_transport_fcoe_sw;
54
55struct fc_function_template fcoe_sw_transport_function = {
56 .show_host_node_name = 1,
57 .show_host_port_name = 1,
58 .show_host_supported_classes = 1,
59 .show_host_supported_fc4s = 1,
60 .show_host_active_fc4s = 1,
61 .show_host_maxframe_size = 1,
62
63 .show_host_port_id = 1,
64 .show_host_supported_speeds = 1,
65 .get_host_speed = fc_get_host_speed,
66 .show_host_speed = 1,
67 .show_host_port_type = 1,
68 .get_host_port_state = fc_get_host_port_state,
69 .show_host_port_state = 1,
70 .show_host_symbolic_name = 1,
71
72 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
73 .show_rport_maxframe_size = 1,
74 .show_rport_supported_classes = 1,
75
76 .show_host_fabric_name = 1,
77 .show_starget_node_name = 1,
78 .show_starget_port_name = 1,
79 .show_starget_port_id = 1,
80 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
81 .show_rport_dev_loss_tmo = 1,
82 .get_fc_host_stats = fc_get_host_stats,
83 .issue_fc_host_lip = fcoe_reset,
84
85 .terminate_rport_io = fc_rport_terminate_io,
86};
87
88static struct scsi_host_template fcoe_sw_shost_template = {
89 .module = THIS_MODULE,
90 .name = "FCoE Driver",
91 .proc_name = FCOE_SW_NAME,
92 .queuecommand = fc_queuecommand,
93 .eh_abort_handler = fc_eh_abort,
94 .eh_device_reset_handler = fc_eh_device_reset,
95 .eh_host_reset_handler = fc_eh_host_reset,
96 .slave_alloc = fc_slave_alloc,
97 .change_queue_depth = fc_change_queue_depth,
98 .change_queue_type = fc_change_queue_type,
99 .this_id = -1,
100 .cmd_per_lun = 32,
101 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
102 .use_clustering = ENABLE_CLUSTERING,
103 .sg_tablesize = SG_ALL,
104 .max_sectors = 0xffff,
105};
106
107/*
108 * fcoe_sw_lport_config - sets up the fc_lport
109 * @lp: ptr to the fc_lport
111 *
112 * Returns: 0 for success
113 *
114 */
115static int fcoe_sw_lport_config(struct fc_lport *lp)
116{
117 int i = 0;
118
119 lp->link_status = 0;
120 lp->max_retry_count = 3;
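	/* the timeout values below are in milliseconds: 2 s E_D_TOV, 4 s R_A_TOV */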
121 lp->e_d_tov = 2 * 1000; /* FC-FS default */
122 lp->r_a_tov = 2 * 2 * 1000;
123 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
124 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
125
126 /*
127 * allocate per cpu stats block
128 */
129 for_each_online_cpu(i)
130 lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
131 GFP_KERNEL);
132
133 /* lport fc_lport related configuration */
134 fc_lport_config(lp);
135
136 return 0;
137}
138
139/*
140 * fcoe_sw_netdev_config - sets up fcoe_softc for lport and network
141 * related properties
142 * @lp : ptr to the fc_lport
143 * @netdev : ptr to the associated netdevice struct
144 *
145 * Must be called after fcoe_sw_lport_config() as it will use lport mutex
146 *
147 * Returns : 0 for success
148 *
149 */
150static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
151{
152 u32 mfs;
153 u64 wwnn, wwpn;
154 struct fcoe_softc *fc;
155 u8 flogi_maddr[ETH_ALEN];
156
157 /* Setup lport private data to point to fcoe softc */
158 fc = lport_priv(lp);
159 fc->lp = lp;
160 fc->real_dev = netdev;
161 fc->phys_dev = netdev;
162
163	/* Use the real device for ethtool ops (e.g. get_pauseparam) on VLANs */
164 if (netdev->priv_flags & IFF_802_1Q_VLAN)
165 fc->phys_dev = vlan_dev_real_dev(netdev);
166
167	/* Bonding devices are not supported */
168 if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
169 (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
170 (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
171 return -EOPNOTSUPP;
172 }
173
174 /*
175 * Determine max frame size based on underlying device and optional
176 * user-configured limit. If the MFS is too low, fcoe_link_ok()
177 * will return 0, so do this first.
178 */
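	/*
	 * For example, assuming a 14-byte FCoE header and an 8-byte CRC/EOF
	 * trailer, a 2500-byte mini-jumbo MTU yields an MFS of 2478 bytes.
	 */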
179 mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
180 sizeof(struct fcoe_crc_eof));
181 if (fc_set_mfs(lp, mfs))
182 return -EINVAL;
183
184 lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
185 if (!fcoe_link_ok(lp))
186 lp->link_status |= FC_LINK_UP;
187
188 /* offload features support */
189 if (fc->real_dev->features & NETIF_F_SG)
190 lp->sg_supp = 1;
191
192
193 skb_queue_head_init(&fc->fcoe_pending_queue);
194
195 /* setup Source Mac Address */
196 memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
197 fc->real_dev->addr_len);
198
199 wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
200 fc_set_wwnn(lp, wwnn);
201 /* XXX - 3rd arg needs to be vlan id */
202 wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
203 fc_set_wwpn(lp, wwpn);
204
205 /*
206 * Add FCoE MAC address as second unicast MAC address
207 * or enter promiscuous mode if not capable of listening
208 * for multiple unicast MACs.
209 */
210 rtnl_lock();
211 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
212 dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
213 rtnl_unlock();
214
215 /*
216 * setup the receive function from ethernet driver
217 * on the ethertype for the given device
218 */
219 fc->fcoe_packet_type.func = fcoe_rcv;
220 fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
221 fc->fcoe_packet_type.dev = fc->real_dev;
222 dev_add_pack(&fc->fcoe_packet_type);
223
224 return 0;
225}
226
227/*
228 * fcoe_sw_shost_config - sets up fc_lport->host
229 * @lp : ptr to the fc_lport
230 * @shost : ptr to the associated scsi host
231 * @dev : device associated to scsi host
232 *
233 * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
234 *
235 * Returns : 0 for success
236 *
237 */
238static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
239 struct device *dev)
240{
241 int rc = 0;
242
243 /* lport scsi host config */
244 lp->host = shost;
245
246 lp->host->max_lun = FCOE_MAX_LUN;
247 lp->host->max_id = FCOE_MAX_FCP_TARGET;
248 lp->host->max_channel = 0;
249 lp->host->transportt = scsi_transport_fcoe_sw;
250
251 /* add the new host to the SCSI-ml */
252 rc = scsi_add_host(lp->host, dev);
253 if (rc) {
254 FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
255 return rc;
256 }
257 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
258 FCOE_SW_NAME, FCOE_SW_VERSION,
259 fcoe_netdev(lp)->name);
260
261 return 0;
262}
263
264/*
265 * fcoe_sw_em_config - allocates em for this lport
266 * @lp: the port that the exchange manager is to be allocated for
267 *
268 * Returns : 0 on success
269 */
270static inline int fcoe_sw_em_config(struct fc_lport *lp)
271{
272 BUG_ON(lp->emp);
273
274 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
275 FCOE_MIN_XID, FCOE_MAX_XID);
276 if (!lp->emp)
277 return -ENOMEM;
278
279 return 0;
280}
281
282/*
283 * fcoe_sw_destroy - FCoE software HBA tear-down function
284 * @netdev: ptr to the associated net_device
285 *
286 * Returns: 0 for success
287 */
288static int fcoe_sw_destroy(struct net_device *netdev)
289{
290 int cpu;
291 struct fc_lport *lp = NULL;
292 struct fcoe_softc *fc;
293 u8 flogi_maddr[ETH_ALEN];
294
295 BUG_ON(!netdev);
296
297 printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
298 netdev->name);
299
300 lp = fcoe_hostlist_lookup(netdev);
301 if (!lp)
302 return -ENODEV;
303
304 fc = fcoe_softc(lp);
305
306 /* Logout of the fabric */
307 fc_fabric_logoff(lp);
308
309 /* Remove the instance from fcoe's list */
310 fcoe_hostlist_remove(lp);
311
312 /* Don't listen for Ethernet packets anymore */
313 dev_remove_pack(&fc->fcoe_packet_type);
314
315 /* Cleanup the fc_lport */
316 fc_lport_destroy(lp);
317 fc_fcp_destroy(lp);
318
319 /* Detach from the scsi-ml */
320 fc_remove_host(lp->host);
321 scsi_remove_host(lp->host);
322
323 /* There are no more rports or I/O, free the EM */
324 if (lp->emp)
325 fc_exch_mgr_free(lp->emp);
326
327 /* Delete secondary MAC addresses */
328 rtnl_lock();
329 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
330 dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
331 if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
332 dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
333 rtnl_unlock();
334
335	/* Free skbs queued for this lport on the per-CPU receive threads */
336 fcoe_percpu_clean(lp);
337
338 /* Free existing skbs */
339 fcoe_clean_pending_queue(lp);
340
341 /* Free memory used by statistical counters */
342 for_each_online_cpu(cpu)
343 kfree(lp->dev_stats[cpu]);
344
345 /* Release the net_device and Scsi_Host */
346 dev_put(fc->real_dev);
347 scsi_host_put(lp->host);
348
349 return 0;
350}
351
352static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
353 .frame_send = fcoe_xmit,
354};
355
356/*
357 * fcoe_sw_create - this function creates the fcoe interface
358 * @netdev: pointer to the associated netdevice
359 *
360 * Creates fc_lport struct and scsi_host for lport, configures lport
361 * and starts fabric login.
362 *
363 * Returns : 0 on success
364 */
365static int fcoe_sw_create(struct net_device *netdev)
366{
367 int rc;
368 struct fc_lport *lp = NULL;
369 struct fcoe_softc *fc;
370 struct Scsi_Host *shost;
371
372 BUG_ON(!netdev);
373
374 printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
375 netdev->name);
376
377 lp = fcoe_hostlist_lookup(netdev);
378 if (lp)
379 return -EEXIST;
380
381 shost = fcoe_host_alloc(&fcoe_sw_shost_template,
382 sizeof(struct fcoe_softc));
383 if (!shost) {
384 FC_DBG("Could not allocate host structure\n");
385 return -ENOMEM;
386 }
387 lp = shost_priv(shost);
388 fc = lport_priv(lp);
389
390 /* configure fc_lport, e.g., em */
391 rc = fcoe_sw_lport_config(lp);
392 if (rc) {
393 FC_DBG("Could not configure lport\n");
394 goto out_host_put;
395 }
396
397 /* configure lport network properties */
398 rc = fcoe_sw_netdev_config(lp, netdev);
399 if (rc) {
400 FC_DBG("Could not configure netdev for lport\n");
401 goto out_host_put;
402 }
403
404 /* configure lport scsi host properties */
405 rc = fcoe_sw_shost_config(lp, shost, &netdev->dev);
406 if (rc) {
407 FC_DBG("Could not configure shost for lport\n");
408 goto out_host_put;
409 }
410
411 /* lport exch manager allocation */
412 rc = fcoe_sw_em_config(lp);
413 if (rc) {
414 FC_DBG("Could not configure em for lport\n");
415 goto out_host_put;
416 }
417
418 /* Initialize the library */
419 rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
420 if (rc) {
421 FC_DBG("Could not configure libfc for lport!\n");
422 goto out_lp_destroy;
423 }
424
425 /* add to lports list */
426 fcoe_hostlist_add(lp);
427
428 lp->boot_time = jiffies;
429
430 fc_fabric_login(lp);
431
432 dev_hold(netdev);
433
434 return rc;
435
436out_lp_destroy:
437 fc_exch_mgr_free(lp->emp); /* Free the EM */
438out_host_put:
439 scsi_host_put(lp->host);
440 return rc;
441}
442
443/*
444 * fcoe_sw_match - the fcoe sw transport match function
445 *
446 * Returns : false always
447 */
448static bool fcoe_sw_match(struct net_device *netdev)
449{
450 /* FIXME - for sw transport, always return false */
451 return false;
452}
453
454/* the sw hba fcoe transport */
455struct fcoe_transport fcoe_sw_transport = {
456 .name = "fcoesw",
457 .create = fcoe_sw_create,
458 .destroy = fcoe_sw_destroy,
459 .match = fcoe_sw_match,
460 .vendor = 0x0,
461 .device = 0xffff,
462};
463
464/*
465 * fcoe_sw_init - registers fcoe_sw_transport
466 *
467 * Returns : 0 on success
468 */
469int __init fcoe_sw_init(void)
470{
471 /* attach to scsi transport */
472 scsi_transport_fcoe_sw =
473 fc_attach_transport(&fcoe_sw_transport_function);
474 if (!scsi_transport_fcoe_sw) {
475 printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
476 return -ENODEV;
477 }
478 /* register sw transport */
479 fcoe_transport_register(&fcoe_sw_transport);
480 return 0;
481}
482
483/*
484 * fcoe_sw_exit - unregisters fcoe_sw_transport
485 *
486 * Returns : 0 on success
487 */
488int __exit fcoe_sw_exit(void)
489{
490	/* detach the transport */
491 fc_release_transport(scsi_transport_fcoe_sw);
492 fcoe_transport_unregister(&fcoe_sw_transport);
493 return 0;
494}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
new file mode 100644
index 000000000000..e419f486cdb3
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -0,0 +1,1510 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/kernel.h>
23#include <linux/spinlock.h>
24#include <linux/skbuff.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/if_ether.h>
29#include <linux/if_vlan.h>
30#include <linux/kthread.h>
31#include <linux/crc32.h>
32#include <linux/cpu.h>
33#include <linux/fs.h>
34#include <linux/sysfs.h>
35#include <linux/ctype.h>
36#include <scsi/scsi_tcq.h>
37#include <scsi/scsicam.h>
38#include <scsi/scsi_transport.h>
39#include <scsi/scsi_transport_fc.h>
40#include <net/rtnetlink.h>
41
42#include <scsi/fc/fc_encaps.h>
43
44#include <scsi/libfc.h>
45#include <scsi/fc_frame.h>
46#include <scsi/libfcoe.h>
47#include <scsi/fc_transport_fcoe.h>
48
49static int debug_fcoe;
50
51#define FCOE_MAX_QUEUE_DEPTH 256
52
53/* destination address mode */
54#define FCOE_GW_ADDR_MODE 0x00
55#define FCOE_FCOUI_ADDR_MODE 0x01
56
57#define FCOE_WORD_TO_BYTE 4
58
59MODULE_AUTHOR("Open-FCoE.org");
60MODULE_DESCRIPTION("FCoE");
61MODULE_LICENSE("GPL");
62
63/* fcoe host list */
64LIST_HEAD(fcoe_hostlist);
65DEFINE_RWLOCK(fcoe_hostlist_lock);
66DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
67struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
68
69
70/* Function Prototypes */
71static int fcoe_check_wait_queue(struct fc_lport *);
72static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
73static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
74static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
75#ifdef CONFIG_HOTPLUG_CPU
76static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
77#endif /* CONFIG_HOTPLUG_CPU */
78static int fcoe_device_notification(struct notifier_block *, ulong, void *);
79static void fcoe_dev_setup(void);
80static void fcoe_dev_cleanup(void);
81
82/* notification function from net device */
83static struct notifier_block fcoe_notifier = {
84 .notifier_call = fcoe_device_notification,
85};
86
87
88#ifdef CONFIG_HOTPLUG_CPU
89static struct notifier_block fcoe_cpu_notifier = {
90 .notifier_call = fcoe_cpu_callback,
91};
92
93/**
94 * fcoe_create_percpu_data - creates the associated cpu data
95 * @cpu: index for the cpu where fcoe cpu data will be created
96 *
97 * create percpu stats block, from cpu add notifier
98 *
99 * Returns: none
100 **/
101static void fcoe_create_percpu_data(int cpu)
102{
103 struct fc_lport *lp;
104 struct fcoe_softc *fc;
105
106 write_lock_bh(&fcoe_hostlist_lock);
107 list_for_each_entry(fc, &fcoe_hostlist, list) {
108 lp = fc->lp;
109 if (lp->dev_stats[cpu] == NULL)
110 lp->dev_stats[cpu] =
111 kzalloc(sizeof(struct fcoe_dev_stats),
112 GFP_KERNEL);
113 }
114 write_unlock_bh(&fcoe_hostlist_lock);
115}
116
117/**
118 * fcoe_destroy_percpu_data - destroys the associated cpu data
119 * @cpu: index for the cpu where fcoe cpu data will destroyed
120 *
121 * destroy percpu stats block called by cpu add/remove notifier
122 *
123 * Returns: none
124 **/
125static void fcoe_destroy_percpu_data(int cpu)
126{
127 struct fc_lport *lp;
128 struct fcoe_softc *fc;
129
130 write_lock_bh(&fcoe_hostlist_lock);
131 list_for_each_entry(fc, &fcoe_hostlist, list) {
132 lp = fc->lp;
133 kfree(lp->dev_stats[cpu]);
134 lp->dev_stats[cpu] = NULL;
135 }
136 write_unlock_bh(&fcoe_hostlist_lock);
137}
138
139/**
140 * fcoe_cpu_callback - fcoe cpu hotplug event callback
141 * @nfb: callback data block
142 * @action: event triggering the callback
143 * @hcpu: index for the cpu of this event
144 *
145 * this creates or destroys per cpu data for fcoe
146 *
147 * Returns NOTIFY_OK always.
148 **/
149static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
150 void *hcpu)
151{
152 unsigned int cpu = (unsigned long)hcpu;
153
154 switch (action) {
155 case CPU_ONLINE:
156 fcoe_create_percpu_data(cpu);
157 break;
158 case CPU_DEAD:
159 fcoe_destroy_percpu_data(cpu);
160 break;
161 default:
162 break;
163 }
164 return NOTIFY_OK;
165}
166#endif /* CONFIG_HOTPLUG_CPU */
167
168/**
169 * fcoe_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ
170 * @skb: the receive skb
171 * @dev: associated net device
172 * @ptype: context
173 * @olddev: last device
174 *
175 * This function receives the packet, builds an FC frame, and passes it up.
176 *
177 * Returns: 0 for success
178 **/
179int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
180 struct packet_type *ptype, struct net_device *olddev)
181{
182 struct fc_lport *lp;
183 struct fcoe_rcv_info *fr;
184 struct fcoe_softc *fc;
185 struct fcoe_dev_stats *stats;
186 struct fc_frame_header *fh;
187 unsigned short oxid;
188 int cpu_idx;
189 struct fcoe_percpu_s *fps;
190
191 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
192 lp = fc->lp;
193 if (unlikely(lp == NULL)) {
194 FC_DBG("cannot find hba structure");
195 goto err2;
196 }
197
198 if (unlikely(debug_fcoe)) {
199 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
200 "end:%p sum:%d dev:%s", skb->len, skb->data_len,
201 skb->head, skb->data, skb_tail_pointer(skb),
202 skb_end_pointer(skb), skb->csum,
203 skb->dev ? skb->dev->name : "<NULL>");
204
205 }
206
207 /* check for FCOE packet type */
208 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
209 FC_DBG("wrong FC type frame");
210 goto err;
211 }
212
213 /*
214 * Check for minimum frame length, and make sure required FCoE
215 * and FC headers are pulled into the linear data area.
216 */
217 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
218 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
219 goto err;
220
221 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
222 fh = (struct fc_frame_header *) skb_transport_header(skb);
223
224 oxid = ntohs(fh->fh_ox_id);
225
226 fr = fcoe_dev_from_skb(skb);
227 fr->fr_dev = lp;
228 fr->ptype = ptype;
229 cpu_idx = 0;
230#ifdef CONFIG_SMP
231	/*
232	 * The incoming frame's exchange id (oxid) is ANDed with the number of
233	 * online CPUs minus one to derive cpu_idx, which is then used to
234	 * select a per-CPU kernel thread from fcoe_percpu. If that CPU is
235	 * offline or there is no kernel thread for the derived cpu_idx,
236	 * cpu_idx falls back to the first online CPU.
237	 */
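	/*
	 * For example, with four online CPUs an oxid of 0x0012 gives
	 * cpu_idx = 0x0012 & 3 = 2, so the frame is queued to fcoe_percpu[2]
	 * and that CPU's receive thread is woken.
	 */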
238 cpu_idx = oxid & (num_online_cpus() - 1);
239 if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
240 cpu_idx = first_cpu(cpu_online_map);
241#endif
242 fps = fcoe_percpu[cpu_idx];
243
244 spin_lock_bh(&fps->fcoe_rx_list.lock);
245 __skb_queue_tail(&fps->fcoe_rx_list, skb);
246 if (fps->fcoe_rx_list.qlen == 1)
247 wake_up_process(fps->thread);
248
249 spin_unlock_bh(&fps->fcoe_rx_list.lock);
250
251 return 0;
252err:
253#ifdef CONFIG_SMP
254 stats = lp->dev_stats[smp_processor_id()];
255#else
256 stats = lp->dev_stats[0];
257#endif
258 if (stats)
259 stats->ErrorFrames++;
260
261err2:
262 kfree_skb(skb);
263 return -1;
264}
265EXPORT_SYMBOL_GPL(fcoe_rcv);
266
267/**
268 * fcoe_start_io - pass to netdev to start xmit for fcoe
269 * @skb: the skb to be xmitted
270 *
271 * Returns: 0 for success
272 **/
273static inline int fcoe_start_io(struct sk_buff *skb)
274{
275 int rc;
276
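	/*
	 * Hold an extra reference across dev_queue_xmit(): the driver path
	 * consumes one reference even on failure, so keeping our own lets the
	 * caller requeue the skb if the transmit fails; on success the extra
	 * reference is dropped right after.
	 */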
277 skb_get(skb);
278 rc = dev_queue_xmit(skb);
279 if (rc != 0)
280 return rc;
281 kfree_skb(skb);
282 return 0;
283}
284
285/**
286 * fcoe_get_paged_crc_eof - appends a paged CRC/EOF trailer fragment to the skb
287 * @skb: the skb to be xmitted
288 * @tlen: total len
289 *
290 * Returns: 0 for success
291 **/
292static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
293{
294 struct fcoe_percpu_s *fps;
295 struct page *page;
296 int cpu_idx;
297
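	/*
	 * CRC/EOF trailers for many frames share a single per-CPU page: each
	 * caller takes a page reference for its fragment and advances the
	 * offset; once the page is full it is released so a fresh page is
	 * allocated on the next call.
	 */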
298 cpu_idx = get_cpu();
299 fps = fcoe_percpu[cpu_idx];
300 page = fps->crc_eof_page;
301 if (!page) {
302 page = alloc_page(GFP_ATOMIC);
303 if (!page) {
304 put_cpu();
305 return -ENOMEM;
306 }
307 fps->crc_eof_page = page;
308 WARN_ON(fps->crc_eof_offset != 0);
309 }
310
311 get_page(page);
312 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
313 fps->crc_eof_offset, tlen);
314 skb->len += tlen;
315 skb->data_len += tlen;
316 skb->truesize += tlen;
317 fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
318
319 if (fps->crc_eof_offset >= PAGE_SIZE) {
320 fps->crc_eof_page = NULL;
321 fps->crc_eof_offset = 0;
322 put_page(page);
323 }
324 put_cpu();
325 return 0;
326}
327
328/**
329 * fcoe_fc_crc - calculates FC CRC in this fcoe skb
330 * @fp: the fc_frame containing data to be checksummed
331 *
332 * This uses crc32() to calculate the crc for fc frame
333 * Return : 32 bit crc
334 *
335 **/
336u32 fcoe_fc_crc(struct fc_frame *fp)
337{
338 struct sk_buff *skb = fp_skb(fp);
339 struct skb_frag_struct *frag;
340 unsigned char *data;
341 unsigned long off, len, clen;
342 u32 crc;
343 unsigned i;
344
345 crc = crc32(~0, skb->data, skb_headlen(skb));
346
347 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
348 frag = &skb_shinfo(skb)->frags[i];
349 off = frag->page_offset;
350 len = frag->size;
351 while (len > 0) {
352 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
353 data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
354 KM_SKB_DATA_SOFTIRQ);
355 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
356 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
357 off += clen;
358 len -= clen;
359 }
360 }
361 return crc;
362}
363EXPORT_SYMBOL_GPL(fcoe_fc_crc);
364
365/**
366 * fcoe_xmit - FCoE frame transmit function
367 * @lp: the associated local port
368 * @fp: the fc_frame to be transmitted
369 *
370 * Return : 0 for success
371 *
372 **/
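/*
 * The frame built below is laid out on the wire as:
 *   [ethernet header][fcoe header][fc frame][fcoe crc/eof trailer]
 * with the FC CRC either computed in software here or left to the NIC
 * when CRC offload is available.
 */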
373int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
374{
375 int wlen, rc = 0;
376 u32 crc;
377 struct ethhdr *eh;
378 struct fcoe_crc_eof *cp;
379 struct sk_buff *skb;
380 struct fcoe_dev_stats *stats;
381 struct fc_frame_header *fh;
382 unsigned int hlen; /* header length implies the version */
383 unsigned int tlen; /* trailer length */
384 unsigned int elen; /* eth header, may include vlan */
385 int flogi_in_progress = 0;
386 struct fcoe_softc *fc;
387 u8 sof, eof;
388 struct fcoe_hdr *hp;
389
390 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
391
392 fc = fcoe_softc(lp);
393 /*
394 * if it is a flogi then we need to learn gw-addr
395 * and my own fcid
396 */
397 fh = fc_frame_header_get(fp);
398 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
399 if (fc_frame_payload_op(fp) == ELS_FLOGI) {
400 fc->flogi_oxid = ntohs(fh->fh_ox_id);
401 fc->address_mode = FCOE_FCOUI_ADDR_MODE;
402 fc->flogi_progress = 1;
403 flogi_in_progress = 1;
404 } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
405 /*
406 * Here we must've gotten an SID by accepting an FLOGI
407 * from a point-to-point connection. Switch to using
408 * the source mac based on the SID. The destination
409			 * MAC in this case would have been set by receiving the
410 * FLOGI.
411 */
412 fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
413 fc->flogi_progress = 0;
414 }
415 }
416
417 skb = fp_skb(fp);
418 sof = fr_sof(fp);
419 eof = fr_eof(fp);
420
421 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
422 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
423 hlen = sizeof(struct fcoe_hdr);
424 tlen = sizeof(struct fcoe_crc_eof);
425 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
426
427 /* crc offload */
428 if (likely(lp->crc_offload)) {
429 skb->ip_summed = CHECKSUM_COMPLETE;
430 skb->csum_start = skb_headroom(skb);
431 skb->csum_offset = skb->len;
432 crc = 0;
433 } else {
434 skb->ip_summed = CHECKSUM_NONE;
435 crc = fcoe_fc_crc(fp);
436 }
437
438 /* copy fc crc and eof to the skb buff */
439 if (skb_is_nonlinear(skb)) {
440 skb_frag_t *frag;
441 if (fcoe_get_paged_crc_eof(skb, tlen)) {
442			kfree_skb(skb);
443 return -ENOMEM;
444 }
445 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
446 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
447 + frag->page_offset;
448 } else {
449 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
450 }
451
452 memset(cp, 0, sizeof(*cp));
453 cp->fcoe_eof = eof;
454 cp->fcoe_crc32 = cpu_to_le32(~crc);
455
456 if (skb_is_nonlinear(skb)) {
457 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
458 cp = NULL;
459 }
460
461	/* adjust skb network/transport offsets to match mac/fcoe/fc */
462 skb_push(skb, elen + hlen);
463 skb_reset_mac_header(skb);
464 skb_reset_network_header(skb);
465 skb->mac_len = elen;
466 skb->protocol = htons(ETH_P_802_3);
467 skb->dev = fc->real_dev;
468
469 /* fill up mac and fcoe headers */
470 eh = eth_hdr(skb);
471 eh->h_proto = htons(ETH_P_FCOE);
472 if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
473 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
474 else
475 /* insert GW address */
476 memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
477
478 if (unlikely(flogi_in_progress))
479 memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
480 else
481 memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
482
483 hp = (struct fcoe_hdr *)(eh + 1);
484 memset(hp, 0, sizeof(*hp));
485 if (FC_FCOE_VER)
486 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
487 hp->fcoe_sof = sof;
488
489	/* update tx stats, regardless of whether the LLD transmit fails */
490 stats = lp->dev_stats[smp_processor_id()];
491 if (stats) {
492 stats->TxFrames++;
493 stats->TxWords += wlen;
494 }
495
496 /* send down to lld */
497 fr_dev(fp) = lp;
498 if (fc->fcoe_pending_queue.qlen)
499 rc = fcoe_check_wait_queue(lp);
500
501 if (rc == 0)
502 rc = fcoe_start_io(skb);
503
504 if (rc) {
505 fcoe_insert_wait_queue(lp, skb);
506 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
507 fc_pause(lp);
508 }
509
510 return 0;
511}
512EXPORT_SYMBOL_GPL(fcoe_xmit);
513
514/*
515 * fcoe_percpu_receive_thread - recv thread per cpu
516 * @arg: ptr to the fcoe per cpu struct
517 *
518 * Return: 0 for success
519 *
520 */
521int fcoe_percpu_receive_thread(void *arg)
522{
523 struct fcoe_percpu_s *p = arg;
524 u32 fr_len;
525 struct fc_lport *lp;
526 struct fcoe_rcv_info *fr;
527 struct fcoe_dev_stats *stats;
528 struct fc_frame_header *fh;
529 struct sk_buff *skb;
530 struct fcoe_crc_eof crc_eof;
531 struct fc_frame *fp;
532 u8 *mac = NULL;
533 struct fcoe_softc *fc;
534 struct fcoe_hdr *hp;
535
536 set_user_nice(current, 19);
537
538 while (!kthread_should_stop()) {
539
540 spin_lock_bh(&p->fcoe_rx_list.lock);
541 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
542 set_current_state(TASK_INTERRUPTIBLE);
543 spin_unlock_bh(&p->fcoe_rx_list.lock);
544 schedule();
545 set_current_state(TASK_RUNNING);
546 if (kthread_should_stop())
547 return 0;
548 spin_lock_bh(&p->fcoe_rx_list.lock);
549 }
550 spin_unlock_bh(&p->fcoe_rx_list.lock);
551 fr = fcoe_dev_from_skb(skb);
552 lp = fr->fr_dev;
553 if (unlikely(lp == NULL)) {
554 FC_DBG("invalid HBA Structure");
555 kfree_skb(skb);
556 continue;
557 }
558
559 stats = lp->dev_stats[smp_processor_id()];
560
561 if (unlikely(debug_fcoe)) {
562 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
563 "tail:%p end:%p sum:%d dev:%s",
564 skb->len, skb->data_len,
565 skb->head, skb->data, skb_tail_pointer(skb),
566 skb_end_pointer(skb), skb->csum,
567 skb->dev ? skb->dev->name : "<NULL>");
568 }
569
570 /*
571 * Save source MAC address before discarding header.
572 */
573 fc = lport_priv(lp);
574 if (unlikely(fc->flogi_progress))
575 mac = eth_hdr(skb)->h_source;
576
577 if (skb_is_nonlinear(skb))
578 skb_linearize(skb); /* not ideal */
579
580 /*
581 * Frame length checks and setting up the header pointers
582 * was done in fcoe_rcv already.
583 */
584 hp = (struct fcoe_hdr *) skb_network_header(skb);
585 fh = (struct fc_frame_header *) skb_transport_header(skb);
586
587 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
588 if (stats) {
589 if (stats->ErrorFrames < 5)
590 FC_DBG("unknown FCoE version %x",
591 FC_FCOE_DECAPS_VER(hp));
592 stats->ErrorFrames++;
593 }
594 kfree_skb(skb);
595 continue;
596 }
597
598 skb_pull(skb, sizeof(struct fcoe_hdr));
599 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
600
601 if (stats) {
602 stats->RxFrames++;
603 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
604 }
605
606 fp = (struct fc_frame *)skb;
607 fc_frame_init(fp);
608 fr_dev(fp) = lp;
609 fr_sof(fp) = hp->fcoe_sof;
610
611 /* Copy out the CRC and EOF trailer for access */
612 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
613 kfree_skb(skb);
614 continue;
615 }
616 fr_eof(fp) = crc_eof.fcoe_eof;
617 fr_crc(fp) = crc_eof.fcoe_crc32;
618 if (pskb_trim(skb, fr_len)) {
619 kfree_skb(skb);
620 continue;
621 }
622
623		/*
624		 * The CRC is checked here only when it was not offloaded and
625		 * the frame is not solicited FCP data; for solicited data the
626		 * FCP layer checks it during the copy.
627		 */
628 if (lp->crc_offload)
629 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
630 else
631 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
632
633 fh = fc_frame_header_get(fp);
634 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
635 fh->fh_type == FC_TYPE_FCP) {
636 fc_exch_recv(lp, lp->emp, fp);
637 continue;
638 }
639 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
640 if (le32_to_cpu(fr_crc(fp)) !=
641 ~crc32(~0, skb->data, fr_len)) {
642 if (debug_fcoe || stats->InvalidCRCCount < 5)
643 printk(KERN_WARNING "fcoe: dropping "
644 "frame with CRC error\n");
645 stats->InvalidCRCCount++;
646 stats->ErrorFrames++;
647 fc_frame_free(fp);
648 continue;
649 }
650 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
651 }
652 /* non flogi and non data exchanges are handled here */
653 if (unlikely(fc->flogi_progress))
654 fcoe_recv_flogi(fc, fp, mac);
655 fc_exch_recv(lp, lp->emp, fp);
656 }
657 return 0;
658}
659
660/**
661 * fcoe_recv_flogi - flogi receive function
662 * @fc: associated fcoe_softc
663 * @fp: the received frame
664 * @sa: the source address of this flogi
665 *
666 * This parses the FLOGI response and sets the corresponding MAC address
667 * for the initiator, either OUI-based or gateway-based.
668 *
669 * Returns: none
670 **/
671static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
672{
673 struct fc_frame_header *fh;
674 u8 op;
675
676 fh = fc_frame_header_get(fp);
677 if (fh->fh_type != FC_TYPE_ELS)
678 return;
679 op = fc_frame_payload_op(fp);
680 if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
681 fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
682 /*
683 * FLOGI accepted.
684 * If the src mac addr is FC_OUI-based, then we mark the
685 * address_mode flag to use FC_OUI-based Ethernet DA.
686 * Otherwise we use the FCoE gateway addr
687 */
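		/*
		 * For example, if the fabric assigned FC_ID 0x010203, the
		 * FC_OUI-based data source MAC programmed below becomes
		 * 0e:fc:00:01:02:03 (assuming the default 0e:fc:00 FC-MAP).
		 */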
688 if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
689 fc->address_mode = FCOE_FCOUI_ADDR_MODE;
690 } else {
691 memcpy(fc->dest_addr, sa, ETH_ALEN);
692 fc->address_mode = FCOE_GW_ADDR_MODE;
693 }
694
695 /*
696 * Remove any previously-set unicast MAC filter.
697 * Add secondary FCoE MAC address filter for our OUI.
698 */
699 rtnl_lock();
700 if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
701 dev_unicast_delete(fc->real_dev, fc->data_src_addr,
702 ETH_ALEN);
703 fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
704 dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
705 rtnl_unlock();
706
707 fc->flogi_progress = 0;
708 } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
709 /*
710 * Save source MAC for point-to-point responses.
711 */
712 memcpy(fc->dest_addr, sa, ETH_ALEN);
713 fc->address_mode = FCOE_GW_ADDR_MODE;
714 }
715}
716
717/**
718 * fcoe_watchdog - fcoe timer callback
719 * @vp: unused timer argument
720 *
721 * This drains the pending transmit queue of every fc_lport on the
722 * fcoe_hostlist and unpauses any lport that was paused when its queue
723 * reached FCOE_MAX_QUEUE_DEPTH, once that queue has drained.
724 *
725 * Returns: none
726 **/
727void fcoe_watchdog(ulong vp)
728{
729 struct fc_lport *lp;
730 struct fcoe_softc *fc;
731 int paused = 0;
732
733 read_lock(&fcoe_hostlist_lock);
734 list_for_each_entry(fc, &fcoe_hostlist, list) {
735 lp = fc->lp;
736 if (lp) {
737 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
738 paused = 1;
739 if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
740 if (paused)
741 fc_unpause(lp);
742 }
743 }
744 }
745 read_unlock(&fcoe_hostlist_lock);
746
747 fcoe_timer.expires = jiffies + (1 * HZ);
748 add_timer(&fcoe_timer);
749}
750
751
752/**
753 * fcoe_check_wait_queue - attempt to drain the fcoe pending xmit queue
754 * @lp: the fc_lport whose pending queue is checked
755 *
756 * This dequeues skbs from the head of the pending queue and calls
757 * fcoe_start_io() on each one. If a transmit fails, the skb is put back
758 * at the head of the queue and the error code is returned so the frame
759 * can be retried later. Once the queue drains below FCOE_MAX_QUEUE_DEPTH,
760 * a previously paused lport is unpaused.
761 *
762 * The pending queue is filled when an skb transmit fails; it is emptied
763 * by the timer function or by the next transmit attempt.
764 *
765 * Returns: the remaining pending queue length, or a transmit error code
766 * if fcoe_start_io() fails
767 **/
768static int fcoe_check_wait_queue(struct fc_lport *lp)
769{
770 int rc, unpause = 0;
771 int paused = 0;
772 struct sk_buff *skb;
773 struct fcoe_softc *fc;
774
775 fc = fcoe_softc(lp);
776 spin_lock_bh(&fc->fcoe_pending_queue.lock);
777
778 /*
779 * is this interface paused?
780 */
781 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
782 paused = 1;
783 if (fc->fcoe_pending_queue.qlen) {
784 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
785 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
786 rc = fcoe_start_io(skb);
787 if (rc) {
788 fcoe_insert_wait_queue_head(lp, skb);
789 return rc;
790 }
791 spin_lock_bh(&fc->fcoe_pending_queue.lock);
792 }
793 if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
794 unpause = 1;
795 }
796 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
797 if ((unpause) && (paused))
798 fc_unpause(lp);
799 return fc->fcoe_pending_queue.qlen;
800}
801
802/**
803 * fcoe_insert_wait_queue_head - puts skb to fcoe pending queue head
804 * @lp: the fc_lport for this skb
805 * @skb: the associated skb to be xmitted
806 *
807 * Returns: none
808 **/
809static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
810 struct sk_buff *skb)
811{
812 struct fcoe_softc *fc;
813
814 fc = fcoe_softc(lp);
815 spin_lock_bh(&fc->fcoe_pending_queue.lock);
816 __skb_queue_head(&fc->fcoe_pending_queue, skb);
817 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
818}
819
820/**
821 * fcoe_insert_wait_queue - put the skb into fcoe pending queue tail
822 * @lp: the fc_lport for this skb
823 * @skb: the associated skb to be xmitted
824 *
825 * Returns: none
826 **/
827static void fcoe_insert_wait_queue(struct fc_lport *lp,
828 struct sk_buff *skb)
829{
830 struct fcoe_softc *fc;
831
832 fc = fcoe_softc(lp);
833 spin_lock_bh(&fc->fcoe_pending_queue.lock);
834 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
835 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
836}
837
838/**
839 * fcoe_dev_setup - setup link change notification interface
840 *
841 **/
842static void fcoe_dev_setup(void)
843{
844	/*
845	 * register a netdevice notifier so that link state changes on the
846	 * underlying interfaces are reported to fcoe
847	 */
848 register_netdevice_notifier(&fcoe_notifier);
849}
850
851/**
852 * fcoe_dev_cleanup - cleanup link change notification interface
853 **/
854static void fcoe_dev_cleanup(void)
855{
856 unregister_netdevice_notifier(&fcoe_notifier);
857}
858
859/**
860 * fcoe_device_notification - netdev event notification callback
861 * @notifier: context of the notification
862 * @event: type of event
863 * @ptr: ptr to the net_device that triggered the event
864 *
865 * This function is called by the ethernet driver in case of link change event
866 *
867 * Returns: 0 for success
868 **/
869static int fcoe_device_notification(struct notifier_block *notifier,
870 ulong event, void *ptr)
871{
872 struct fc_lport *lp = NULL;
873 struct net_device *real_dev = ptr;
874 struct fcoe_softc *fc;
875 struct fcoe_dev_stats *stats;
876 u16 new_status;
877 u32 mfs;
878 int rc = NOTIFY_OK;
879
880 read_lock(&fcoe_hostlist_lock);
881 list_for_each_entry(fc, &fcoe_hostlist, list) {
882 if (fc->real_dev == real_dev) {
883 lp = fc->lp;
884 break;
885 }
886 }
887 read_unlock(&fcoe_hostlist_lock);
888 if (lp == NULL) {
889 rc = NOTIFY_DONE;
890 goto out;
891 }
892
893 new_status = lp->link_status;
894 switch (event) {
895 case NETDEV_DOWN:
896 case NETDEV_GOING_DOWN:
897 new_status &= ~FC_LINK_UP;
898 break;
899 case NETDEV_UP:
900 case NETDEV_CHANGE:
901 new_status &= ~FC_LINK_UP;
902 if (!fcoe_link_ok(lp))
903 new_status |= FC_LINK_UP;
904 break;
905 case NETDEV_CHANGEMTU:
906 mfs = fc->real_dev->mtu -
907 (sizeof(struct fcoe_hdr) +
908 sizeof(struct fcoe_crc_eof));
909 if (mfs >= FC_MIN_MAX_FRAME)
910 fc_set_mfs(lp, mfs);
911 new_status &= ~FC_LINK_UP;
912 if (!fcoe_link_ok(lp))
913 new_status |= FC_LINK_UP;
914 break;
915 case NETDEV_REGISTER:
916 break;
917 default:
918 FC_DBG("unknown event %ld call", event);
919 }
920 if (lp->link_status != new_status) {
921 if ((new_status & FC_LINK_UP) == FC_LINK_UP)
922 fc_linkup(lp);
923 else {
924 stats = lp->dev_stats[smp_processor_id()];
925 if (stats)
926 stats->LinkFailureCount++;
927 fc_linkdown(lp);
928 fcoe_clean_pending_queue(lp);
929 }
930 }
931out:
932 return rc;
933}
934
935/**
936 * fcoe_if_to_netdev - parse a name buffer to get netdev
937 * @buffer: incoming buffer containing the interface name
938 *
939 * Returns: NULL if no matching device is found, otherwise a ptr to the
940 * net_device (with a reference held by dev_get_by_name())
941 **/
942static struct net_device *fcoe_if_to_netdev(const char *buffer)
943{
944 char *cp;
945 char ifname[IFNAMSIZ + 2];
946
947 if (buffer) {
948 strlcpy(ifname, buffer, IFNAMSIZ);
949 cp = ifname + strlen(ifname);
950 while (--cp >= ifname && *cp == '\n')
951 *cp = '\0';
952 return dev_get_by_name(&init_net, ifname);
953 }
954 return NULL;
955}
956
957/**
958 * fcoe_netdev_to_module_owner - finds the NIC driver module of the netdev
959 * @netdev: the target netdev
960 *
961 * Returns: ptr to the struct module, NULL for failure
962 **/
963static struct module *fcoe_netdev_to_module_owner(
964 const struct net_device *netdev)
965{
966 struct device *dev;
967
968 if (!netdev)
969 return NULL;
970
971 dev = netdev->dev.parent;
972 if (!dev)
973 return NULL;
974
975 if (!dev->driver)
976 return NULL;
977
978 return dev->driver->owner;
979}
980
981/**
982 * fcoe_ethdrv_get - holds the NIC driver module via try_module_get() for
983 * the corresponding netdev.
984 * @netdev: the target netdev
985 *
986 * Returns: the try_module_get() result, or -ENODEV if no owner is found
987 **/
988static int fcoe_ethdrv_get(const struct net_device *netdev)
989{
990 struct module *owner;
991
992 owner = fcoe_netdev_to_module_owner(netdev);
993 if (owner) {
994 printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
995 module_name(owner), netdev->name);
996 return try_module_get(owner);
997 }
998 return -ENODEV;
999}
1000
1001/**
1002 * fcoe_ethdrv_put - releases the NIC driver module via module_put() for
1003 * the corresponding netdev.
1004 * @netdev: the target netdev
1005 *
1006 * Returns: 0 for success, -ENODEV if no owner is found
1007 **/
1008static int fcoe_ethdrv_put(const struct net_device *netdev)
1009{
1010 struct module *owner;
1011
1012 owner = fcoe_netdev_to_module_owner(netdev);
1013 if (owner) {
1014 printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
1015 module_name(owner), netdev->name);
1016 module_put(owner);
1017 return 0;
1018 }
1019 return -ENODEV;
1020}
1021
1022/**
1023 * fcoe_destroy - handles the destroy call from sysfs
1024 * @buffer: expected to be an Ethernet interface name
1025 * @kp: associated kernel param
1026 *
1027 * Returns: 0 for success
1028 **/
1029static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1030{
1031 int rc;
1032 struct net_device *netdev;
1033
1034 netdev = fcoe_if_to_netdev(buffer);
1035 if (!netdev) {
1036 rc = -ENODEV;
1037 goto out_nodev;
1038 }
1039 /* look for existing lport */
1040 if (!fcoe_hostlist_lookup(netdev)) {
1041 rc = -ENODEV;
1042 goto out_putdev;
1043 }
1044 /* pass to transport */
1045 rc = fcoe_transport_release(netdev);
1046 if (rc) {
1047 printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
1048 netdev->name);
1049 rc = -EIO;
1050 goto out_putdev;
1051 }
1052 fcoe_ethdrv_put(netdev);
1053 rc = 0;
1054out_putdev:
1055 dev_put(netdev);
1056out_nodev:
1057 return rc;
1058}
1059
1060/**
1061 * fcoe_create - handles the create call from sysfs
1062 * @buffer: expcted to be a eth if name
1063 * @kp: associated kernel param
1064 *
1065 * Returns: 0 for success
1066 **/
1067static int fcoe_create(const char *buffer, struct kernel_param *kp)
1068{
1069 int rc;
1070 struct net_device *netdev;
1071
1072 netdev = fcoe_if_to_netdev(buffer);
1073 if (!netdev) {
1074 rc = -ENODEV;
1075 goto out_nodev;
1076 }
1077 /* look for existing lport */
1078 if (fcoe_hostlist_lookup(netdev)) {
1079 rc = -EEXIST;
1080 goto out_putdev;
1081 }
1082 fcoe_ethdrv_get(netdev);
1083
1084 /* pass to transport */
1085 rc = fcoe_transport_attach(netdev);
1086 if (rc) {
1087 printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
1088 netdev->name);
1089 fcoe_ethdrv_put(netdev);
1090 rc = -EIO;
1091 goto out_putdev;
1092 }
1093 rc = 0;
1094out_putdev:
1095 dev_put(netdev);
1096out_nodev:
1097 return rc;
1098}
1099
1100module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1101__MODULE_PARM_TYPE(create, "string");
1102MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
1103module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1104__MODULE_PARM_TYPE(destroy, "string");
1105MODULE_PARM_DESC(destroy, "Destroy fcoe port");
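/*
 * Usage sketch, assuming the module is loaded as "fcoe": interfaces are
 * brought up or torn down by writing a netdev name to these parameters,
 * e.g. "echo eth0 > /sys/module/fcoe/parameters/create" and
 * "echo eth0 > /sys/module/fcoe/parameters/destroy".
 */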
1106
1107/*
1108 * fcoe_link_ok - check if link is ok for the fc_lport
1109 * @lp: ptr to the fc_lport
1110 *
1111 * Any permanently-disqualifying conditions have been previously checked.
1112 * This also updates the speed setting, which may change with the link (1G/10G).
1113 *
1114 * This function should probably be checking for PAUSE support at some point
1115 * in the future. Currently Per-priority-pause is not determinable using
1116 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1117 *
1118 * Returns: 0 if link is OK for use by FCoE.
1119 *
1120 */
1121int fcoe_link_ok(struct fc_lport *lp)
1122{
1123 struct fcoe_softc *fc = fcoe_softc(lp);
1124 struct net_device *dev = fc->real_dev;
1125 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1126 int rc = 0;
1127
1128 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1129 dev = fc->phys_dev;
1130 if (dev->ethtool_ops->get_settings) {
1131 dev->ethtool_ops->get_settings(dev, &ecmd);
1132 lp->link_supported_speeds &=
1133 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1134 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1135 SUPPORTED_1000baseT_Full))
1136 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1137 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1138 lp->link_supported_speeds |=
1139 FC_PORTSPEED_10GBIT;
1140 if (ecmd.speed == SPEED_1000)
1141 lp->link_speed = FC_PORTSPEED_1GBIT;
1142 if (ecmd.speed == SPEED_10000)
1143 lp->link_speed = FC_PORTSPEED_10GBIT;
1144 }
1145 } else
1146 rc = -1;
1147
1148 return rc;
1149}
1150EXPORT_SYMBOL_GPL(fcoe_link_ok);
1151
1152/*
1153 * fcoe_percpu_clean - frees the skbs of the corresponding lport from the
1154 * per-cpu receive queues.
1155 * @lp: the fc_lport
1156 */
1157void fcoe_percpu_clean(struct fc_lport *lp)
1158{
1159 int idx;
1160 struct fcoe_percpu_s *pp;
1161 struct fcoe_rcv_info *fr;
1162 struct sk_buff_head *list;
1163 struct sk_buff *skb, *next;
1164 struct sk_buff *head;
1165
1166 for (idx = 0; idx < NR_CPUS; idx++) {
1167 if (fcoe_percpu[idx]) {
1168 pp = fcoe_percpu[idx];
1169 spin_lock_bh(&pp->fcoe_rx_list.lock);
1170 list = &pp->fcoe_rx_list;
1171 head = list->next;
1172 for (skb = head; skb != (struct sk_buff *)list;
1173 skb = next) {
1174 next = skb->next;
1175 fr = fcoe_dev_from_skb(skb);
1176 if (fr->fr_dev == lp) {
1177 __skb_unlink(skb, list);
1178 kfree_skb(skb);
1179 }
1180 }
1181 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1182 }
1183 }
1184}
1185EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
1186
1187/**
1188 * fcoe_clean_pending_queue - dequeue skb and free it
1189 * @lp: the corresponding fc_lport
1190 *
1191 * Returns: none
1192 **/
1193void fcoe_clean_pending_queue(struct fc_lport *lp)
1194{
1195 struct fcoe_softc *fc = lport_priv(lp);
1196 struct sk_buff *skb;
1197
1198 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1199 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1200 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1201 kfree_skb(skb);
1202 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1203 }
1204 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1205}
1206EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
1207
1208/**
1209 * libfc_host_alloc - allocate a Scsi_Host with room for the fc_lport
1210 * @sht: ptr to the scsi host templ
1211 * @priv_size: size of private data after fc_lport
1212 *
1213 * Returns: ptr to Scsi_Host
1214 * TODO - to libfc?
1215 */
1216static inline struct Scsi_Host *libfc_host_alloc(
1217 struct scsi_host_template *sht, int priv_size)
1218{
1219 return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
1220}
1221
1222/**
1223 * fcoe_host_alloc - allocate a Scsi_Host with room for the fcoe_softc
1224 * @sht: ptr to the scsi host templ
1225 * @priv_size: size of private data after fc_lport
1226 *
1227 * Returns: ptr to Scsi_Host
1228 */
1229struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
1230{
1231 return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
1232}
1233EXPORT_SYMBOL_GPL(fcoe_host_alloc);
1234
1235/*
1236 * fcoe_reset - resets the fcoe
1237 * @shost: shost the reset is from
1238 *
1239 * Returns: always 0
1240 */
1241int fcoe_reset(struct Scsi_Host *shost)
1242{
1243 struct fc_lport *lport = shost_priv(shost);
1244 fc_lport_reset(lport);
1245 return 0;
1246}
1247EXPORT_SYMBOL_GPL(fcoe_reset);
1248
1249/*
1250 * fcoe_wwn_from_mac - converts 48-bit IEEE MAC address to 64-bit FC WWN.
1251 * @mac: mac address
1252 * @scheme: WWN naming scheme (1 or 2)
1253 * @port: port number, used only with scheme 2
1254 *
1255 * Returns: u64 fc world wide name
1256 */
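/*
 * For example, MAC 00:1b:21:01:02:03 with scheme 2 and port 1 yields
 * WWN 0x2001001b21010203: the scheme occupies bits 63:60, the port
 * bits 59:48 and the MAC the low 48 bits.
 */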
1257u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1258 unsigned int scheme, unsigned int port)
1259{
1260 u64 wwn;
1261 u64 host_mac;
1262
1263	/* The MAC is in network order; assemble it into the low 48 bits */
1264 host_mac = ((u64) mac[0] << 40) |
1265 ((u64) mac[1] << 32) |
1266 ((u64) mac[2] << 24) |
1267 ((u64) mac[3] << 16) |
1268 ((u64) mac[4] << 8) |
1269 (u64) mac[5];
1270
1271 WARN_ON(host_mac >= (1ULL << 48));
1272 wwn = host_mac | ((u64) scheme << 60);
1273 switch (scheme) {
1274 case 1:
1275 WARN_ON(port != 0);
1276 break;
1277 case 2:
1278 WARN_ON(port >= 0xfff);
1279 wwn |= (u64) port << 48;
1280 break;
1281 default:
1282 WARN_ON(1);
1283 break;
1284 }
1285
1286 return wwn;
1287}
1288EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
1289/*
1290 * fcoe_hostlist_lookup_softc - find the corresponding lport by a given device
1291 * @device: this is currently ptr to net_device
1292 *
1293 * Returns: NULL or the located fcoe_softc
1294 */
1295static struct fcoe_softc *fcoe_hostlist_lookup_softc(
1296 const struct net_device *dev)
1297{
1298 struct fcoe_softc *fc;
1299
1300 read_lock(&fcoe_hostlist_lock);
1301 list_for_each_entry(fc, &fcoe_hostlist, list) {
1302 if (fc->real_dev == dev) {
1303 read_unlock(&fcoe_hostlist_lock);
1304 return fc;
1305 }
1306 }
1307 read_unlock(&fcoe_hostlist_lock);
1308 return NULL;
1309}
1310
1311/*
1312 * fcoe_hostlist_lookup - find the corresponding lport by netdev
1313 * @netdev: ptr to net_device
1314 *
1315 * Returns: NULL or the fc_lport associated with the netdev
1316 */
1317struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1318{
1319 struct fcoe_softc *fc;
1320
1321 fc = fcoe_hostlist_lookup_softc(netdev);
1322
1323 return (fc) ? fc->lp : NULL;
1324}
1325EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
1326
1327/*
1328 * fcoe_hostlist_add - add a lport to lports list
1329 * @lp: ptr to the fc_lport to be added
1330 *
1331 * Returns: 0 for success
1332 */
1333int fcoe_hostlist_add(const struct fc_lport *lp)
1334{
1335 struct fcoe_softc *fc;
1336
1337 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1338 if (!fc) {
1339 fc = fcoe_softc(lp);
1340 write_lock_bh(&fcoe_hostlist_lock);
1341 list_add_tail(&fc->list, &fcoe_hostlist);
1342 write_unlock_bh(&fcoe_hostlist_lock);
1343 }
1344 return 0;
1345}
1346EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
1347
1348/*
1349 * fcoe_hostlist_remove - remove a lport from lports list
1350 * @lp: ptr to the fc_lport to be removed
1351 *
1352 * Returns: 0 for success
1353 */
1354int fcoe_hostlist_remove(const struct fc_lport *lp)
1355{
1356 struct fcoe_softc *fc;
1357
1358 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1359 BUG_ON(!fc);
1360 write_lock_bh(&fcoe_hostlist_lock);
1361 list_del(&fc->list);
1362 write_unlock_bh(&fcoe_hostlist_lock);
1363
1364 return 0;
1365}
1366EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
1367
1368/**
1369 * fcoe_libfc_config - sets up libfc related properties for lport
1370 * @lp: ptr to the fc_lport
1371 * @tt: libfc function template
1372 *
1373 * Returns : 0 for success
1374 **/
1375int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
1376{
1377 /* Set the function pointers set by the LLDD */
1378 memcpy(&lp->tt, tt, sizeof(*tt));
1379 if (fc_fcp_init(lp))
1380 return -ENOMEM;
1381 fc_exch_init(lp);
1382 fc_elsct_init(lp);
1383 fc_lport_init(lp);
1384 fc_rport_init(lp);
1385 fc_disc_init(lp);
1386
1387 return 0;
1388}
1389EXPORT_SYMBOL_GPL(fcoe_libfc_config);
1390
1391/**
1392 * fcoe_init - fcoe module loading initialization
1393 *
1394 * Initialization routine:
1395 * 1. creates the fcoe transport software structures
1396 * 2. initializes the linked list of port information structures
1397 *
1398 * Returns 0 on success, negative on failure
1399 **/
1400static int __init fcoe_init(void)
1401{
1402 int cpu;
1403 struct fcoe_percpu_s *p;
1404
1405
1406 INIT_LIST_HEAD(&fcoe_hostlist);
1407 rwlock_init(&fcoe_hostlist_lock);
1408
1409#ifdef CONFIG_HOTPLUG_CPU
1410 register_cpu_notifier(&fcoe_cpu_notifier);
1411#endif /* CONFIG_HOTPLUG_CPU */
1412
1413	/*
1414	 * initialize the per-CPU receive threads
1415	 */
1416 for_each_online_cpu(cpu) {
1417 p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
1418 if (p) {
1419 p->thread = kthread_create(fcoe_percpu_receive_thread,
1420 (void *)p,
1421 "fcoethread/%d", cpu);
1422
1423			/*
1424			 * if thread creation succeeded, record the thread,
1425			 * initialize its skb queue head and bind it to the cpu
1426			 */
1427 if (likely(!IS_ERR(p->thread))) {
1428 p->cpu = cpu;
1429 fcoe_percpu[cpu] = p;
1430 skb_queue_head_init(&p->fcoe_rx_list);
1431 kthread_bind(p->thread, cpu);
1432 wake_up_process(p->thread);
1433 } else {
1434 fcoe_percpu[cpu] = NULL;
1435 kfree(p);
1436
1437 }
1438 }
1439 }
1440
1441 /*
1442 * setup link change notification
1443 */
1444 fcoe_dev_setup();
1445
1446 init_timer(&fcoe_timer);
1447 fcoe_timer.data = 0;
1448 fcoe_timer.function = fcoe_watchdog;
1449 fcoe_timer.expires = (jiffies + (10 * HZ));
1450 add_timer(&fcoe_timer);
1451
1452	/* initialize the fcoe transport */
1453 fcoe_transport_init();
1454
1455 fcoe_sw_init();
1456
1457 return 0;
1458}
1459module_init(fcoe_init);
1460
1461/**
1462 * fcoe_exit - fcoe module unloading cleanup
1463 *
1464 * Returns: none
1465 **/
1466static void __exit fcoe_exit(void)
1467{
1468 u32 idx;
1469 struct fcoe_softc *fc, *tmp;
1470 struct fcoe_percpu_s *p;
1471 struct sk_buff *skb;
1472
1473 /*
1474	 * Stop all callback interfaces
1475 */
1476#ifdef CONFIG_HOTPLUG_CPU
1477 unregister_cpu_notifier(&fcoe_cpu_notifier);
1478#endif /* CONFIG_HOTPLUG_CPU */
1479 fcoe_dev_cleanup();
1480
1481 /*
1482 * stop timer
1483 */
1484 del_timer_sync(&fcoe_timer);
1485
1486	/* release the associated fcoe transport for each lport */
1487 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1488 fcoe_transport_release(fc->real_dev);
1489
1490 for (idx = 0; idx < NR_CPUS; idx++) {
1491 if (fcoe_percpu[idx]) {
1492 kthread_stop(fcoe_percpu[idx]->thread);
1493 p = fcoe_percpu[idx];
1494 spin_lock_bh(&p->fcoe_rx_list.lock);
1495 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1496 kfree_skb(skb);
1497 spin_unlock_bh(&p->fcoe_rx_list.lock);
1498 if (fcoe_percpu[idx]->crc_eof_page)
1499 put_page(fcoe_percpu[idx]->crc_eof_page);
1500 kfree(fcoe_percpu[idx]);
1501 }
1502 }
1503
1504	/* remove the sw transport */
1505 fcoe_sw_exit();
1506
1507 /* detach the transport */
1508 fcoe_transport_exit();
1509}
1510module_exit(fcoe_exit);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 56f4e6bffc21..32eef66114c7 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -3,7 +3,7 @@
3 * Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org 3 * Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org
4 * Author: Rickard E. Faith, faith@cs.unc.edu 4 * Author: Rickard E. Faith, faith@cs.unc.edu
5 * Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org) 5 * Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
6 * Shared IRQ supported added 7/7/2001 Alan Cox <alan@redhat.com> 6 * Shared IRQ supported added 7/7/2001 Alan Cox <alan@lxorguk.ukuu.org.uk>
7 7
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 4d15a62914e9..9c1e6a5b5af0 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -10,7 +10,7 @@
10 See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest 10 See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest
11 updates, info and ADF-files for adapters supported by this driver. 11 updates, info and ADF-files for adapters supported by this driver.
12 12
13 Alan Cox <alan@redhat.com> 13 Alan Cox <alan@lxorguk.ukuu.org.uk>
14 Updated for Linux 2.5.45 to use the new error handler, cleaned up the 14 Updated for Linux 2.5.45 to use the new error handler, cleaned up the
15 lock macros and did a few unavoidable locking tweaks, plus one locking 15 lock macros and did a few unavoidable locking tweaks, plus one locking
16 fix in the irq and completion path. 16 fix in the irq and completion path.
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7650707a40de..44f202f33101 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -121,6 +121,7 @@ static const struct {
121 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" }, 121 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
122 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" }, 122 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
123 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" }, 123 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
124 { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
124 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" }, 125 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
125 126
126 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" }, 127 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
@@ -278,13 +279,6 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
278 rsp->data.info.rsp_code)) 279 rsp->data.info.rsp_code))
279 return DID_ERROR << 16; 280 return DID_ERROR << 16;
280 281
281 if (!vfc_cmd->status) {
282 if (rsp->flags & FCP_RESID_OVER)
283 return rsp->scsi_status | (DID_ERROR << 16);
284 else
285 return rsp->scsi_status | (DID_OK << 16);
286 }
287
288 err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error); 282 err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
289 if (err >= 0) 283 if (err >= 0)
290 return rsp->scsi_status | (cmd_status[err].result << 16); 284 return rsp->scsi_status | (cmd_status[err].result << 16);
@@ -503,6 +497,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
503 case IBMVFC_HOST_ACTION_INIT: 497 case IBMVFC_HOST_ACTION_INIT:
504 case IBMVFC_HOST_ACTION_TGT_DEL: 498 case IBMVFC_HOST_ACTION_TGT_DEL:
505 case IBMVFC_HOST_ACTION_QUERY_TGTS: 499 case IBMVFC_HOST_ACTION_QUERY_TGTS:
500 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
506 case IBMVFC_HOST_ACTION_TGT_ADD: 501 case IBMVFC_HOST_ACTION_TGT_ADD:
507 case IBMVFC_HOST_ACTION_NONE: 502 case IBMVFC_HOST_ACTION_NONE:
508 default: 503 default:
@@ -566,7 +561,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
566 struct ibmvfc_target *tgt; 561 struct ibmvfc_target *tgt;
567 562
568 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { 563 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
569 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) { 564 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
570 dev_err(vhost->dev, 565 dev_err(vhost->dev,
571 "Host initialization retries exceeded. Taking adapter offline\n"); 566 "Host initialization retries exceeded. Taking adapter offline\n");
572 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); 567 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
@@ -765,6 +760,9 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
765 cmnd->scsi_done(cmnd); 760 cmnd->scsi_done(cmnd);
766 } 761 }
767 762
763 if (evt->eh_comp)
764 complete(evt->eh_comp);
765
768 ibmvfc_free_event(evt); 766 ibmvfc_free_event(evt);
769} 767}
770 768
@@ -847,11 +845,12 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
847static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) 845static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
848{ 846{
849 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { 847 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
850 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) { 848 vhost->delay_init = 1;
849 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
851 dev_err(vhost->dev, 850 dev_err(vhost->dev,
852 "Host initialization retries exceeded. Taking adapter offline\n"); 851 "Host initialization retries exceeded. Taking adapter offline\n");
853 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); 852 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
854 } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES) 853 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
855 __ibmvfc_reset_host(vhost); 854 __ibmvfc_reset_host(vhost);
856 else 855 else
857 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 856 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -1252,6 +1251,7 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
1252 evt->sync_iu = NULL; 1251 evt->sync_iu = NULL;
1253 evt->crq.format = format; 1252 evt->crq.format = format;
1254 evt->done = done; 1253 evt->done = done;
1254 evt->eh_comp = NULL;
1255} 1255}
1256 1256
1257/** 1257/**
@@ -1381,6 +1381,8 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
1381 add_timer(&evt->timer); 1381 add_timer(&evt->timer);
1382 } 1382 }
1383 1383
1384 mb();
1385
1384 if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) { 1386 if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
1385 list_del(&evt->queue); 1387 list_del(&evt->queue);
1386 del_timer(&evt->timer); 1388 del_timer(&evt->timer);
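The mb() added before ibmvfc_send_crq() above ensures every store that builds the command element is globally visible before the hypervisor call that hands it to the VIOS; the matching change in ibmvfc.h marks the CRQ fields volatile on the consumer side. A rough userspace analogue of that publish-then-signal ordering, using a C11 release fence instead of the kernel's mb() (the descriptor layout below is invented purely for illustration):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented descriptor: a consumer polls 'valid' and then reads 'ioba'. */
    struct crq_entry {
        _Atomic uint8_t valid;
        uint64_t ioba;
    };

    static void post_entry(struct crq_entry *e, uint64_t ioba)
    {
        e->ioba = ioba;                             /* fill in the payload      */
        atomic_thread_fence(memory_order_release);  /* plays the role of mb()   */
        atomic_store_explicit(&e->valid, 1,
                              memory_order_relaxed);/* only now let it be seen  */
    }

    int main(void)
    {
        struct crq_entry e = { 0 };

        post_entry(&e, 0xdeadbeef);
        printf("valid=%u ioba=%#llx\n",
               (unsigned)atomic_load(&e.valid),
               (unsigned long long)e.ioba);
        return 0;
    }

A consumer that loads 'valid' with acquire semantics before reading 'ioba' is then guaranteed to see a fully written descriptor.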
@@ -1477,6 +1479,11 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1477 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; 1479 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1478 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) 1480 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1479 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); 1481 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1482 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1483 ibmvfc_reinit_host(evt->vhost);
1484
1485 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1486 cmnd->result = (DID_ERROR << 16);
1480 1487
1481 ibmvfc_log_error(evt); 1488 ibmvfc_log_error(evt);
1482 } 1489 }
@@ -1489,6 +1496,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1489 cmnd->scsi_done(cmnd); 1496 cmnd->scsi_done(cmnd);
1490 } 1497 }
1491 1498
1499 if (evt->eh_comp)
1500 complete(evt->eh_comp);
1501
1492 ibmvfc_free_event(evt); 1502 ibmvfc_free_event(evt);
1493} 1503}
1494 1504
@@ -1627,7 +1637,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
1627 struct ibmvfc_host *vhost = shost_priv(sdev->host); 1637 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1628 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1638 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1629 struct ibmvfc_cmd *tmf; 1639 struct ibmvfc_cmd *tmf;
1630 struct ibmvfc_event *evt; 1640 struct ibmvfc_event *evt = NULL;
1631 union ibmvfc_iu rsp_iu; 1641 union ibmvfc_iu rsp_iu;
1632 struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp; 1642 struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
1633 int rsp_rc = -EBUSY; 1643 int rsp_rc = -EBUSY;
@@ -1789,7 +1799,8 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
1789static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) 1799static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
1790{ 1800{
1791 struct ibmvfc_host *vhost = shost_priv(sdev->host); 1801 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1792 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1802 struct scsi_target *starget = scsi_target(sdev);
1803 struct fc_rport *rport = starget_to_rport(starget);
1793 struct ibmvfc_tmf *tmf; 1804 struct ibmvfc_tmf *tmf;
1794 struct ibmvfc_event *evt, *found_evt; 1805 struct ibmvfc_event *evt, *found_evt;
1795 union ibmvfc_iu rsp; 1806 union ibmvfc_iu rsp;
@@ -1827,7 +1838,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
1827 int_to_scsilun(sdev->lun, &tmf->lun); 1838 int_to_scsilun(sdev->lun, &tmf->lun);
1828 tmf->flags = (type | IBMVFC_TMF_LUA_VALID); 1839 tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
1829 tmf->cancel_key = (unsigned long)sdev->hostdata; 1840 tmf->cancel_key = (unsigned long)sdev->hostdata;
1830 tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata); 1841 tmf->my_cancel_key = (unsigned long)starget->hostdata;
1831 1842
1832 evt->sync_iu = &rsp; 1843 evt->sync_iu = &rsp;
1833 init_completion(&evt->comp); 1844 init_completion(&evt->comp);
@@ -1859,6 +1870,91 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
1859} 1870}
1860 1871
1861/** 1872/**
1873 * ibmvfc_match_target - Match function for specified target
1874 * @evt: ibmvfc event struct
1875 * @device: device to match (starget)
1876 *
1877 * Returns:
1878 * 1 if event matches starget / 0 if event does not match starget
1879 **/
1880static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
1881{
1882 if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
1883 return 1;
1884 return 0;
1885}
1886
1887/**
1888 * ibmvfc_match_lun - Match function for specified LUN
1889 * @evt: ibmvfc event struct
1890 * @device: device to match (sdev)
1891 *
1892 * Returns:
1893 * 1 if event matches sdev / 0 if event does not match sdev
1894 **/
1895static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
1896{
1897 if (evt->cmnd && evt->cmnd->device == device)
1898 return 1;
1899 return 0;
1900}
1901
1902/**
1903 * ibmvfc_wait_for_ops - Wait for ops to complete
1904 * @vhost: ibmvfc host struct
1905 * @device: device to match (starget or sdev)
1906 * @match: match function
1907 *
1908 * Returns:
1909 * SUCCESS / FAILED
1910 **/
1911static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
1912 int (*match) (struct ibmvfc_event *, void *))
1913{
1914 struct ibmvfc_event *evt;
1915 DECLARE_COMPLETION_ONSTACK(comp);
1916 int wait;
1917 unsigned long flags;
1918 signed long timeout = init_timeout * HZ;
1919
1920 ENTER;
1921 do {
1922 wait = 0;
1923 spin_lock_irqsave(vhost->host->host_lock, flags);
1924 list_for_each_entry(evt, &vhost->sent, queue) {
1925 if (match(evt, device)) {
1926 evt->eh_comp = &comp;
1927 wait++;
1928 }
1929 }
1930 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1931
1932 if (wait) {
1933 timeout = wait_for_completion_timeout(&comp, timeout);
1934
1935 if (!timeout) {
1936 wait = 0;
1937 spin_lock_irqsave(vhost->host->host_lock, flags);
1938 list_for_each_entry(evt, &vhost->sent, queue) {
1939 if (match(evt, device)) {
1940 evt->eh_comp = NULL;
1941 wait++;
1942 }
1943 }
1944 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1945 if (wait)
1946 dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
1947 LEAVE;
1948 return wait ? FAILED : SUCCESS;
1949 }
1950 }
1951 } while (wait);
1952
1953 LEAVE;
1954 return SUCCESS;
1955}
1956
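ibmvfc_wait_for_ops() above tags every in-flight event that matches the device with an on-stack completion, waits with a timeout, and on timeout walks the sent list again to detach the completion before reporting FAILED. A compact sketch of that match-callback plus timed-wait shape in userspace C, with a pthread mutex/condvar standing in for the host lock and completion (the op list and match_lun() helper are made up for the example; a real completer would mark the op done and signal the condvar):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    /* Illustrative "sent op" list; in the driver this is vhost->sent. */
    struct op { int lun; int done; struct op *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;

    typedef int (*match_fn)(struct op *, void *);

    static int match_lun(struct op *o, void *dev) { return o->lun == *(int *)dev; }

    /* Wait until every op matching 'dev' has completed, or the timeout expires. */
    static int wait_for_ops(struct op *list, void *dev, match_fn match, int secs)
    {
        struct timespec ts;
        struct op *o;
        int pending;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += secs;

        pthread_mutex_lock(&lock);
        do {
            pending = 0;
            for (o = list; o; o = o->next)          /* count matching, unfinished ops */
                if (match(o, dev) && !o->done)
                    pending++;
            if (pending &&
                pthread_cond_timedwait(&cv, &lock, &ts) != 0)
                break;                              /* timed out with work outstanding */
        } while (pending);
        pthread_mutex_unlock(&lock);

        return pending ? -1 /* FAILED */ : 0 /* SUCCESS */;
    }

    int main(void)
    {
        struct op b = { .lun = 2, .done = 1, .next = NULL };
        struct op a = { .lun = 1, .done = 1, .next = &b };
        int lun = 1;

        printf("%s\n", wait_for_ops(&a, &lun, match_lun, 1) ? "FAILED" : "SUCCESS");
        return 0;
    }

Passing the match function as a parameter is what lets the abort, LUN-reset and target-reset handlers share one wait routine, exactly as the two ibmvfc_match_*() helpers do above.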
1957/**
1862 * ibmvfc_eh_abort_handler - Abort a command 1958 * ibmvfc_eh_abort_handler - Abort a command
1863 * @cmd: scsi command to abort 1959 * @cmd: scsi command to abort
1864 * 1960 *
@@ -1867,29 +1963,21 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
1867 **/ 1963 **/
1868static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) 1964static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
1869{ 1965{
1870 struct ibmvfc_host *vhost = shost_priv(cmd->device->host); 1966 struct scsi_device *sdev = cmd->device;
1871 struct ibmvfc_event *evt, *pos; 1967 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1872 int cancel_rc, abort_rc; 1968 int cancel_rc, abort_rc;
1873 unsigned long flags; 1969 int rc = FAILED;
1874 1970
1875 ENTER; 1971 ENTER;
1876 ibmvfc_wait_while_resetting(vhost); 1972 ibmvfc_wait_while_resetting(vhost);
1877 cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET); 1973 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
1878 abort_rc = ibmvfc_abort_task_set(cmd->device); 1974 abort_rc = ibmvfc_abort_task_set(sdev);
1879 1975
1880 if (!cancel_rc && !abort_rc) { 1976 if (!cancel_rc && !abort_rc)
1881 spin_lock_irqsave(vhost->host->host_lock, flags); 1977 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
1882 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1883 if (evt->cmnd && evt->cmnd->device == cmd->device)
1884 ibmvfc_fail_request(evt, DID_ABORT);
1885 }
1886 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1887 LEAVE;
1888 return SUCCESS;
1889 }
1890 1978
1891 LEAVE; 1979 LEAVE;
1892 return FAILED; 1980 return rc;
1893} 1981}
1894 1982
1895/** 1983/**
@@ -1901,29 +1989,21 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
1901 **/ 1989 **/
1902static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) 1990static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
1903{ 1991{
1904 struct ibmvfc_host *vhost = shost_priv(cmd->device->host); 1992 struct scsi_device *sdev = cmd->device;
1905 struct ibmvfc_event *evt, *pos; 1993 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1906 int cancel_rc, reset_rc; 1994 int cancel_rc, reset_rc;
1907 unsigned long flags; 1995 int rc = FAILED;
1908 1996
1909 ENTER; 1997 ENTER;
1910 ibmvfc_wait_while_resetting(vhost); 1998 ibmvfc_wait_while_resetting(vhost);
1911 cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET); 1999 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
1912 reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN"); 2000 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
1913 2001
1914 if (!cancel_rc && !reset_rc) { 2002 if (!cancel_rc && !reset_rc)
1915 spin_lock_irqsave(vhost->host->host_lock, flags); 2003 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
1916 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1917 if (evt->cmnd && evt->cmnd->device == cmd->device)
1918 ibmvfc_fail_request(evt, DID_ABORT);
1919 }
1920 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1921 LEAVE;
1922 return SUCCESS;
1923 }
1924 2004
1925 LEAVE; 2005 LEAVE;
1926 return FAILED; 2006 return rc;
1927} 2007}
1928 2008
1929/** 2009/**
@@ -1959,31 +2039,23 @@ static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
1959 **/ 2039 **/
1960static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) 2040static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
1961{ 2041{
1962 struct ibmvfc_host *vhost = shost_priv(cmd->device->host); 2042 struct scsi_device *sdev = cmd->device;
1963 struct scsi_target *starget = scsi_target(cmd->device); 2043 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1964 struct ibmvfc_event *evt, *pos; 2044 struct scsi_target *starget = scsi_target(sdev);
1965 int reset_rc; 2045 int reset_rc;
2046 int rc = FAILED;
1966 unsigned long cancel_rc = 0; 2047 unsigned long cancel_rc = 0;
1967 unsigned long flags;
1968 2048
1969 ENTER; 2049 ENTER;
1970 ibmvfc_wait_while_resetting(vhost); 2050 ibmvfc_wait_while_resetting(vhost);
1971 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all); 2051 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
1972 reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target"); 2052 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
1973 2053
1974 if (!cancel_rc && !reset_rc) { 2054 if (!cancel_rc && !reset_rc)
1975 spin_lock_irqsave(vhost->host->host_lock, flags); 2055 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
1976 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1977 if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
1978 ibmvfc_fail_request(evt, DID_ABORT);
1979 }
1980 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1981 LEAVE;
1982 return SUCCESS;
1983 }
1984 2056
1985 LEAVE; 2057 LEAVE;
1986 return FAILED; 2058 return rc;
1987} 2059}
1988 2060
1989/** 2061/**
@@ -2013,23 +2085,18 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2013 struct scsi_target *starget = to_scsi_target(&rport->dev); 2085 struct scsi_target *starget = to_scsi_target(&rport->dev);
2014 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2086 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2015 struct ibmvfc_host *vhost = shost_priv(shost); 2087 struct ibmvfc_host *vhost = shost_priv(shost);
2016 struct ibmvfc_event *evt, *pos;
2017 unsigned long cancel_rc = 0; 2088 unsigned long cancel_rc = 0;
2018 unsigned long abort_rc = 0; 2089 unsigned long abort_rc = 0;
2019 unsigned long flags; 2090 int rc = FAILED;
2020 2091
2021 ENTER; 2092 ENTER;
2022 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all); 2093 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
2023 starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all); 2094 starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
2024 2095
2025 if (!cancel_rc && !abort_rc) { 2096 if (!cancel_rc && !abort_rc)
2026 spin_lock_irqsave(shost->host_lock, flags); 2097 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2027 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) { 2098
2028 if (evt->cmnd && scsi_target(evt->cmnd->device) == starget) 2099 if (rc == FAILED)
2029 ibmvfc_fail_request(evt, DID_ABORT);
2030 }
2031 spin_unlock_irqrestore(shost->host_lock, flags);
2032 } else
2033 ibmvfc_issue_fc_host_lip(shost); 2100 ibmvfc_issue_fc_host_lip(shost);
2034 LEAVE; 2101 LEAVE;
2035} 2102}
@@ -2089,15 +2156,17 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2089 case IBMVFC_AE_LINK_UP: 2156 case IBMVFC_AE_LINK_UP:
2090 case IBMVFC_AE_RESUME: 2157 case IBMVFC_AE_RESUME:
2091 vhost->events_to_log |= IBMVFC_AE_LINKUP; 2158 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2092 ibmvfc_init_host(vhost, 1); 2159 vhost->delay_init = 1;
2160 __ibmvfc_reset_host(vhost);
2093 break; 2161 break;
2094 case IBMVFC_AE_SCN_FABRIC: 2162 case IBMVFC_AE_SCN_FABRIC:
2163 case IBMVFC_AE_SCN_DOMAIN:
2095 vhost->events_to_log |= IBMVFC_AE_RSCN; 2164 vhost->events_to_log |= IBMVFC_AE_RSCN;
2096 ibmvfc_init_host(vhost, 1); 2165 vhost->delay_init = 1;
2166 __ibmvfc_reset_host(vhost);
2097 break; 2167 break;
2098 case IBMVFC_AE_SCN_NPORT: 2168 case IBMVFC_AE_SCN_NPORT:
2099 case IBMVFC_AE_SCN_GROUP: 2169 case IBMVFC_AE_SCN_GROUP:
2100 case IBMVFC_AE_SCN_DOMAIN:
2101 vhost->events_to_log |= IBMVFC_AE_RSCN; 2170 vhost->events_to_log |= IBMVFC_AE_RSCN;
2102 case IBMVFC_AE_ELS_LOGO: 2171 case IBMVFC_AE_ELS_LOGO:
2103 case IBMVFC_AE_ELS_PRLO: 2172 case IBMVFC_AE_ELS_PRLO:
@@ -2263,6 +2332,28 @@ static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2263} 2332}
2264 2333
2265/** 2334/**
2335 * ibmvfc_target_alloc - Setup the target's task set value
2336 * @starget: struct scsi_target
2337 *
2338 * Set the target's task set value so that error handling works as
2339 * expected.
2340 *
2341 * Returns:
2342 * 0 on success / -ENXIO if device does not exist
2343 **/
2344static int ibmvfc_target_alloc(struct scsi_target *starget)
2345{
2346 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2347 struct ibmvfc_host *vhost = shost_priv(shost);
2348 unsigned long flags = 0;
2349
2350 spin_lock_irqsave(shost->host_lock, flags);
2351 starget->hostdata = (void *)(unsigned long)vhost->task_set++;
2352 spin_unlock_irqrestore(shost->host_lock, flags);
2353 return 0;
2354}
2355
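ibmvfc_target_alloc() above hands each new SCSI target a unique cookie by post-incrementing vhost->task_set under the host lock; ibmvfc_cancel_all() then uses that cookie as my_cancel_key in place of the removed IBMVFC_TMF_CANCEL_KEY constant. A tiny standalone sketch of the same allocate-a-key-under-a-lock idiom (struct names invented):

    #include <pthread.h>
    #include <stdio.h>

    /* Invented stand-ins for ibmvfc_host / scsi_target. */
    struct host   { pthread_mutex_t lock; unsigned long task_set; };
    struct target { unsigned long cancel_key; };

    static void target_alloc(struct host *h, struct target *t)
    {
        /* Serialize against concurrent allocations, like the host lock. */
        pthread_mutex_lock(&h->lock);
        t->cancel_key = h->task_set++;
        pthread_mutex_unlock(&h->lock);
    }

    int main(void)
    {
        struct host h = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct target a, b;

        target_alloc(&h, &a);
        target_alloc(&h, &b);
        printf("keys: %lu %lu\n", a.cancel_key, b.cancel_key);  /* 0 1 */
        return 0;
    }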
2356/**
2266 * ibmvfc_slave_configure - Configure the device 2357 * ibmvfc_slave_configure - Configure the device
2267 * @sdev: struct scsi_device device to configure 2358 * @sdev: struct scsi_device device to configure
2268 * 2359 *
@@ -2541,6 +2632,7 @@ static struct scsi_host_template driver_template = {
2541 .eh_host_reset_handler = ibmvfc_eh_host_reset_handler, 2632 .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
2542 .slave_alloc = ibmvfc_slave_alloc, 2633 .slave_alloc = ibmvfc_slave_alloc,
2543 .slave_configure = ibmvfc_slave_configure, 2634 .slave_configure = ibmvfc_slave_configure,
2635 .target_alloc = ibmvfc_target_alloc,
2544 .scan_finished = ibmvfc_scan_finished, 2636 .scan_finished = ibmvfc_scan_finished,
2545 .change_queue_depth = ibmvfc_change_queue_depth, 2637 .change_queue_depth = ibmvfc_change_queue_depth,
2546 .change_queue_type = ibmvfc_change_queue_type, 2638 .change_queue_type = ibmvfc_change_queue_type,
@@ -2637,7 +2729,7 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
2637 } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { 2729 } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2638 vio_disable_interrupts(vdev); 2730 vio_disable_interrupts(vdev);
2639 ibmvfc_handle_async(async, vhost); 2731 ibmvfc_handle_async(async, vhost);
2640 crq->valid = 0; 2732 async->valid = 0;
2641 } else 2733 } else
2642 done = 1; 2734 done = 1;
2643 } 2735 }
@@ -2669,7 +2761,7 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2669static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, 2761static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2670 void (*job_step) (struct ibmvfc_target *)) 2762 void (*job_step) (struct ibmvfc_target *))
2671{ 2763{
2672 if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) { 2764 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
2673 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2765 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2674 wake_up(&tgt->vhost->work_wait_q); 2766 wake_up(&tgt->vhost->work_wait_q);
2675 } else 2767 } else
@@ -2708,6 +2800,8 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2708 rsp->status, rsp->error, status); 2800 rsp->status, rsp->error, status);
2709 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2801 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2710 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); 2802 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2803 else
2804 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2711 break; 2805 break;
2712 }; 2806 };
2713 2807
@@ -2802,6 +2896,8 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2802 2896
2803 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2897 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2804 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); 2898 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2899 else
2900 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2805 break; 2901 break;
2806 }; 2902 };
2807 2903
@@ -3093,6 +3189,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3093 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3189 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3094 else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3190 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3095 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); 3191 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3192 else
3193 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3096 break; 3194 break;
3097 }; 3195 };
3098 3196
@@ -3423,6 +3521,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3423 case IBMVFC_HOST_ACTION_ALLOC_TGTS: 3521 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3424 case IBMVFC_HOST_ACTION_TGT_ADD: 3522 case IBMVFC_HOST_ACTION_TGT_ADD:
3425 case IBMVFC_HOST_ACTION_TGT_DEL: 3523 case IBMVFC_HOST_ACTION_TGT_DEL:
3524 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
3426 case IBMVFC_HOST_ACTION_QUERY: 3525 case IBMVFC_HOST_ACTION_QUERY:
3427 default: 3526 default:
3428 break; 3527 break;
@@ -3519,7 +3618,13 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3519 break; 3618 break;
3520 case IBMVFC_HOST_ACTION_INIT: 3619 case IBMVFC_HOST_ACTION_INIT:
3521 BUG_ON(vhost->state != IBMVFC_INITIALIZING); 3620 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3522 vhost->job_step(vhost); 3621 if (vhost->delay_init) {
3622 vhost->delay_init = 0;
3623 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3624 ssleep(15);
3625 return;
3626 } else
3627 vhost->job_step(vhost);
3523 break; 3628 break;
3524 case IBMVFC_HOST_ACTION_QUERY: 3629 case IBMVFC_HOST_ACTION_QUERY:
3525 list_for_each_entry(tgt, &vhost->targets, queue) 3630 list_for_each_entry(tgt, &vhost->targets, queue)
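The delay_init branch added to IBMVFC_HOST_ACTION_INIT drops the host lock before ssleep(15) and returns, letting the work thread re-run the state machine afterwards; sleeping with the spinlock held would not be allowed. A trivial userspace illustration of that release-the-lock-then-block shape (a mutex and sleep(1) stand in for the host lock and ssleep(15)):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
    static int delay_init = 1;

    /* One pass of a simplified work loop: never block with the lock held. */
    static void do_work(void)
    {
        pthread_mutex_lock(&host_lock);
        if (delay_init) {
            delay_init = 0;
            pthread_mutex_unlock(&host_lock);   /* drop the lock first          */
            sleep(1);                           /* ssleep(15) in the driver     */
            return;                             /* caller re-enters later       */
        }
        /* ... the normal job_step() work would run here, under the lock ... */
        pthread_mutex_unlock(&host_lock);
    }

    int main(void)
    {
        do_work();  /* first pass: just waits out the delay */
        do_work();  /* second pass: would do the real init step */
        printf("done\n");
        return 0;
    }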
@@ -3538,6 +3643,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3538 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL); 3643 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
3539 break; 3644 break;
3540 case IBMVFC_HOST_ACTION_TGT_DEL: 3645 case IBMVFC_HOST_ACTION_TGT_DEL:
3646 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
3541 list_for_each_entry(tgt, &vhost->targets, queue) { 3647 list_for_each_entry(tgt, &vhost->targets, queue) {
3542 if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { 3648 if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3543 tgt_dbg(tgt, "Deleting rport\n"); 3649 tgt_dbg(tgt, "Deleting rport\n");
@@ -3553,8 +3659,17 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3553 } 3659 }
3554 3660
3555 if (vhost->state == IBMVFC_INITIALIZING) { 3661 if (vhost->state == IBMVFC_INITIALIZING) {
3556 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 3662 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
3557 vhost->job_step = ibmvfc_discover_targets; 3663 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3664 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
3665 vhost->init_retries = 0;
3666 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3667 scsi_unblock_requests(vhost->host);
3668 return;
3669 } else {
3670 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
3671 vhost->job_step = ibmvfc_discover_targets;
3672 }
3558 } else { 3673 } else {
3559 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 3674 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3560 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3675 spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -3577,14 +3692,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3577 } 3692 }
3578 } 3693 }
3579 3694
3580 if (!ibmvfc_dev_init_to_do(vhost)) { 3695 if (!ibmvfc_dev_init_to_do(vhost))
3581 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); 3696 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
3582 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
3583 vhost->init_retries = 0;
3584 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3585 scsi_unblock_requests(vhost->host);
3586 return;
3587 }
3588 break; 3697 break;
3589 case IBMVFC_HOST_ACTION_TGT_ADD: 3698 case IBMVFC_HOST_ACTION_TGT_ADD:
3590 list_for_each_entry(tgt, &vhost->targets, queue) { 3699 list_for_each_entry(tgt, &vhost->targets, queue) {
@@ -3592,16 +3701,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3592 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3701 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3593 ibmvfc_tgt_add_rport(tgt); 3702 ibmvfc_tgt_add_rport(tgt);
3594 return; 3703 return;
3595 } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3596 tgt_dbg(tgt, "Deleting rport\n");
3597 rport = tgt->rport;
3598 tgt->rport = NULL;
3599 list_del(&tgt->queue);
3600 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3601 if (rport)
3602 fc_remote_port_delete(rport);
3603 kref_put(&tgt->kref, ibmvfc_release_tgt);
3604 return;
3605 } 3704 }
3606 } 3705 }
3607 3706
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index fb3177ab6691..babdf3db59df 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,11 +29,11 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.2" 32#define IBMVFC_DRIVER_VERSION "1.0.4"
33#define IBMVFC_DRIVER_DATE "(August 14, 2008)" 33#define IBMVFC_DRIVER_DATE "(November 14, 2008)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 15 35#define IBMVFC_DEFAULT_TIMEOUT 15
36#define IBMVFC_INIT_TIMEOUT 30 36#define IBMVFC_INIT_TIMEOUT 120
37#define IBMVFC_MAX_REQUESTS_DEFAULT 100 37#define IBMVFC_MAX_REQUESTS_DEFAULT 100
38 38
39#define IBMVFC_DEBUG 0 39#define IBMVFC_DEBUG 0
@@ -43,7 +43,8 @@
43#define IBMVFC_MAX_DISC_THREADS 4 43#define IBMVFC_MAX_DISC_THREADS 4
44#define IBMVFC_TGT_MEMPOOL_SZ 64 44#define IBMVFC_TGT_MEMPOOL_SZ 64
45#define IBMVFC_MAX_CMDS_PER_LUN 64 45#define IBMVFC_MAX_CMDS_PER_LUN 64
46#define IBMVFC_MAX_INIT_RETRIES 3 46#define IBMVFC_MAX_HOST_INIT_RETRIES 6
47#define IBMVFC_MAX_TGT_INIT_RETRIES 3
47#define IBMVFC_DEV_LOSS_TMO (5 * 60) 48#define IBMVFC_DEV_LOSS_TMO (5 * 60)
48#define IBMVFC_DEFAULT_LOG_LEVEL 2 49#define IBMVFC_DEFAULT_LOG_LEVEL 2
49#define IBMVFC_MAX_CDB_LEN 16 50#define IBMVFC_MAX_CDB_LEN 16
@@ -109,6 +110,7 @@ enum ibmvfc_vios_errors {
109 IBMVFC_TRANS_CANCELLED = 0x0006, 110 IBMVFC_TRANS_CANCELLED = 0x0006,
110 IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007, 111 IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
111 IBMVFC_INSUFFICIENT_RESOURCE = 0x0008, 112 IBMVFC_INSUFFICIENT_RESOURCE = 0x0008,
113 IBMVFC_PLOGI_REQUIRED = 0x0010,
112 IBMVFC_COMMAND_FAILED = 0x8000, 114 IBMVFC_COMMAND_FAILED = 0x8000,
113}; 115};
114 116
@@ -337,7 +339,6 @@ struct ibmvfc_tmf {
337#define IBMVFC_TMF_LUA_VALID 0x40 339#define IBMVFC_TMF_LUA_VALID 0x40
338 u32 cancel_key; 340 u32 cancel_key;
339 u32 my_cancel_key; 341 u32 my_cancel_key;
340#define IBMVFC_TMF_CANCEL_KEY 0x80000000
341 u32 pad; 342 u32 pad;
342 u64 reserved[2]; 343 u64 reserved[2];
343}__attribute__((packed, aligned (8))); 344}__attribute__((packed, aligned (8)));
@@ -524,10 +525,10 @@ enum ibmvfc_async_event {
524}; 525};
525 526
526struct ibmvfc_crq { 527struct ibmvfc_crq {
527 u8 valid; 528 volatile u8 valid;
528 u8 format; 529 volatile u8 format;
529 u8 reserved[6]; 530 u8 reserved[6];
530 u64 ioba; 531 volatile u64 ioba;
531}__attribute__((packed, aligned (8))); 532}__attribute__((packed, aligned (8)));
532 533
533struct ibmvfc_crq_queue { 534struct ibmvfc_crq_queue {
@@ -537,13 +538,13 @@ struct ibmvfc_crq_queue {
537}; 538};
538 539
539struct ibmvfc_async_crq { 540struct ibmvfc_async_crq {
540 u8 valid; 541 volatile u8 valid;
541 u8 pad[3]; 542 u8 pad[3];
542 u32 pad2; 543 u32 pad2;
543 u64 event; 544 volatile u64 event;
544 u64 scsi_id; 545 volatile u64 scsi_id;
545 u64 wwpn; 546 volatile u64 wwpn;
546 u64 node_name; 547 volatile u64 node_name;
547 u64 reserved; 548 u64 reserved;
548}__attribute__((packed, aligned (8))); 549}__attribute__((packed, aligned (8)));
549 550
@@ -606,6 +607,7 @@ struct ibmvfc_event {
606 struct srp_direct_buf *ext_list; 607 struct srp_direct_buf *ext_list;
607 dma_addr_t ext_list_token; 608 dma_addr_t ext_list_token;
608 struct completion comp; 609 struct completion comp;
610 struct completion *eh_comp;
609 struct timer_list timer; 611 struct timer_list timer;
610}; 612};
611 613
@@ -626,6 +628,7 @@ enum ibmvfc_host_action {
626 IBMVFC_HOST_ACTION_TGT_DEL, 628 IBMVFC_HOST_ACTION_TGT_DEL,
627 IBMVFC_HOST_ACTION_ALLOC_TGTS, 629 IBMVFC_HOST_ACTION_ALLOC_TGTS,
628 IBMVFC_HOST_ACTION_TGT_INIT, 630 IBMVFC_HOST_ACTION_TGT_INIT,
631 IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
629 IBMVFC_HOST_ACTION_TGT_ADD, 632 IBMVFC_HOST_ACTION_TGT_ADD,
630}; 633};
631 634
@@ -671,6 +674,7 @@ struct ibmvfc_host {
671 int discovery_threads; 674 int discovery_threads;
672 int client_migrated; 675 int client_migrated;
673 int reinit; 676 int reinit;
677 int delay_init;
674 int events_to_log; 678 int events_to_log;
675#define IBMVFC_AE_LINKUP 0x0001 679#define IBMVFC_AE_LINKUP 0x0001
676#define IBMVFC_AE_LINKDOWN 0x0002 680#define IBMVFC_AE_LINKDOWN 0x0002
@@ -700,7 +704,7 @@ struct ibmvfc_host {
700 704
701#define ibmvfc_log(vhost, level, ...) \ 705#define ibmvfc_log(vhost, level, ...) \
702 do { \ 706 do { \
703 if (level >= (vhost)->log_level) \ 707 if ((vhost)->log_level >= level) \
704 dev_err((vhost)->dev, ##__VA_ARGS__); \ 708 dev_err((vhost)->dev, ##__VA_ARGS__); \
705 } while (0) 709 } while (0)
706 710
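The one-character flip in ibmvfc_log() means a message is now printed when the driver's configured log_level is at least the severity passed in, which is the usual convention for level-gated logging. A standalone sketch of that behaviour, with printf() standing in for dev_err() (names are illustrative only; the ##__VA_ARGS__ comma-swallowing is the same GNU extension the kernel macro relies on):

    #include <stdio.h>

    static int log_level = 2;   /* driver-wide verbosity, e.g. IBMVFC_DEFAULT_LOG_LEVEL */

    /* Emit only when the configured verbosity is >= the message level. */
    #define my_log(level, fmt, ...)                 \
        do {                                        \
            if (log_level >= (level))               \
                printf(fmt, ##__VA_ARGS__);         \
        } while (0)

    int main(void)
    {
        my_log(1, "level 1: shown at verbosity 2\n");
        my_log(3, "level 3: suppressed at verbosity 2\n");
        return 0;
    }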
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 6cad1758243a..868d35ea01bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -107,7 +107,7 @@ module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
107MODULE_PARM_DESC(max_channel, "Largest channel value"); 107MODULE_PARM_DESC(max_channel, "Largest channel value");
108module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); 108module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
109MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); 109MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
110module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR); 110module_param_named(max_requests, max_requests, int, S_IRUGO);
111MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); 111MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
112 112
113/* ------------------------------------------------------------ 113/* ------------------------------------------------------------
@@ -1657,7 +1657,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1657 1657
1658 vdev->dev.driver_data = NULL; 1658 vdev->dev.driver_data = NULL;
1659 1659
1660 driver_template.can_queue = max_requests; 1660 driver_template.can_queue = max_requests - 2;
1661 host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); 1661 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1662 if (!host) { 1662 if (!host) {
1663 dev_err(&vdev->dev, "couldn't allocate host data\n"); 1663 dev_err(&vdev->dev, "couldn't allocate host data\n");
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 2370fd82ebfe..c24140aff8e7 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -578,6 +578,8 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
578{ 578{
579 idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host); 579 idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
580 ide_drive_t *drive = scsi->drive; 580 ide_drive_t *drive = scsi->drive;
581 ide_hwif_t *hwif;
582 ide_hwgroup_t *hwgroup;
581 int busy; 583 int busy;
582 int ret = FAILED; 584 int ret = FAILED;
583 585
@@ -594,13 +596,16 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
594 goto no_drive; 596 goto no_drive;
595 } 597 }
596 598
597 /* First give it some more time, how much is "right" is hard to say :-( */ 599 hwif = drive->hwif;
600 hwgroup = hwif->hwgroup;
598 601
599 busy = ide_wait_not_busy(HWIF(drive), 100); /* FIXME - uses mdelay which causes latency? */ 602 /* First give it some more time, how much is "right" is hard to say :-(
603 FIXME - uses mdelay which causes latency? */
604 busy = ide_wait_not_busy(hwif, 100);
600 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 605 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
601 printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":""); 606 printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":"");
602 607
603 spin_lock_irq(&ide_lock); 608 spin_lock_irq(&hwgroup->lock);
604 609
605 /* If there is no pc running we're done (our interrupt took care of it) */ 610 /* If there is no pc running we're done (our interrupt took care of it) */
606 pc = drive->pc; 611 pc = drive->pc;
@@ -629,7 +634,7 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
629 } 634 }
630 635
631ide_unlock: 636ide_unlock:
632 spin_unlock_irq(&ide_lock); 637 spin_unlock_irq(&hwgroup->lock);
633no_drive: 638no_drive:
634 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 639 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
635 printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed"); 640 printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed");
@@ -642,6 +647,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
642 struct request *req; 647 struct request *req;
643 idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host); 648 idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
644 ide_drive_t *drive = scsi->drive; 649 ide_drive_t *drive = scsi->drive;
650 ide_hwgroup_t *hwgroup;
645 int ready = 0; 651 int ready = 0;
646 int ret = SUCCESS; 652 int ret = SUCCESS;
647 653
@@ -658,14 +664,18 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
658 return FAILED; 664 return FAILED;
659 } 665 }
660 666
667 hwgroup = drive->hwif->hwgroup;
668
661 spin_lock_irq(cmd->device->host->host_lock); 669 spin_lock_irq(cmd->device->host->host_lock);
662 spin_lock(&ide_lock); 670 spin_lock(&hwgroup->lock);
663 671
664 pc = drive->pc; 672 pc = drive->pc;
673 if (pc)
674 req = pc->rq;
665 675
666 if (pc == NULL || (req = pc->rq) != HWGROUP(drive)->rq || !HWGROUP(drive)->handler) { 676 if (pc == NULL || req != hwgroup->rq || hwgroup->handler == NULL) {
667 printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n"); 677 printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n");
668 spin_unlock(&ide_lock); 678 spin_unlock(&hwgroup->lock);
669 spin_unlock_irq(cmd->device->host->host_lock); 679 spin_unlock_irq(cmd->device->host->host_lock);
670 return FAILED; 680 return FAILED;
671 } 681 }
@@ -685,10 +695,10 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
685 BUG(); 695 BUG();
686 } 696 }
687 697
688 HWGROUP(drive)->rq = NULL; 698 hwgroup->rq = NULL;
689 HWGROUP(drive)->handler = NULL; 699 hwgroup->handler = NULL;
690 HWGROUP(drive)->busy = 1; /* will set this to zero when ide reset finished */ 700 hwgroup->busy = 1; /* will set this to zero when ide reset finished */
691 spin_unlock(&ide_lock); 701 spin_unlock(&hwgroup->lock);
692 702
693 ide_do_reset(drive); 703 ide_do_reset(drive);
694 704
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 8053b1e86ccb..52bdc6df6b92 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -107,7 +107,7 @@
107 * this thing into as good a shape as possible, and I'm positive 107 * this thing into as good a shape as possible, and I'm positive
108 * there are lots of lurking bugs and "Stupid Places". 108 * there are lots of lurking bugs and "Stupid Places".
109 * 109 *
110 * Updated for Linux 2.5 by Alan Cox <alan@redhat.com> 110 * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
111 * - Using new_eh handler 111 * - Using new_eh handler
112 * - Hopefully got all the locking right again 112 * - Hopefully got all the locking right again
113 * See "FIXME" notes for items that could do with more work 113 * See "FIXME" notes for items that could do with more work
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index e3f739776bad..5529518ff2fa 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -4,7 +4,7 @@
4 * Copyright (c) 1994-1998 Initio Corporation 4 * Copyright (c) 1994-1998 Initio Corporation
5 * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl> 5 * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
6 * Copyright (c) 2004 Christoph Hellwig <hch@lst.de> 6 * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
7 * Copyright (c) 2007 Red Hat <alan@redhat.com> 7 * Copyright (c) 2007 Red Hat
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index cb48efa81fe2..e58af9e95506 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -4,7 +4,7 @@
4 * Copyright (c) 1994-1998 Initio Corporation 4 * Copyright (c) 1994-1998 Initio Corporation
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com> 7 * Cleanups (c) Copyright 2007 Red Hat <alan@lxorguk.ukuu.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index ded854a6dd35..0edfb1fa63ce 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5389,9 +5389,9 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5389 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5389 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5390 wake_up_all(&ioa_cfg->reset_wait_q); 5390 wake_up_all(&ioa_cfg->reset_wait_q);
5391 5391
5392 spin_unlock_irq(ioa_cfg->host->host_lock); 5392 spin_unlock(ioa_cfg->host->host_lock);
5393 scsi_unblock_requests(ioa_cfg->host); 5393 scsi_unblock_requests(ioa_cfg->host);
5394 spin_lock_irq(ioa_cfg->host->host_lock); 5394 spin_lock(ioa_cfg->host->host_lock);
5395 5395
5396 if (!ioa_cfg->allow_cmds) 5396 if (!ioa_cfg->allow_cmds)
5397 scsi_block_requests(ioa_cfg->host); 5397 scsi_block_requests(ioa_cfg->host);
@@ -7473,7 +7473,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7473 goto out_scsi_host_put; 7473 goto out_scsi_host_put;
7474 } 7474 }
7475 7475
7476 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0)); 7476 ipr_regs = pci_ioremap_bar(pdev, 0);
7477 7477
7478 if (!ipr_regs) { 7478 if (!ipr_regs) {
7479 dev_err(&pdev->dev, 7479 dev_err(&pdev->dev,
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4871dd1f2582..59459141b437 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -19,7 +19,7 @@
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 * 21 *
22 * Alan Cox <alan@redhat.com> - Removed several careless u32/dma_addr_t errors 22 * Alan Cox <alan@lxorguk.ukuu.org.uk> - Removed several careless u32/dma_addr_t errors
23 * that broke 64bit platforms. 23 * that broke 64bit platforms.
24 */ 24 */
25 25
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index e11bce6ab63c..23808dfe22ba 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -27,7 +27,6 @@
27 */ 27 */
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/list.h>
31#include <linux/inet.h> 30#include <linux/inet.h>
32#include <linux/file.h> 31#include <linux/file.h>
33#include <linux/blkdev.h> 32#include <linux/blkdev.h>
@@ -44,12 +43,12 @@
44 43
45#include "iscsi_tcp.h" 44#include "iscsi_tcp.h"
46 45
47MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " 46MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
47 "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
48 "Alex Aizman <itn780@yahoo.com>"); 48 "Alex Aizman <itn780@yahoo.com>");
49MODULE_DESCRIPTION("iSCSI/TCP data-path"); 49MODULE_DESCRIPTION("iSCSI/TCP data-path");
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51#undef DEBUG_TCP 51#undef DEBUG_TCP
52#define DEBUG_ASSERT
53 52
54#ifdef DEBUG_TCP 53#ifdef DEBUG_TCP
55#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt) 54#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
@@ -57,934 +56,41 @@ MODULE_LICENSE("GPL");
57#define debug_tcp(fmt...) 56#define debug_tcp(fmt...)
58#endif 57#endif
59 58
60#ifndef DEBUG_ASSERT 59static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
61#ifdef BUG_ON 60static struct scsi_host_template iscsi_sw_tcp_sht;
62#undef BUG_ON 61static struct iscsi_transport iscsi_sw_tcp_transport;
63#endif
64#define BUG_ON(expr)
65#endif
66
67static struct scsi_transport_template *iscsi_tcp_scsi_transport;
68static struct scsi_host_template iscsi_sht;
69static struct iscsi_transport iscsi_tcp_transport;
70 62
71static unsigned int iscsi_max_lun = 512; 63static unsigned int iscsi_max_lun = 512;
72module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); 64module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
73 65
74static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
75 struct iscsi_segment *segment);
76
77/*
78 * Scatterlist handling: inside the iscsi_segment, we
79 * remember an index into the scatterlist, and set data/size
80 * to the current scatterlist entry. For highmem pages, we
81 * kmap as needed.
82 *
83 * Note that the page is unmapped when we return from
84 * TCP's data_ready handler, so we may end up mapping and
85 * unmapping the same page repeatedly. The whole reason
86 * for this is that we shouldn't keep the page mapped
87 * outside the softirq.
88 */
89
90/**
91 * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
92 * @segment: the buffer object
93 * @sg: scatterlist
94 * @offset: byte offset into that sg entry
95 *
96 * This function sets up the segment so that subsequent
97 * data is copied to the indicated sg entry, at the given
98 * offset.
99 */
100static inline void
101iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
102 struct scatterlist *sg, unsigned int offset)
103{
104 segment->sg = sg;
105 segment->sg_offset = offset;
106 segment->size = min(sg->length - offset,
107 segment->total_size - segment->total_copied);
108 segment->data = NULL;
109}
110
111/**
112 * iscsi_tcp_segment_map - map the current S/G page
113 * @segment: iscsi_segment
114 * @recv: 1 if called from recv path
115 *
116 * We only need to possibly kmap data if scatter lists are being used,
117 * because the iscsi passthrough and internal IO paths will never use high
118 * mem pages.
119 */
120static inline void
121iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
122{
123 struct scatterlist *sg;
124
125 if (segment->data != NULL || !segment->sg)
126 return;
127
128 sg = segment->sg;
129 BUG_ON(segment->sg_mapped);
130 BUG_ON(sg->length == 0);
131
132 /*
133 * If the page count is greater than one it is ok to send
134 * to the network layer's zero copy send path. If not we
135 * have to go the slow sendmsg path. We always map for the
136 * recv path.
137 */
138 if (page_count(sg_page(sg)) >= 1 && !recv)
139 return;
140
141 debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
142 segment);
143 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
144 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
145}
146
147static inline void
148iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
149{
150 debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
151
152 if (segment->sg_mapped) {
153 debug_tcp("iscsi_tcp_segment_unmap valid\n");
154 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
155 segment->sg_mapped = NULL;
156 segment->data = NULL;
157 }
158}
159
160/*
161 * Splice the digest buffer into the buffer
162 */
163static inline void
164iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
165{
166 segment->data = digest;
167 segment->digest_len = ISCSI_DIGEST_SIZE;
168 segment->total_size += ISCSI_DIGEST_SIZE;
169 segment->size = ISCSI_DIGEST_SIZE;
170 segment->copied = 0;
171 segment->sg = NULL;
172 segment->hash = NULL;
173}
174
175/**
176 * iscsi_tcp_segment_done - check whether the segment is complete
177 * @segment: iscsi segment to check
 178 * @recv: set to one if this is called from the recv path
179 * @copied: number of bytes copied
180 *
181 * Check if we're done receiving this segment. If the receive
182 * buffer is full but we expect more data, move on to the
183 * next entry in the scatterlist.
184 *
185 * If the amount of data we received isn't a multiple of 4,
186 * we will transparently receive the pad bytes, too.
187 *
188 * This function must be re-entrant.
189 */
190static inline int
191iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
192{
193 static unsigned char padbuf[ISCSI_PAD_LEN];
194 struct scatterlist sg;
195 unsigned int pad;
196
197 debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
198 segment->size, recv ? "recv" : "xmit");
199 if (segment->hash && copied) {
200 /*
201 * If a segment is kmapd we must unmap it before sending
202 * to the crypto layer since that will try to kmap it again.
203 */
204 iscsi_tcp_segment_unmap(segment);
205
206 if (!segment->data) {
207 sg_init_table(&sg, 1);
208 sg_set_page(&sg, sg_page(segment->sg), copied,
209 segment->copied + segment->sg_offset +
210 segment->sg->offset);
211 } else
212 sg_init_one(&sg, segment->data + segment->copied,
213 copied);
214 crypto_hash_update(segment->hash, &sg, copied);
215 }
216
217 segment->copied += copied;
218 if (segment->copied < segment->size) {
219 iscsi_tcp_segment_map(segment, recv);
220 return 0;
221 }
222
223 segment->total_copied += segment->copied;
224 segment->copied = 0;
225 segment->size = 0;
226
227 /* Unmap the current scatterlist page, if there is one. */
228 iscsi_tcp_segment_unmap(segment);
229
230 /* Do we have more scatterlist entries? */
231 debug_tcp("total copied %u total size %u\n", segment->total_copied,
232 segment->total_size);
233 if (segment->total_copied < segment->total_size) {
234 /* Proceed to the next entry in the scatterlist. */
235 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
236 0);
237 iscsi_tcp_segment_map(segment, recv);
238 BUG_ON(segment->size == 0);
239 return 0;
240 }
241
242 /* Do we need to handle padding? */
243 pad = iscsi_padding(segment->total_copied);
244 if (pad != 0) {
245 debug_tcp("consume %d pad bytes\n", pad);
246 segment->total_size += pad;
247 segment->size = pad;
248 segment->data = padbuf;
249 return 0;
250 }
251
252 /*
253 * Set us up for transferring the data digest. hdr digest
254 * is completely handled in hdr done function.
255 */
256 if (segment->hash) {
257 crypto_hash_final(segment->hash, segment->digest);
258 iscsi_tcp_segment_splice_digest(segment,
259 recv ? segment->recv_digest : segment->digest);
260 return 0;
261 }
262
263 return 1;
264}
265
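iscsi_tcp_segment_done() above, once a segment's scatterlist is exhausted, queues up to three pad bytes so the PDU stays 4-byte aligned before the data digest is spliced in; iscsi_padding() only computes how many filler bytes are required. A quick userspace check of that padding arithmetic (the helper below mirrors what the driver relies on, but its body is written for this example):

    #include <assert.h>
    #include <stdio.h>

    /* Bytes needed to round 'len' up to the iSCSI 4-byte PDU alignment. */
    static unsigned int iscsi_padding(unsigned int len)
    {
        unsigned int pad = len & 3;     /* len % 4 */

        return pad ? 4 - pad : 0;
    }

    int main(void)
    {
        assert(iscsi_padding(0) == 0);
        assert(iscsi_padding(1) == 3);
        assert(iscsi_padding(4) == 0);
        assert(iscsi_padding(7) == 1);
        printf("padding of 13 bytes: %u\n", iscsi_padding(13)); /* 3 */
        return 0;
    }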
266/**
267 * iscsi_tcp_xmit_segment - transmit segment
268 * @tcp_conn: the iSCSI TCP connection
 269 * @segment: the buffer to transmit
270 *
271 * This function transmits as much of the buffer as
272 * the network layer will accept, and returns the number of
273 * bytes transmitted.
274 *
275 * If CRC hashing is enabled, the function will compute the
276 * hash as it goes. When the entire segment has been transmitted,
277 * it will retrieve the hash value and send it as well.
278 */
279static int
280iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
281 struct iscsi_segment *segment)
282{
283 struct socket *sk = tcp_conn->sock;
284 unsigned int copied = 0;
285 int r = 0;
286
287 while (!iscsi_tcp_segment_done(segment, 0, r)) {
288 struct scatterlist *sg;
289 unsigned int offset, copy;
290 int flags = 0;
291
292 r = 0;
293 offset = segment->copied;
294 copy = segment->size - offset;
295
296 if (segment->total_copied + segment->size < segment->total_size)
297 flags |= MSG_MORE;
298
299 /* Use sendpage if we can; else fall back to sendmsg */
300 if (!segment->data) {
301 sg = segment->sg;
302 offset += segment->sg_offset + sg->offset;
303 r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
304 flags);
305 } else {
306 struct msghdr msg = { .msg_flags = flags };
307 struct kvec iov = {
308 .iov_base = segment->data + offset,
309 .iov_len = copy
310 };
311
312 r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
313 }
314
315 if (r < 0) {
316 iscsi_tcp_segment_unmap(segment);
317 if (copied || r == -EAGAIN)
318 break;
319 return r;
320 }
321 copied += r;
322 }
323 return copied;
324}
325
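iscsi_tcp_xmit_segment() above keeps calling sendpage()/kernel_sendmsg() until iscsi_tcp_segment_done() reports the segment finished, accumulating whatever the network layer accepted and bailing out early only on an error with nothing yet copied. The same retry-partial-writes loop, reduced to plain write(2) on a file descriptor (error handling trimmed to the essentials; this is a sketch, not the driver's code path):

    #include <errno.h>
    #include <string.h>
    #include <unistd.h>

    /* Keep writing until 'len' bytes are out, coping with short writes. */
    static ssize_t write_all(int fd, const void *buf, size_t len)
    {
        size_t copied = 0;

        while (copied < len) {
            ssize_t r = write(fd, (const char *)buf + copied, len - copied);

            if (r < 0) {
                if (errno == EINTR)
                    continue;               /* retry, nothing was consumed */
                return copied ? (ssize_t)copied : -1;
            }
            copied += r;                    /* partial write: advance and loop */
        }
        return copied;
    }

    int main(void)
    {
        const char msg[] = "partial-write loop demo\n";

        return write_all(STDOUT_FILENO, msg, strlen(msg)) < 0;
    }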
326/**
327 * iscsi_tcp_segment_recv - copy data to segment
328 * @tcp_conn: the iSCSI TCP connection
329 * @segment: the buffer to copy to
330 * @ptr: data pointer
331 * @len: amount of data available
332 *
333 * This function copies up to @len bytes to the
334 * given buffer, and returns the number of bytes
335 * consumed, which can actually be less than @len.
336 *
337 * If hash digest is enabled, the function will update the
338 * hash while copying.
339 * Combining these two operations doesn't buy us a lot (yet),
340 * but in the future we could implement combined copy+crc,
 341 * just the way we do for network layer checksums.
342 */
343static int
344iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
345 struct iscsi_segment *segment, const void *ptr,
346 unsigned int len)
347{
348 unsigned int copy = 0, copied = 0;
349
350 while (!iscsi_tcp_segment_done(segment, 1, copy)) {
351 if (copied == len) {
352 debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
353 len);
354 break;
355 }
356
357 copy = min(len - copied, segment->size - segment->copied);
358 debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
359 memcpy(segment->data + segment->copied, ptr + copied, copy);
360 copied += copy;
361 }
362 return copied;
363}
364
365static inline void
366iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
367 unsigned char digest[ISCSI_DIGEST_SIZE])
368{
369 struct scatterlist sg;
370
371 sg_init_one(&sg, hdr, hdrlen);
372 crypto_hash_digest(hash, &sg, hdrlen, digest);
373}
374
375static inline int
376iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
377 struct iscsi_segment *segment)
378{
379 if (!segment->digest_len)
380 return 1;
381
382 if (memcmp(segment->recv_digest, segment->digest,
383 segment->digest_len)) {
384 debug_scsi("digest mismatch\n");
385 return 0;
386 }
387
388 return 1;
389}
390
391/*
392 * Helper function to set up segment buffer
393 */
394static inline void
395__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
396 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
397{
398 memset(segment, 0, sizeof(*segment));
399 segment->total_size = size;
400 segment->done = done;
401
402 if (hash) {
403 segment->hash = hash;
404 crypto_hash_init(hash);
405 }
406}
407
408static inline void
409iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
410 size_t size, iscsi_segment_done_fn_t *done,
411 struct hash_desc *hash)
412{
413 __iscsi_segment_init(segment, size, done, hash);
414 segment->data = data;
415 segment->size = size;
416}
417
418static inline int
419iscsi_segment_seek_sg(struct iscsi_segment *segment,
420 struct scatterlist *sg_list, unsigned int sg_count,
421 unsigned int offset, size_t size,
422 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
423{
424 struct scatterlist *sg;
425 unsigned int i;
426
427 debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
428 offset, size);
429 __iscsi_segment_init(segment, size, done, hash);
430 for_each_sg(sg_list, sg, sg_count, i) {
431 debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
432 sg->offset);
433 if (offset < sg->length) {
434 iscsi_tcp_segment_init_sg(segment, sg, offset);
435 return 0;
436 }
437 offset -= sg->length;
438 }
439
440 return ISCSI_ERR_DATA_OFFSET;
441}
442
443/**
444 * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
445 * @tcp_conn: iscsi connection to prep for
446 *
447 * This function always passes NULL for the hash argument, because when this
448 * function is called we do not yet know the final size of the header and want
449 * to delay the digest processing until we know that.
450 */
451static void
452iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
453{
454 debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
455 tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
456 iscsi_segment_init_linear(&tcp_conn->in.segment,
457 tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
458 iscsi_tcp_hdr_recv_done, NULL);
459}
460
461/*
462 * Handle incoming reply to any other type of command
463 */
464static int
465iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
466 struct iscsi_segment *segment)
467{
468 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
469 int rc = 0;
470
471 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
472 return ISCSI_ERR_DATA_DGST;
473
474 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
475 conn->data, tcp_conn->in.datalen);
476 if (rc)
477 return rc;
478
479 iscsi_tcp_hdr_recv_prep(tcp_conn);
480 return 0;
481}
482
483static void
484iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
485{
486 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
487 struct hash_desc *rx_hash = NULL;
488
489 if (conn->datadgst_en)
490 rx_hash = &tcp_conn->rx_hash;
491
492 iscsi_segment_init_linear(&tcp_conn->in.segment,
493 conn->data, tcp_conn->in.datalen,
494 iscsi_tcp_data_recv_done, rx_hash);
495}
496
497/*
498 * must be called with session lock
499 */
500static void
501iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
502{
503 struct iscsi_tcp_task *tcp_task = task->dd_data;
504 struct iscsi_r2t_info *r2t;
505
506 /* nothing to do for mgmt tasks */
507 if (!task->sc)
508 return;
509
510 /* flush task's r2t queues */
511 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
512 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
513 sizeof(void*));
514 debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
515 }
516
517 r2t = tcp_task->r2t;
518 if (r2t != NULL) {
519 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
520 sizeof(void*));
521 tcp_task->r2t = NULL;
522 }
523}
524
525/**
526 * iscsi_data_in - SCSI Data-In Response processing
527 * @conn: iscsi connection
528 * @task: scsi command task
529 **/
530static int
531iscsi_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
532{
533 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
534 struct iscsi_tcp_task *tcp_task = task->dd_data;
535 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
536 int datasn = be32_to_cpu(rhdr->datasn);
537 unsigned total_in_length = scsi_in(task->sc)->length;
538
539 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
540 if (tcp_conn->in.datalen == 0)
541 return 0;
542
543 if (tcp_task->exp_datasn != datasn) {
544 debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
545 __func__, tcp_task->exp_datasn, datasn);
546 return ISCSI_ERR_DATASN;
547 }
548
549 tcp_task->exp_datasn++;
550
551 tcp_task->data_offset = be32_to_cpu(rhdr->offset);
552 if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
553 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
554 __func__, tcp_task->data_offset,
555 tcp_conn->in.datalen, total_in_length);
556 return ISCSI_ERR_DATA_OFFSET;
557 }
558
559 conn->datain_pdus_cnt++;
560 return 0;
561}
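/*
 * A Data-In PDU is accepted only if its DataSN matches the expected
 * sequence number and the offset/length it carries fits inside the
 * SCSI Data-In buffer; anything else is reported as a protocol error
 * before any payload is received.
 */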
562
563/**
564 * iscsi_solicit_data_init - initialize first Data-Out
565 * @conn: iscsi connection
566 * @task: scsi command task
567 * @r2t: R2T info
568 *
569 * Notes:
570 *	Initializes the first Data-Out within this R2T sequence and finds the
571 *	proper data_offset within this SCSI command.
572 *
573 * This function is called with connection lock taken.
574 **/
575static void
576iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
577 struct iscsi_r2t_info *r2t)
578{
579 struct iscsi_data *hdr;
580
581 hdr = &r2t->dtask.hdr;
582 memset(hdr, 0, sizeof(struct iscsi_data));
583 hdr->ttt = r2t->ttt;
584 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
585 r2t->solicit_datasn++;
586 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
587 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
588 hdr->itt = task->hdr->itt;
589 hdr->exp_statsn = r2t->exp_statsn;
590 hdr->offset = cpu_to_be32(r2t->data_offset);
591 if (r2t->data_length > conn->max_xmit_dlength) {
592 hton24(hdr->dlength, conn->max_xmit_dlength);
593 r2t->data_count = conn->max_xmit_dlength;
594 hdr->flags = 0;
595 } else {
596 hton24(hdr->dlength, r2t->data_length);
597 r2t->data_count = r2t->data_length;
598 hdr->flags = ISCSI_FLAG_CMD_FINAL;
599 }
600 conn->dataout_pdus_cnt++;
601
602 r2t->sent = 0;
603}
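/*
 * The first Data-Out of an R2T sequence is capped at
 * conn->max_xmit_dlength; ISCSI_FLAG_CMD_FINAL is only set here when
 * the whole R2T fits in a single PDU, otherwise the remaining bytes
 * are carried by follow-up Data-Outs built in iscsi_solicit_data_cont().
 */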
604
605/**
606 * iscsi_r2t_rsp - iSCSI R2T Response processing
607 * @conn: iscsi connection
608 * @task: scsi command task
609 **/
610static int
611iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
612{
613 struct iscsi_r2t_info *r2t;
614 struct iscsi_session *session = conn->session;
615 struct iscsi_tcp_task *tcp_task = task->dd_data;
616 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
617 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
618 int r2tsn = be32_to_cpu(rhdr->r2tsn);
619 int rc;
620
621 if (tcp_conn->in.datalen) {
622 iscsi_conn_printk(KERN_ERR, conn,
623 "invalid R2t with datalen %d\n",
624 tcp_conn->in.datalen);
625 return ISCSI_ERR_DATALEN;
626 }
627
628	if (tcp_task->exp_datasn != r2tsn) {
629 debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
630 __func__, tcp_task->exp_datasn, r2tsn);
631 return ISCSI_ERR_R2TSN;
632 }
633
634 /* fill-in new R2T associated with the task */
635 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
636
637 if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
638 iscsi_conn_printk(KERN_INFO, conn,
639 "dropping R2T itt %d in recovery.\n",
640 task->itt);
641 return 0;
642 }
643
644 rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
645 BUG_ON(!rc);
646
647 r2t->exp_statsn = rhdr->statsn;
648 r2t->data_length = be32_to_cpu(rhdr->data_length);
649 if (r2t->data_length == 0) {
650 iscsi_conn_printk(KERN_ERR, conn,
651 "invalid R2T with zero data len\n");
652 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
653 sizeof(void*));
654 return ISCSI_ERR_DATALEN;
655 }
656
657 if (r2t->data_length > session->max_burst)
658 debug_scsi("invalid R2T with data len %u and max burst %u."
659 "Attempting to execute request.\n",
660 r2t->data_length, session->max_burst);
661
662 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
663 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
664 iscsi_conn_printk(KERN_ERR, conn,
665 "invalid R2T with data len %u at offset %u "
666 "and total length %d\n", r2t->data_length,
667 r2t->data_offset, scsi_out(task->sc)->length);
668 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
669 sizeof(void*));
670 return ISCSI_ERR_DATALEN;
671 }
672
673 r2t->ttt = rhdr->ttt; /* no flip */
674 r2t->solicit_datasn = 0;
675
676 iscsi_solicit_data_init(conn, task, r2t);
677
678 tcp_task->exp_datasn = r2tsn + 1;
679 __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
680 conn->r2t_pdus_cnt++;
681
682 iscsi_requeue_task(task);
683 return 0;
684}
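/*
 * R2T acceptance is gated on several checks: the PDU must carry no
 * data, the R2TSN must match the expected value, a zero data length is
 * rejected, and the requested range must lie inside the Data-Out
 * buffer.  A valid R2T is queued on r2tqueue and the task is requeued
 * for transmission.
 */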
685
686/*
687 * Handle incoming reply to DataIn command
688 */
689static int
690iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
691 struct iscsi_segment *segment)
692{
693 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
694 struct iscsi_hdr *hdr = tcp_conn->in.hdr;
695 int rc;
696
697 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
698 return ISCSI_ERR_DATA_DGST;
699
700 /* check for non-exceptional status */
701 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
702 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
703 if (rc)
704 return rc;
705 }
706
707 iscsi_tcp_hdr_recv_prep(tcp_conn);
708 return 0;
709}
710
711/**
712 * iscsi_tcp_hdr_dissect - process PDU header
713 * @conn: iSCSI connection
714 * @hdr: PDU header
715 *
716 * This function analyzes the header of the PDU received,
717 * and performs several sanity checks. If the PDU is accompanied
718 * by data, the receive buffer is set up to copy the incoming data
719 * to the correct location.
720 */
721static int
722iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
723{
724 int rc = 0, opcode, ahslen;
725 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
726 struct iscsi_task *task;
727
728 /* verify PDU length */
729 tcp_conn->in.datalen = ntoh24(hdr->dlength);
730 if (tcp_conn->in.datalen > conn->max_recv_dlength) {
731 iscsi_conn_printk(KERN_ERR, conn,
732 "iscsi_tcp: datalen %d > %d\n",
733 tcp_conn->in.datalen, conn->max_recv_dlength);
734 return ISCSI_ERR_DATALEN;
735 }
736
737 /* Additional header segments. So far, we don't
738 * process additional headers.
739 */
740 ahslen = hdr->hlength << 2;
741
742 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
743 /* verify itt (itt encoding: age+cid+itt) */
744 rc = iscsi_verify_itt(conn, hdr->itt);
745 if (rc)
746 return rc;
747
748 debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
749 opcode, ahslen, tcp_conn->in.datalen);
750
751 switch(opcode) {
752 case ISCSI_OP_SCSI_DATA_IN:
753 spin_lock(&conn->session->lock);
754 task = iscsi_itt_to_ctask(conn, hdr->itt);
755 if (!task)
756 rc = ISCSI_ERR_BAD_ITT;
757 else
758 rc = iscsi_data_in(conn, task);
759 if (rc) {
760 spin_unlock(&conn->session->lock);
761 break;
762 }
763
764 if (tcp_conn->in.datalen) {
765 struct iscsi_tcp_task *tcp_task = task->dd_data;
766 struct hash_desc *rx_hash = NULL;
767 struct scsi_data_buffer *sdb = scsi_in(task->sc);
768
769 /*
770			 * Set up the copy of Data-In into the Scsi_Cmnd.
771 * Scatterlist case:
772 * We set up the iscsi_segment to point to the next
773 * scatterlist entry to copy to. As we go along,
774 * we move on to the next scatterlist entry and
775 * update the digest per-entry.
776 */
777 if (conn->datadgst_en)
778 rx_hash = &tcp_conn->rx_hash;
779
780 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
781 "datalen=%d)\n", tcp_conn,
782 tcp_task->data_offset,
783 tcp_conn->in.datalen);
784 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
785 sdb->table.sgl,
786 sdb->table.nents,
787 tcp_task->data_offset,
788 tcp_conn->in.datalen,
789 iscsi_tcp_process_data_in,
790 rx_hash);
791 spin_unlock(&conn->session->lock);
792 return rc;
793 }
794 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
795 spin_unlock(&conn->session->lock);
796 break;
797 case ISCSI_OP_SCSI_CMD_RSP:
798 if (tcp_conn->in.datalen) {
799 iscsi_tcp_data_recv_prep(tcp_conn);
800 return 0;
801 }
802 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
803 break;
804 case ISCSI_OP_R2T:
805 spin_lock(&conn->session->lock);
806 task = iscsi_itt_to_ctask(conn, hdr->itt);
807 if (!task)
808 rc = ISCSI_ERR_BAD_ITT;
809 else if (ahslen)
810 rc = ISCSI_ERR_AHSLEN;
811 else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
812 rc = iscsi_r2t_rsp(conn, task);
813 else
814 rc = ISCSI_ERR_PROTO;
815 spin_unlock(&conn->session->lock);
816 break;
817 case ISCSI_OP_LOGIN_RSP:
818 case ISCSI_OP_TEXT_RSP:
819 case ISCSI_OP_REJECT:
820 case ISCSI_OP_ASYNC_EVENT:
821 /*
822 * It is possible that we could get a PDU with a buffer larger
823 * than 8K, but there are no targets that currently do this.
824 * For now we fail until we find a vendor that needs it
825 */
826 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
827 iscsi_conn_printk(KERN_ERR, conn,
828 "iscsi_tcp: received buffer of "
829 "len %u but conn buffer is only %u "
830 "(opcode %0x)\n",
831 tcp_conn->in.datalen,
832 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
833 rc = ISCSI_ERR_PROTO;
834 break;
835 }
836
837 /* If there's data coming in with the response,
838	 * receive it into the connection's buffer.
839 */
840 if (tcp_conn->in.datalen) {
841 iscsi_tcp_data_recv_prep(tcp_conn);
842 return 0;
843 }
844 /* fall through */
845 case ISCSI_OP_LOGOUT_RSP:
846 case ISCSI_OP_NOOP_IN:
847 case ISCSI_OP_SCSI_TMFUNC_RSP:
848 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
849 break;
850 default:
851 rc = ISCSI_ERR_BAD_OPCODE;
852 break;
853 }
854
855 if (rc == 0) {
856 /* Anything that comes with data should have
857 * been handled above. */
858 if (tcp_conn->in.datalen)
859 return ISCSI_ERR_PROTO;
860 iscsi_tcp_hdr_recv_prep(tcp_conn);
861 }
862
863 return rc;
864}
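/*
 * Dissection either completes the PDU immediately (no payload) or arms
 * tcp_conn->in.segment so the payload that follows is copied straight
 * to its destination - the SCSI scatterlist for Data-In, or the
 * connection's receive buffer for control PDUs - with the data digest
 * updated along the way when digests are enabled.
 */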
865
866/** 66/**
867 * iscsi_tcp_hdr_recv_done - process PDU header 67 * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
868 *
869 * This is the callback invoked when the PDU header has
870 * been received. If the header is followed by additional
871 * header segments, we go back for more data.
872 */
873static int
874iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
875 struct iscsi_segment *segment)
876{
877 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
878 struct iscsi_hdr *hdr;
879
880 /* Check if there are additional header segments
881 * *prior* to computing the digest, because we
882 * may need to go back to the caller for more.
883 */
884 hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
885 if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
886 /* Bump the header length - the caller will
887 * just loop around and get the AHS for us, and
888 * call again. */
889 unsigned int ahslen = hdr->hlength << 2;
890
891 /* Make sure we don't overflow */
892 if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
893 return ISCSI_ERR_AHSLEN;
894
895 segment->total_size += ahslen;
896 segment->size += ahslen;
897 return 0;
898 }
899
900 /* We're done processing the header. See if we're doing
901 * header digests; if so, set up the recv_digest buffer
902 * and go back for more. */
903 if (conn->hdrdgst_en) {
904 if (segment->digest_len == 0) {
905 iscsi_tcp_segment_splice_digest(segment,
906 segment->recv_digest);
907 return 0;
908 }
909 iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
910 segment->total_copied - ISCSI_DIGEST_SIZE,
911 segment->digest);
912
913 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
914 return ISCSI_ERR_HDR_DGST;
915 }
916
917 tcp_conn->in.hdr = hdr;
918 return iscsi_tcp_hdr_dissect(conn, hdr);
919}
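/*
 * Header reception can take several passes: one for the basic header,
 * another if an AHS is present (the segment is simply grown by ahslen),
 * and another for the header digest when HeaderDigest is enabled.
 * Only once all of that has arrived does iscsi_tcp_hdr_dissect() run.
 */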
920
921/**
922 * iscsi_tcp_recv - TCP receive in sendfile fashion
923 * @rd_desc: read descriptor 68 * @rd_desc: read descriptor
924 * @skb: socket buffer 69 * @skb: socket buffer
925 * @offset: offset in skb 70 * @offset: offset in skb
926 * @len: skb->len - offset 71 * @len: skb->len - offset
927 **/ 72 */
928static int 73static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
929iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, 74 unsigned int offset, size_t len)
930 unsigned int offset, size_t len)
931{ 75{
932 struct iscsi_conn *conn = rd_desc->arg.data; 76 struct iscsi_conn *conn = rd_desc->arg.data;
933 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 77 unsigned int consumed, total_consumed = 0;
934 struct iscsi_segment *segment = &tcp_conn->in.segment; 78 int status;
935 struct skb_seq_state seq;
936 unsigned int consumed = 0;
937 int rc = 0;
938 79
939 debug_tcp("in %d bytes\n", skb->len - offset); 80 debug_tcp("in %d bytes\n", skb->len - offset);
940 81
941 if (unlikely(conn->suspend_rx)) { 82 do {
942 debug_tcp("conn %d Rx suspended!\n", conn->id); 83 status = 0;
943 return 0; 84 consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
944 } 85 offset += consumed;
86 total_consumed += consumed;
87 } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
945 88
946 skb_prepare_seq_read(skb, offset, skb->len, &seq); 89 debug_tcp("read %d bytes status %d\n", skb->len - offset, status);
947 while (1) { 90 return total_consumed;
948 unsigned int avail;
949 const u8 *ptr;
950
951 avail = skb_seq_read(consumed, &ptr, &seq);
952 if (avail == 0) {
953 debug_tcp("no more data avail. Consumed %d\n",
954 consumed);
955 break;
956 }
957 BUG_ON(segment->copied >= segment->size);
958
959 debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
960 rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
961 BUG_ON(rc == 0);
962 consumed += rc;
963
964 if (segment->total_copied >= segment->total_size) {
965 debug_tcp("segment done\n");
966 rc = segment->done(tcp_conn, segment);
967 if (rc != 0) {
968 skb_abort_seq_read(&seq);
969 goto error;
970 }
971
972			/* The done() function sets up the
973 * next segment. */
974 }
975 }
976 skb_abort_seq_read(&seq);
977 conn->rxdata_octets += consumed;
978 return consumed;
979
980error:
981 debug_tcp("Error receiving PDU, errno=%d\n", rc);
982 iscsi_conn_failure(conn, rc);
983 return 0;
984} 91}
985 92
986static void 93static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
987iscsi_tcp_data_ready(struct sock *sk, int flag)
988{ 94{
989 struct iscsi_conn *conn = sk->sk_user_data; 95 struct iscsi_conn *conn = sk->sk_user_data;
990 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 96 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
@@ -1000,7 +106,7 @@ iscsi_tcp_data_ready(struct sock *sk, int flag)
1000 */ 106 */
1001 rd_desc.arg.data = conn; 107 rd_desc.arg.data = conn;
1002 rd_desc.count = 1; 108 rd_desc.count = 1;
1003 tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv); 109 tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
1004 110
1005 read_unlock(&sk->sk_callback_lock); 111 read_unlock(&sk->sk_callback_lock);
1006 112
@@ -1009,10 +115,10 @@ iscsi_tcp_data_ready(struct sock *sk, int flag)
1009 iscsi_tcp_segment_unmap(&tcp_conn->in.segment); 115 iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
1010} 116}
1011 117
1012static void 118static void iscsi_sw_tcp_state_change(struct sock *sk)
1013iscsi_tcp_state_change(struct sock *sk)
1014{ 119{
1015 struct iscsi_tcp_conn *tcp_conn; 120 struct iscsi_tcp_conn *tcp_conn;
121 struct iscsi_sw_tcp_conn *tcp_sw_conn;
1016 struct iscsi_conn *conn; 122 struct iscsi_conn *conn;
1017 struct iscsi_session *session; 123 struct iscsi_session *session;
1018 void (*old_state_change)(struct sock *); 124 void (*old_state_change)(struct sock *);
@@ -1030,7 +136,8 @@ iscsi_tcp_state_change(struct sock *sk)
1030 } 136 }
1031 137
1032 tcp_conn = conn->dd_data; 138 tcp_conn = conn->dd_data;
1033 old_state_change = tcp_conn->old_state_change; 139 tcp_sw_conn = tcp_conn->dd_data;
140 old_state_change = tcp_sw_conn->old_state_change;
1034 141
1035 read_unlock(&sk->sk_callback_lock); 142 read_unlock(&sk->sk_callback_lock);
1036 143
@@ -1041,63 +148,123 @@ iscsi_tcp_state_change(struct sock *sk)
1041 * iscsi_write_space - Called when more output buffer space is available 148 * iscsi_write_space - Called when more output buffer space is available
1042 * @sk: socket space is available for 149 * @sk: socket space is available for
1043 **/ 150 **/
1044static void 151static void iscsi_sw_tcp_write_space(struct sock *sk)
1045iscsi_write_space(struct sock *sk)
1046{ 152{
1047 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data; 153 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
1048 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 154 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
155 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1049 156
1050 tcp_conn->old_write_space(sk); 157 tcp_sw_conn->old_write_space(sk);
1051 debug_tcp("iscsi_write_space: cid %d\n", conn->id); 158 debug_tcp("iscsi_write_space: cid %d\n", conn->id);
1052 scsi_queue_work(conn->session->host, &conn->xmitwork); 159 scsi_queue_work(conn->session->host, &conn->xmitwork);
1053} 160}
1054 161
1055static void 162static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
1056iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1057{ 163{
1058 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 164 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1059 struct sock *sk = tcp_conn->sock->sk; 165 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
166 struct sock *sk = tcp_sw_conn->sock->sk;
1060 167
1061 /* assign new callbacks */ 168 /* assign new callbacks */
1062 write_lock_bh(&sk->sk_callback_lock); 169 write_lock_bh(&sk->sk_callback_lock);
1063 sk->sk_user_data = conn; 170 sk->sk_user_data = conn;
1064 tcp_conn->old_data_ready = sk->sk_data_ready; 171 tcp_sw_conn->old_data_ready = sk->sk_data_ready;
1065 tcp_conn->old_state_change = sk->sk_state_change; 172 tcp_sw_conn->old_state_change = sk->sk_state_change;
1066 tcp_conn->old_write_space = sk->sk_write_space; 173 tcp_sw_conn->old_write_space = sk->sk_write_space;
1067 sk->sk_data_ready = iscsi_tcp_data_ready; 174 sk->sk_data_ready = iscsi_sw_tcp_data_ready;
1068 sk->sk_state_change = iscsi_tcp_state_change; 175 sk->sk_state_change = iscsi_sw_tcp_state_change;
1069 sk->sk_write_space = iscsi_write_space; 176 sk->sk_write_space = iscsi_sw_tcp_write_space;
1070 write_unlock_bh(&sk->sk_callback_lock); 177 write_unlock_bh(&sk->sk_callback_lock);
1071} 178}
1072 179
1073static void 180static void
1074iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) 181iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn)
1075{ 182{
1076 struct sock *sk = tcp_conn->sock->sk; 183 struct sock *sk = tcp_sw_conn->sock->sk;
1077 184
1078 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ 185 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1079 write_lock_bh(&sk->sk_callback_lock); 186 write_lock_bh(&sk->sk_callback_lock);
1080 sk->sk_user_data = NULL; 187 sk->sk_user_data = NULL;
1081 sk->sk_data_ready = tcp_conn->old_data_ready; 188 sk->sk_data_ready = tcp_sw_conn->old_data_ready;
1082 sk->sk_state_change = tcp_conn->old_state_change; 189 sk->sk_state_change = tcp_sw_conn->old_state_change;
1083 sk->sk_write_space = tcp_conn->old_write_space; 190 sk->sk_write_space = tcp_sw_conn->old_write_space;
1084 sk->sk_no_check = 0; 191 sk->sk_no_check = 0;
1085 write_unlock_bh(&sk->sk_callback_lock); 192 write_unlock_bh(&sk->sk_callback_lock);
1086} 193}
1087 194
1088/** 195/**
1089 * iscsi_xmit - TCP transmit 196 * iscsi_sw_tcp_xmit_segment - transmit segment
197 * @tcp_conn: the iSCSI TCP connection
198 * @segment: the buffer to transmit
199 *
200 * This function transmits as much of the buffer as
201 * the network layer will accept, and returns the number of
202 * bytes transmitted.
203 *
204 * If CRC hashing is enabled, the function will compute the
205 * hash as it goes. When the entire segment has been transmitted,
206 * it will retrieve the hash value and send it as well.
207 */
208static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
209 struct iscsi_segment *segment)
210{
211 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
212 struct socket *sk = tcp_sw_conn->sock;
213 unsigned int copied = 0;
214 int r = 0;
215
216 while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
217 struct scatterlist *sg;
218 unsigned int offset, copy;
219 int flags = 0;
220
221 r = 0;
222 offset = segment->copied;
223 copy = segment->size - offset;
224
225 if (segment->total_copied + segment->size < segment->total_size)
226 flags |= MSG_MORE;
227
228 /* Use sendpage if we can; else fall back to sendmsg */
229 if (!segment->data) {
230 sg = segment->sg;
231 offset += segment->sg_offset + sg->offset;
232 r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
233 copy, flags);
234 } else {
235 struct msghdr msg = { .msg_flags = flags };
236 struct kvec iov = {
237 .iov_base = segment->data + offset,
238 .iov_len = copy
239 };
240
241 r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
242 }
243
244 if (r < 0) {
245 iscsi_tcp_segment_unmap(segment);
246 if (copied || r == -EAGAIN)
247 break;
248 return r;
249 }
250 copied += r;
251 }
252 return copied;
253}
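/*
 * Scatterlist pages are pushed out through the socket's sendpage op,
 * while linear buffers fall back to kernel_sendmsg().  MSG_MORE is set
 * whenever more segment data is known to follow, and a partial send or
 * -EAGAIN simply returns the bytes copied so far so the caller can
 * retry later.
 */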
254
255/**
256 * iscsi_sw_tcp_xmit - TCP transmit
1090 **/ 257 **/
1091static int 258static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
1092iscsi_xmit(struct iscsi_conn *conn)
1093{ 259{
1094 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 260 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1095 struct iscsi_segment *segment = &tcp_conn->out.segment; 261 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
262 struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
1096 unsigned int consumed = 0; 263 unsigned int consumed = 0;
1097 int rc = 0; 264 int rc = 0;
1098 265
1099 while (1) { 266 while (1) {
1100 rc = iscsi_tcp_xmit_segment(tcp_conn, segment); 267 rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
1101 if (rc < 0) { 268 if (rc < 0) {
1102 rc = ISCSI_ERR_XMIT_FAILED; 269 rc = ISCSI_ERR_XMIT_FAILED;
1103 goto error; 270 goto error;
@@ -1132,22 +299,22 @@ error:
1132/** 299/**
1133 * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit 300 * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
1134 */ 301 */
1135static inline int 302static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
1136iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
1137{ 303{
1138 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 304 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1139 struct iscsi_segment *segment = &tcp_conn->out.segment; 305 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
306 struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
1140 307
1141 return segment->total_copied - segment->total_size; 308 return segment->total_copied - segment->total_size;
1142} 309}
1143 310
1144static inline int 311static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
1145iscsi_tcp_flush(struct iscsi_conn *conn)
1146{ 312{
313 struct iscsi_conn *conn = task->conn;
1147 int rc; 314 int rc;
1148 315
1149 while (iscsi_tcp_xmit_qlen(conn)) { 316 while (iscsi_sw_tcp_xmit_qlen(conn)) {
1150 rc = iscsi_xmit(conn); 317 rc = iscsi_sw_tcp_xmit(conn);
1151 if (rc == 0) 318 if (rc == 0)
1152 return -EAGAIN; 319 return -EAGAIN;
1153 if (rc < 0) 320 if (rc < 0)
@@ -1161,27 +328,31 @@ iscsi_tcp_flush(struct iscsi_conn *conn)
1161 * This is called when we're done sending the header. 328 * This is called when we're done sending the header.
1162 * Simply copy the data_segment to the send segment, and return. 329 * Simply copy the data_segment to the send segment, and return.
1163 */ 330 */
1164static int 331static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
1165iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn, 332 struct iscsi_segment *segment)
1166 struct iscsi_segment *segment)
1167{ 333{
1168 tcp_conn->out.segment = tcp_conn->out.data_segment; 334 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
335
336 tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
1169 debug_tcp("Header done. Next segment size %u total_size %u\n", 337 debug_tcp("Header done. Next segment size %u total_size %u\n",
1170 tcp_conn->out.segment.size, tcp_conn->out.segment.total_size); 338 tcp_sw_conn->out.segment.size,
339 tcp_sw_conn->out.segment.total_size);
1171 return 0; 340 return 0;
1172} 341}
1173 342
1174static void 343static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
1175iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen) 344 size_t hdrlen)
1176{ 345{
1177 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 346 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
347 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1178 348
1179 debug_tcp("%s(%p%s)\n", __func__, tcp_conn, 349 debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
1180 conn->hdrdgst_en? ", digest enabled" : ""); 350 conn->hdrdgst_en? ", digest enabled" : "");
1181 351
1182 /* Clear the data segment - needs to be filled in by the 352 /* Clear the data segment - needs to be filled in by the
1183 * caller using iscsi_tcp_send_data_prep() */ 353 * caller using iscsi_tcp_send_data_prep() */
1184 memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment)); 354 memset(&tcp_sw_conn->out.data_segment, 0,
355 sizeof(struct iscsi_segment));
1185 356
1186 /* If header digest is enabled, compute the CRC and 357 /* If header digest is enabled, compute the CRC and
1187 * place the digest into the same buffer. We make 358 * place the digest into the same buffer. We make
@@ -1189,7 +360,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1189 * sufficient room. 360 * sufficient room.
1190 */ 361 */
1191 if (conn->hdrdgst_en) { 362 if (conn->hdrdgst_en) {
1192 iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen, 363 iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
1193 hdr + hdrlen); 364 hdr + hdrlen);
1194 hdrlen += ISCSI_DIGEST_SIZE; 365 hdrlen += ISCSI_DIGEST_SIZE;
1195 } 366 }
@@ -1197,10 +368,10 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1197 /* Remember header pointer for later, when we need 368 /* Remember header pointer for later, when we need
1198 * to decide whether there's a payload to go along 369 * to decide whether there's a payload to go along
1199 * with the header. */ 370 * with the header. */
1200 tcp_conn->out.hdr = hdr; 371 tcp_sw_conn->out.hdr = hdr;
1201 372
1202 iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen, 373 iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
1203 iscsi_tcp_send_hdr_done, NULL); 374 iscsi_sw_tcp_send_hdr_done, NULL);
1204} 375}
1205 376
1206/* 377/*
@@ -1209,11 +380,12 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1209 * of by the iscsi_segment routines. 380 * of by the iscsi_segment routines.
1210 */ 381 */
1211static int 382static int
1212iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, 383iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
1213 unsigned int count, unsigned int offset, 384 unsigned int count, unsigned int offset,
1214 unsigned int len) 385 unsigned int len)
1215{ 386{
1216 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 387 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
388 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1217 struct hash_desc *tx_hash = NULL; 389 struct hash_desc *tx_hash = NULL;
1218 unsigned int hdr_spec_len; 390 unsigned int hdr_spec_len;
1219 391
@@ -1223,22 +395,23 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
1223 395
1224 /* Make sure the datalen matches what the caller 396 /* Make sure the datalen matches what the caller
1225 said he would send. */ 397 said he would send. */
1226 hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength); 398 hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
1227 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); 399 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
1228 400
1229 if (conn->datadgst_en) 401 if (conn->datadgst_en)
1230 tx_hash = &tcp_conn->tx_hash; 402 tx_hash = &tcp_sw_conn->tx_hash;
1231 403
1232 return iscsi_segment_seek_sg(&tcp_conn->out.data_segment, 404 return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
1233 sg, count, offset, len, 405 sg, count, offset, len,
1234 NULL, tx_hash); 406 NULL, tx_hash);
1235} 407}
1236 408
1237static void 409static void
1238iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data, 410iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
1239 size_t len) 411 size_t len)
1240{ 412{
1241 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 413 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
414 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1242 struct hash_desc *tx_hash = NULL; 415 struct hash_desc *tx_hash = NULL;
1243 unsigned int hdr_spec_len; 416 unsigned int hdr_spec_len;
1244 417
@@ -1247,341 +420,160 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1247 420
1248 /* Make sure the datalen matches what the caller 421 /* Make sure the datalen matches what the caller
1249 said he would send. */ 422 said he would send. */
1250 hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength); 423 hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
1251 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); 424 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
1252 425
1253 if (conn->datadgst_en) 426 if (conn->datadgst_en)
1254 tx_hash = &tcp_conn->tx_hash; 427 tx_hash = &tcp_sw_conn->tx_hash;
1255 428
1256 iscsi_segment_init_linear(&tcp_conn->out.data_segment, 429 iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
1257 data, len, NULL, tx_hash); 430 data, len, NULL, tx_hash);
1258} 431}
1259 432
1260/** 433static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
1261 * iscsi_solicit_data_cont - initialize next Data-Out 434 unsigned int offset, unsigned int count)
1262 * @conn: iscsi connection
1263 * @task: scsi command task
1264 * @r2t: R2T info
1265 * @left: bytes left to transfer
1266 *
1267 * Notes:
1268 * Initialize next Data-Out within this R2T sequence and continue
1269 * to process next Scatter-Gather element(if any) of this SCSI command.
1270 *
1271 * Called under connection lock.
1272 **/
1273static int
1274iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
1275 struct iscsi_r2t_info *r2t)
1276{ 435{
1277 struct iscsi_data *hdr;
1278 int new_offset, left;
1279
1280 BUG_ON(r2t->data_length - r2t->sent < 0);
1281 left = r2t->data_length - r2t->sent;
1282 if (left == 0)
1283 return 0;
1284
1285 hdr = &r2t->dtask.hdr;
1286 memset(hdr, 0, sizeof(struct iscsi_data));
1287 hdr->ttt = r2t->ttt;
1288 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1289 r2t->solicit_datasn++;
1290 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1291 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
1292 hdr->itt = task->hdr->itt;
1293 hdr->exp_statsn = r2t->exp_statsn;
1294 new_offset = r2t->data_offset + r2t->sent;
1295 hdr->offset = cpu_to_be32(new_offset);
1296 if (left > conn->max_xmit_dlength) {
1297 hton24(hdr->dlength, conn->max_xmit_dlength);
1298 r2t->data_count = conn->max_xmit_dlength;
1299 } else {
1300 hton24(hdr->dlength, left);
1301 r2t->data_count = left;
1302 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1303 }
1304
1305 conn->dataout_pdus_cnt++;
1306 return 1;
1307}
1308
1309/**
1310 * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1311 * @conn: iscsi connection
1312 * @task: scsi command task
1313 * @sc: scsi command
1314 **/
1315static int
1316iscsi_tcp_task_init(struct iscsi_task *task)
1317{
1318 struct iscsi_tcp_task *tcp_task = task->dd_data;
1319 struct iscsi_conn *conn = task->conn; 436 struct iscsi_conn *conn = task->conn;
1320 struct scsi_cmnd *sc = task->sc; 437 int err = 0;
1321 int err;
1322 438
1323 if (!sc) { 439 iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
1324 /*
1325 * mgmt tasks do not have a scatterlist since they come
1326 * in from the iscsi interface.
1327 */
1328 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
1329 task->itt);
1330
1331 /* Prepare PDU, optionally w/ immediate data */
1332 iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
1333
1334 /* If we have immediate data, attach a payload */
1335 if (task->data_count)
1336 iscsi_tcp_send_linear_data_prepare(conn, task->data,
1337 task->data_count);
1338 return 0;
1339 }
1340 440
1341 BUG_ON(__kfifo_len(tcp_task->r2tqueue)); 441 if (!count)
1342 tcp_task->sent = 0; 442 return 0;
1343 tcp_task->exp_datasn = 0;
1344 443
1345 /* Prepare PDU, optionally w/ immediate data */ 444 if (!task->sc)
1346 debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n", 445 iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
1347 conn->id, task->itt, task->imm_count, 446 else {
1348 task->unsol_count); 447 struct scsi_data_buffer *sdb = scsi_out(task->sc);
1349 iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
1350 448
1351 if (!task->imm_count) 449 err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
1352 return 0; 450 sdb->table.nents, offset,
451 count);
452 }
1353 453
1354 /* If we have immediate data, attach a payload */ 454 if (err) {
1355 err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl, 455 iscsi_conn_failure(conn, err);
1356 scsi_out(sc)->table.nents, 456 return -EIO;
1357 0, task->imm_count); 457 }
1358 if (err)
1359 return err;
1360 tcp_task->sent += task->imm_count;
1361 task->imm_count = 0;
1362 return 0; 458 return 0;
1363} 459}
1364 460
1365/* 461static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
1366 * iscsi_tcp_task_xmit - xmit normal PDU task
1367 * @task: iscsi command task
1368 *
1369 * We're expected to return 0 when everything was transmitted successfully,
1370 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
1371 * of error.
1372 */
1373static int
1374iscsi_tcp_task_xmit(struct iscsi_task *task)
1375{ 462{
1376 struct iscsi_conn *conn = task->conn;
1377 struct iscsi_tcp_task *tcp_task = task->dd_data; 463 struct iscsi_tcp_task *tcp_task = task->dd_data;
1378 struct scsi_cmnd *sc = task->sc;
1379 struct scsi_data_buffer *sdb;
1380 int rc = 0;
1381
1382flush:
1383 /* Flush any pending data first. */
1384 rc = iscsi_tcp_flush(conn);
1385 if (rc < 0)
1386 return rc;
1387
1388 /* mgmt command */
1389 if (!sc) {
1390 if (task->hdr->itt == RESERVED_ITT)
1391 iscsi_put_task(task);
1392 return 0;
1393 }
1394
1395 /* Are we done already? */
1396 if (sc->sc_data_direction != DMA_TO_DEVICE)
1397 return 0;
1398 464
1399 sdb = scsi_out(sc); 465 task->hdr = task->dd_data + sizeof(*tcp_task);
1400 if (task->unsol_count != 0) { 466 task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
1401 struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
1402
1403 /* Prepare a header for the unsolicited PDU.
1404 * The amount of data we want to send will be
1405 * in task->data_count.
1406 * FIXME: return the data count instead.
1407 */
1408 iscsi_prep_unsolicit_data_pdu(task, hdr);
1409
1410 debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
1411 task->itt, tcp_task->sent, task->data_count);
1412
1413 iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
1414 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
1415 sdb->table.nents, tcp_task->sent,
1416 task->data_count);
1417 if (rc)
1418 goto fail;
1419 tcp_task->sent += task->data_count;
1420 task->unsol_count -= task->data_count;
1421 goto flush;
1422 } else {
1423 struct iscsi_session *session = conn->session;
1424 struct iscsi_r2t_info *r2t;
1425
1426 /* All unsolicited PDUs sent. Check for solicited PDUs.
1427 */
1428 spin_lock_bh(&session->lock);
1429 r2t = tcp_task->r2t;
1430 if (r2t != NULL) {
1431 /* Continue with this R2T? */
1432 if (!iscsi_solicit_data_cont(conn, task, r2t)) {
1433 debug_scsi(" done with r2t %p\n", r2t);
1434
1435 __kfifo_put(tcp_task->r2tpool.queue,
1436 (void*)&r2t, sizeof(void*));
1437 tcp_task->r2t = r2t = NULL;
1438 }
1439 }
1440
1441 if (r2t == NULL) {
1442 __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
1443 sizeof(void*));
1444 r2t = tcp_task->r2t;
1445 }
1446 spin_unlock_bh(&session->lock);
1447
1448 /* Waiting for more R2Ts to arrive. */
1449 if (r2t == NULL) {
1450 debug_tcp("no R2Ts yet\n");
1451 return 0;
1452 }
1453
1454 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1455 r2t, r2t->solicit_datasn - 1, task->itt,
1456 r2t->data_offset + r2t->sent, r2t->data_count);
1457
1458 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
1459 sizeof(struct iscsi_hdr));
1460
1461 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
1462 sdb->table.nents,
1463 r2t->data_offset + r2t->sent,
1464 r2t->data_count);
1465 if (rc)
1466 goto fail;
1467 tcp_task->sent += r2t->data_count;
1468 r2t->sent += r2t->data_count;
1469 goto flush;
1470 }
1471 return 0; 467 return 0;
1472fail:
1473 iscsi_conn_failure(conn, rc);
1474 return -EIO;
1475} 468}
1476 469
1477static struct iscsi_cls_conn * 470static struct iscsi_cls_conn *
1478iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) 471iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
472 uint32_t conn_idx)
1479{ 473{
1480 struct iscsi_conn *conn; 474 struct iscsi_conn *conn;
1481 struct iscsi_cls_conn *cls_conn; 475 struct iscsi_cls_conn *cls_conn;
1482 struct iscsi_tcp_conn *tcp_conn; 476 struct iscsi_tcp_conn *tcp_conn;
477 struct iscsi_sw_tcp_conn *tcp_sw_conn;
1483 478
1484 cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx); 479 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
480 conn_idx);
1485 if (!cls_conn) 481 if (!cls_conn)
1486 return NULL; 482 return NULL;
1487 conn = cls_conn->dd_data; 483 conn = cls_conn->dd_data;
1488 /*
1489 * due to strange issues with iser these are not set
1490 * in iscsi_conn_setup
1491 */
1492 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1493
1494 tcp_conn = conn->dd_data; 484 tcp_conn = conn->dd_data;
1495 tcp_conn->iscsi_conn = conn; 485 tcp_sw_conn = tcp_conn->dd_data;
1496 486
1497 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 487 tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1498 CRYPTO_ALG_ASYNC); 488 CRYPTO_ALG_ASYNC);
1499 tcp_conn->tx_hash.flags = 0; 489 tcp_sw_conn->tx_hash.flags = 0;
1500 if (IS_ERR(tcp_conn->tx_hash.tfm)) 490 if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
1501 goto free_conn; 491 goto free_conn;
1502 492
1503 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 493 tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1504 CRYPTO_ALG_ASYNC); 494 CRYPTO_ALG_ASYNC);
1505 tcp_conn->rx_hash.flags = 0; 495 tcp_sw_conn->rx_hash.flags = 0;
1506 if (IS_ERR(tcp_conn->rx_hash.tfm)) 496 if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
1507 goto free_tx_tfm; 497 goto free_tx_tfm;
498 tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
1508 499
1509 return cls_conn; 500 return cls_conn;
1510 501
1511free_tx_tfm: 502free_tx_tfm:
1512 crypto_free_hash(tcp_conn->tx_hash.tfm); 503 crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
1513free_conn: 504free_conn:
1514 iscsi_conn_printk(KERN_ERR, conn, 505 iscsi_conn_printk(KERN_ERR, conn,
1515 "Could not create connection due to crc32c " 506 "Could not create connection due to crc32c "
1516 "loading error. Make sure the crc32c " 507 "loading error. Make sure the crc32c "
1517 "module is built as a module or into the " 508 "module is built as a module or into the "
1518 "kernel\n"); 509 "kernel\n");
1519 iscsi_conn_teardown(cls_conn); 510 iscsi_tcp_conn_teardown(cls_conn);
1520 return NULL; 511 return NULL;
1521} 512}
1522 513
1523static void 514static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
1524iscsi_tcp_release_conn(struct iscsi_conn *conn)
1525{ 515{
1526 struct iscsi_session *session = conn->session; 516 struct iscsi_session *session = conn->session;
1527 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 517 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1528 struct socket *sock = tcp_conn->sock; 518 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
519 struct socket *sock = tcp_sw_conn->sock;
1529 520
1530 if (!sock) 521 if (!sock)
1531 return; 522 return;
1532 523
1533 sock_hold(sock->sk); 524 sock_hold(sock->sk);
1534 iscsi_conn_restore_callbacks(tcp_conn); 525 iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
1535 sock_put(sock->sk); 526 sock_put(sock->sk);
1536 527
1537 spin_lock_bh(&session->lock); 528 spin_lock_bh(&session->lock);
1538 tcp_conn->sock = NULL; 529 tcp_sw_conn->sock = NULL;
1539 spin_unlock_bh(&session->lock); 530 spin_unlock_bh(&session->lock);
1540 sockfd_put(sock); 531 sockfd_put(sock);
1541} 532}
1542 533
1543static void 534static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1544iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1545{ 535{
1546 struct iscsi_conn *conn = cls_conn->dd_data; 536 struct iscsi_conn *conn = cls_conn->dd_data;
1547 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 537 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
538 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1548 539
1549 iscsi_tcp_release_conn(conn); 540 iscsi_sw_tcp_release_conn(conn);
1550 541
1551 if (tcp_conn->tx_hash.tfm) 542 if (tcp_sw_conn->tx_hash.tfm)
1552 crypto_free_hash(tcp_conn->tx_hash.tfm); 543 crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
1553 if (tcp_conn->rx_hash.tfm) 544 if (tcp_sw_conn->rx_hash.tfm)
1554 crypto_free_hash(tcp_conn->rx_hash.tfm); 545 crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
1555 546
1556 iscsi_conn_teardown(cls_conn); 547 iscsi_tcp_conn_teardown(cls_conn);
1557} 548}
1558 549
1559static void 550static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1560iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1561{ 551{
1562 struct iscsi_conn *conn = cls_conn->dd_data; 552 struct iscsi_conn *conn = cls_conn->dd_data;
1563 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 553 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
554 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1564 555
1565 /* userspace may have goofed up and not bound us */ 556 /* userspace may have goofed up and not bound us */
1566 if (!tcp_conn->sock) 557 if (!tcp_sw_conn->sock)
1567 return; 558 return;
1568 /* 559 /*
1569 * Make sure our recv side is stopped. 560 * Make sure our recv side is stopped.
1570 * Older tools called conn stop before ep_disconnect 561 * Older tools called conn stop before ep_disconnect
1571 * so IO could still be coming in. 562 * so IO could still be coming in.
1572 */ 563 */
1573 write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock); 564 write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
1574 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 565 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1575 write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock); 566 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
1576 567
1577 iscsi_conn_stop(cls_conn, flag); 568 iscsi_conn_stop(cls_conn, flag);
1578 iscsi_tcp_release_conn(conn); 569 iscsi_sw_tcp_release_conn(conn);
1579} 570}
1580 571
1581static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, 572static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
1582 char *buf, int *port, 573 char *buf, int *port,
1583 int (*getname)(struct socket *, struct sockaddr *, 574 int (*getname)(struct socket *,
1584 int *addrlen)) 575 struct sockaddr *,
576 int *addrlen))
1585{ 577{
1586 struct sockaddr_storage *addr; 578 struct sockaddr_storage *addr;
1587 struct sockaddr_in6 *sin6; 579 struct sockaddr_in6 *sin6;
@@ -1619,14 +611,15 @@ free_addr:
1619} 611}
1620 612
1621static int 613static int
1622iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, 614iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1623 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 615 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
1624 int is_leading) 616 int is_leading)
1625{ 617{
1626 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 618 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1627 struct iscsi_host *ihost = shost_priv(shost); 619 struct iscsi_host *ihost = shost_priv(shost);
1628 struct iscsi_conn *conn = cls_conn->dd_data; 620 struct iscsi_conn *conn = cls_conn->dd_data;
1629 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 621 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
622 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1630 struct sock *sk; 623 struct sock *sk;
1631 struct socket *sock; 624 struct socket *sock;
1632 int err; 625 int err;
@@ -1643,13 +636,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1643 * userspace may still want to query the values since we will 636 * userspace may still want to query the values since we will
1644 * be using them for the reconnect 637 * be using them for the reconnect
1645 */ 638 */
1646 err = iscsi_tcp_get_addr(conn, sock, conn->portal_address, 639 err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
1647 &conn->portal_port, kernel_getpeername); 640 &conn->portal_port, kernel_getpeername);
1648 if (err) 641 if (err)
1649 goto free_socket; 642 goto free_socket;
1650 643
1651 err = iscsi_tcp_get_addr(conn, sock, ihost->local_address, 644 err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
1652 &ihost->local_port, kernel_getsockname); 645 &ihost->local_port, kernel_getsockname);
1653 if (err) 646 if (err)
1654 goto free_socket; 647 goto free_socket;
1655 648
@@ -1658,7 +651,7 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1658 goto free_socket; 651 goto free_socket;
1659 652
1660 /* bind iSCSI connection and socket */ 653 /* bind iSCSI connection and socket */
1661 tcp_conn->sock = sock; 654 tcp_sw_conn->sock = sock;
1662 655
1663 /* setup Socket parameters */ 656 /* setup Socket parameters */
1664 sk = sock->sk; 657 sk = sock->sk;
@@ -1666,8 +659,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1666 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ 659 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
1667 sk->sk_allocation = GFP_ATOMIC; 660 sk->sk_allocation = GFP_ATOMIC;
1668 661
1669 iscsi_conn_set_callbacks(conn); 662 iscsi_sw_tcp_conn_set_callbacks(conn);
1670 tcp_conn->sendpage = tcp_conn->sock->ops->sendpage; 663 tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
1671 /* 664 /*
1672 * set receive state machine into initial state 665 * set receive state machine into initial state
1673 */ 666 */
@@ -1679,74 +672,14 @@ free_socket:
1679 return err; 672 return err;
1680} 673}
1681 674
1682static int 675static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
1683iscsi_r2tpool_alloc(struct iscsi_session *session) 676 enum iscsi_param param, char *buf,
1684{ 677 int buflen)
1685 int i;
1686 int cmd_i;
1687
1688 /*
1689 * initialize per-task: R2T pool and xmit queue
1690 */
1691 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1692 struct iscsi_task *task = session->cmds[cmd_i];
1693 struct iscsi_tcp_task *tcp_task = task->dd_data;
1694
1695 /*
1696		 * pre-allocate 4x as many R2Ts to handle the race where the
1697		 * target acks Data-Out faster than our data_xmit() queues
1698		 * could replenish the r2tqueue.
1699 */
1700
1701 /* R2T pool */
1702 if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
1703 sizeof(struct iscsi_r2t_info))) {
1704 goto r2t_alloc_fail;
1705 }
1706
1707 /* R2T xmit queue */
1708 tcp_task->r2tqueue = kfifo_alloc(
1709 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
1710 if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
1711 iscsi_pool_free(&tcp_task->r2tpool);
1712 goto r2t_alloc_fail;
1713 }
1714 }
1715
1716 return 0;
1717
1718r2t_alloc_fail:
1719 for (i = 0; i < cmd_i; i++) {
1720 struct iscsi_task *task = session->cmds[i];
1721 struct iscsi_tcp_task *tcp_task = task->dd_data;
1722
1723 kfifo_free(tcp_task->r2tqueue);
1724 iscsi_pool_free(&tcp_task->r2tpool);
1725 }
1726 return -ENOMEM;
1727}
1728
1729static void
1730iscsi_r2tpool_free(struct iscsi_session *session)
1731{
1732 int i;
1733
1734 for (i = 0; i < session->cmds_max; i++) {
1735 struct iscsi_task *task = session->cmds[i];
1736 struct iscsi_tcp_task *tcp_task = task->dd_data;
1737
1738 kfifo_free(tcp_task->r2tqueue);
1739 iscsi_pool_free(&tcp_task->r2tpool);
1740 }
1741}
1742
1743static int
1744iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1745 char *buf, int buflen)
1746{ 678{
1747 struct iscsi_conn *conn = cls_conn->dd_data; 679 struct iscsi_conn *conn = cls_conn->dd_data;
1748 struct iscsi_session *session = conn->session; 680 struct iscsi_session *session = conn->session;
1749 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 681 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
682 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1750 int value; 683 int value;
1751 684
1752 switch(param) { 685 switch(param) {
@@ -1755,8 +688,8 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1755 break; 688 break;
1756 case ISCSI_PARAM_DATADGST_EN: 689 case ISCSI_PARAM_DATADGST_EN:
1757 iscsi_set_param(cls_conn, param, buf, buflen); 690 iscsi_set_param(cls_conn, param, buf, buflen);
1758 tcp_conn->sendpage = conn->datadgst_en ? 691 tcp_sw_conn->sendpage = conn->datadgst_en ?
1759 sock_no_sendpage : tcp_conn->sock->ops->sendpage; 692 sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
1760 break; 693 break;
1761 case ISCSI_PARAM_MAX_R2T: 694 case ISCSI_PARAM_MAX_R2T:
1762 sscanf(buf, "%d", &value); 695 sscanf(buf, "%d", &value);
@@ -1764,9 +697,9 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1764 return -EINVAL; 697 return -EINVAL;
1765 if (session->max_r2t == value) 698 if (session->max_r2t == value)
1766 break; 699 break;
1767 iscsi_r2tpool_free(session); 700 iscsi_tcp_r2tpool_free(session);
1768 iscsi_set_param(cls_conn, param, buf, buflen); 701 iscsi_set_param(cls_conn, param, buf, buflen);
1769 if (iscsi_r2tpool_alloc(session)) 702 if (iscsi_tcp_r2tpool_alloc(session))
1770 return -ENOMEM; 703 return -ENOMEM;
1771 break; 704 break;
1772 default: 705 default:
@@ -1776,9 +709,8 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1776 return 0; 709 return 0;
1777} 710}
1778 711
1779static int 712static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
1780iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, 713 enum iscsi_param param, char *buf)
1781 enum iscsi_param param, char *buf)
1782{ 714{
1783 struct iscsi_conn *conn = cls_conn->dd_data; 715 struct iscsi_conn *conn = cls_conn->dd_data;
1784 int len; 716 int len;
@@ -1802,48 +734,42 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
1802} 734}
1803 735
1804static void 736static void
1805iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) 737iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
738 struct iscsi_stats *stats)
1806{ 739{
1807 struct iscsi_conn *conn = cls_conn->dd_data; 740 struct iscsi_conn *conn = cls_conn->dd_data;
1808 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 741 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
742 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1809 743
1810 stats->txdata_octets = conn->txdata_octets;
1811 stats->rxdata_octets = conn->rxdata_octets;
1812 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1813 stats->dataout_pdus = conn->dataout_pdus_cnt;
1814 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1815 stats->datain_pdus = conn->datain_pdus_cnt;
1816 stats->r2t_pdus = conn->r2t_pdus_cnt;
1817 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1818 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1819 stats->custom_length = 3; 744 stats->custom_length = 3;
1820 strcpy(stats->custom[0].desc, "tx_sendpage_failures"); 745 strcpy(stats->custom[0].desc, "tx_sendpage_failures");
1821 stats->custom[0].value = tcp_conn->sendpage_failures_cnt; 746 stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
1822 strcpy(stats->custom[1].desc, "rx_discontiguous_hdr"); 747 strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
1823 stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt; 748 stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
1824 strcpy(stats->custom[2].desc, "eh_abort_cnt"); 749 strcpy(stats->custom[2].desc, "eh_abort_cnt");
1825 stats->custom[2].value = conn->eh_abort_cnt; 750 stats->custom[2].value = conn->eh_abort_cnt;
751
752 iscsi_tcp_conn_get_stats(cls_conn, stats);
1826} 753}
1827 754
1828static struct iscsi_cls_session * 755static struct iscsi_cls_session *
1829iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 756iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
1830 uint16_t qdepth, uint32_t initial_cmdsn, 757 uint16_t qdepth, uint32_t initial_cmdsn,
1831 uint32_t *hostno) 758 uint32_t *hostno)
1832{ 759{
1833 struct iscsi_cls_session *cls_session; 760 struct iscsi_cls_session *cls_session;
1834 struct iscsi_session *session; 761 struct iscsi_session *session;
1835 struct Scsi_Host *shost; 762 struct Scsi_Host *shost;
1836 int cmd_i;
1837 763
1838 if (ep) { 764 if (ep) {
1839 printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep); 765 printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
1840 return NULL; 766 return NULL;
1841 } 767 }
1842 768
1843 shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth); 769 shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, qdepth);
1844 if (!shost) 770 if (!shost)
1845 return NULL; 771 return NULL;
1846 shost->transportt = iscsi_tcp_scsi_transport; 772 shost->transportt = iscsi_sw_tcp_scsi_transport;
1847 shost->max_lun = iscsi_max_lun; 773 shost->max_lun = iscsi_max_lun;
1848 shost->max_id = 0; 774 shost->max_id = 0;
1849 shost->max_channel = 0; 775 shost->max_channel = 0;
@@ -1853,23 +779,17 @@ iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
1853 goto free_host; 779 goto free_host;
1854 *hostno = shost->host_no; 780 *hostno = shost->host_no;
1855 781
1856 cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max, 782 cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
1857 sizeof(struct iscsi_tcp_task), 783 cmds_max,
784 sizeof(struct iscsi_tcp_task) +
785 sizeof(struct iscsi_sw_tcp_hdrbuf),
1858 initial_cmdsn, 0); 786 initial_cmdsn, 0);
1859 if (!cls_session) 787 if (!cls_session)
1860 goto remove_host; 788 goto remove_host;
1861 session = cls_session->dd_data; 789 session = cls_session->dd_data;
1862 790
1863 shost->can_queue = session->scsi_cmds_max; 791 shost->can_queue = session->scsi_cmds_max;
1864 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 792 if (iscsi_tcp_r2tpool_alloc(session))
1865 struct iscsi_task *task = session->cmds[cmd_i];
1866 struct iscsi_tcp_task *tcp_task = task->dd_data;
1867
1868 task->hdr = &tcp_task->hdr.cmd_hdr;
1869 task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
1870 }
1871
1872 if (iscsi_r2tpool_alloc(session))
1873 goto remove_session; 793 goto remove_session;
1874 return cls_session; 794 return cls_session;
1875 795
@@ -1882,25 +802,25 @@ free_host:
1882 return NULL; 802 return NULL;
1883} 803}
1884 804
1885static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) 805static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
1886{ 806{
1887 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 807 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1888 808
1889 iscsi_r2tpool_free(cls_session->dd_data); 809 iscsi_tcp_r2tpool_free(cls_session->dd_data);
1890 iscsi_session_teardown(cls_session); 810 iscsi_session_teardown(cls_session);
1891 811
1892 iscsi_host_remove(shost); 812 iscsi_host_remove(shost);
1893 iscsi_host_free(shost); 813 iscsi_host_free(shost);
1894} 814}
1895 815
1896static int iscsi_tcp_slave_configure(struct scsi_device *sdev) 816static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
1897{ 817{
1898 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY); 818 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
1899 blk_queue_dma_alignment(sdev->request_queue, 0); 819 blk_queue_dma_alignment(sdev->request_queue, 0);
1900 return 0; 820 return 0;
1901} 821}
1902 822
1903static struct scsi_host_template iscsi_sht = { 823static struct scsi_host_template iscsi_sw_tcp_sht = {
1904 .module = THIS_MODULE, 824 .module = THIS_MODULE,
1905 .name = "iSCSI Initiator over TCP/IP", 825 .name = "iSCSI Initiator over TCP/IP",
1906 .queuecommand = iscsi_queuecommand, 826 .queuecommand = iscsi_queuecommand,
@@ -1913,12 +833,12 @@ static struct scsi_host_template iscsi_sht = {
1913 .eh_device_reset_handler= iscsi_eh_device_reset, 833 .eh_device_reset_handler= iscsi_eh_device_reset,
1914 .eh_target_reset_handler= iscsi_eh_target_reset, 834 .eh_target_reset_handler= iscsi_eh_target_reset,
1915 .use_clustering = DISABLE_CLUSTERING, 835 .use_clustering = DISABLE_CLUSTERING,
1916 .slave_configure = iscsi_tcp_slave_configure, 836 .slave_configure = iscsi_sw_tcp_slave_configure,
1917 .proc_name = "iscsi_tcp", 837 .proc_name = "iscsi_tcp",
1918 .this_id = -1, 838 .this_id = -1,
1919}; 839};
1920 840
1921static struct iscsi_transport iscsi_tcp_transport = { 841static struct iscsi_transport iscsi_sw_tcp_transport = {
1922 .owner = THIS_MODULE, 842 .owner = THIS_MODULE,
1923 .name = "tcp", 843 .name = "tcp",
1924 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST 844 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
@@ -1951,32 +871,36 @@ static struct iscsi_transport iscsi_tcp_transport = {
1951 ISCSI_HOST_INITIATOR_NAME | 871 ISCSI_HOST_INITIATOR_NAME |
1952 ISCSI_HOST_NETDEV_NAME, 872 ISCSI_HOST_NETDEV_NAME,
1953 /* session management */ 873 /* session management */
1954 .create_session = iscsi_tcp_session_create, 874 .create_session = iscsi_sw_tcp_session_create,
1955 .destroy_session = iscsi_tcp_session_destroy, 875 .destroy_session = iscsi_sw_tcp_session_destroy,
1956 /* connection management */ 876 /* connection management */
1957 .create_conn = iscsi_tcp_conn_create, 877 .create_conn = iscsi_sw_tcp_conn_create,
1958 .bind_conn = iscsi_tcp_conn_bind, 878 .bind_conn = iscsi_sw_tcp_conn_bind,
1959 .destroy_conn = iscsi_tcp_conn_destroy, 879 .destroy_conn = iscsi_sw_tcp_conn_destroy,
1960 .set_param = iscsi_conn_set_param, 880 .set_param = iscsi_sw_tcp_conn_set_param,
1961 .get_conn_param = iscsi_tcp_conn_get_param, 881 .get_conn_param = iscsi_sw_tcp_conn_get_param,
1962 .get_session_param = iscsi_session_get_param, 882 .get_session_param = iscsi_session_get_param,
1963 .start_conn = iscsi_conn_start, 883 .start_conn = iscsi_conn_start,
1964 .stop_conn = iscsi_tcp_conn_stop, 884 .stop_conn = iscsi_sw_tcp_conn_stop,
1965 /* iscsi host params */ 885 /* iscsi host params */
1966 .get_host_param = iscsi_host_get_param, 886 .get_host_param = iscsi_host_get_param,
1967 .set_host_param = iscsi_host_set_param, 887 .set_host_param = iscsi_host_set_param,
1968 /* IO */ 888 /* IO */
1969 .send_pdu = iscsi_conn_send_pdu, 889 .send_pdu = iscsi_conn_send_pdu,
1970 .get_stats = iscsi_conn_get_stats, 890 .get_stats = iscsi_sw_tcp_conn_get_stats,
891 /* iscsi task/cmd helpers */
1971 .init_task = iscsi_tcp_task_init, 892 .init_task = iscsi_tcp_task_init,
1972 .xmit_task = iscsi_tcp_task_xmit, 893 .xmit_task = iscsi_tcp_task_xmit,
1973 .cleanup_task = iscsi_tcp_cleanup_task, 894 .cleanup_task = iscsi_tcp_cleanup_task,
895 /* low level pdu helpers */
896 .xmit_pdu = iscsi_sw_tcp_pdu_xmit,
897 .init_pdu = iscsi_sw_tcp_pdu_init,
898 .alloc_pdu = iscsi_sw_tcp_pdu_alloc,
1974 /* recovery */ 899 /* recovery */
1975 .session_recovery_timedout = iscsi_session_recovery_timedout, 900 .session_recovery_timedout = iscsi_session_recovery_timedout,
1976}; 901};
1977 902
1978static int __init 903static int __init iscsi_sw_tcp_init(void)
1979iscsi_tcp_init(void)
1980{ 904{
1981 if (iscsi_max_lun < 1) { 905 if (iscsi_max_lun < 1) {
1982 printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n", 906 printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
@@ -1984,19 +908,18 @@ iscsi_tcp_init(void)
1984 return -EINVAL; 908 return -EINVAL;
1985 } 909 }
1986 910
1987 iscsi_tcp_scsi_transport = iscsi_register_transport( 911 iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
1988 &iscsi_tcp_transport); 912 &iscsi_sw_tcp_transport);
1989 if (!iscsi_tcp_scsi_transport) 913 if (!iscsi_sw_tcp_scsi_transport)
1990 return -ENODEV; 914 return -ENODEV;
1991 915
1992 return 0; 916 return 0;
1993} 917}
1994 918
1995static void __exit 919static void __exit iscsi_sw_tcp_exit(void)
1996iscsi_tcp_exit(void)
1997{ 920{
1998 iscsi_unregister_transport(&iscsi_tcp_transport); 921 iscsi_unregister_transport(&iscsi_sw_tcp_transport);
1999} 922}
2000 923
2001module_init(iscsi_tcp_init); 924module_init(iscsi_sw_tcp_init);
2002module_exit(iscsi_tcp_exit); 925module_exit(iscsi_sw_tcp_exit);
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 498d8ca39848..ca6b7bc64de0 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -19,67 +19,27 @@
19 * See the file COPYING included with this distribution for more details. 19 * See the file COPYING included with this distribution for more details.
20 */ 20 */
21 21
22#ifndef ISCSI_TCP_H 22#ifndef ISCSI_SW_TCP_H
23#define ISCSI_TCP_H 23#define ISCSI_SW_TCP_H
24 24
25#include <scsi/libiscsi.h> 25#include <scsi/libiscsi.h>
26#include <scsi/libiscsi_tcp.h>
26 27
27struct crypto_hash;
28struct socket; 28struct socket;
29struct iscsi_tcp_conn; 29struct iscsi_tcp_conn;
30struct iscsi_segment;
31
32typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
33 struct iscsi_segment *);
34
35struct iscsi_segment {
36 unsigned char *data;
37 unsigned int size;
38 unsigned int copied;
39 unsigned int total_size;
40 unsigned int total_copied;
41
42 struct hash_desc *hash;
43 unsigned char recv_digest[ISCSI_DIGEST_SIZE];
44 unsigned char digest[ISCSI_DIGEST_SIZE];
45 unsigned int digest_len;
46
47 struct scatterlist *sg;
48 void *sg_mapped;
49 unsigned int sg_offset;
50
51 iscsi_segment_done_fn_t *done;
52};
53
54/* Socket connection recieve helper */
55struct iscsi_tcp_recv {
56 struct iscsi_hdr *hdr;
57 struct iscsi_segment segment;
58
59 /* Allocate buffer for BHS + AHS */
60 uint32_t hdr_buf[64];
61
62 /* copied and flipped values */
63 int datalen;
64};
65 30
66/* Socket connection send helper */ 31/* Socket connection send helper */
67struct iscsi_tcp_send { 32struct iscsi_sw_tcp_send {
68 struct iscsi_hdr *hdr; 33 struct iscsi_hdr *hdr;
69 struct iscsi_segment segment; 34 struct iscsi_segment segment;
70 struct iscsi_segment data_segment; 35 struct iscsi_segment data_segment;
71}; 36};
72 37
73struct iscsi_tcp_conn { 38struct iscsi_sw_tcp_conn {
74 struct iscsi_conn *iscsi_conn; 39 struct iscsi_conn *iscsi_conn;
75 struct socket *sock; 40 struct socket *sock;
76 int stop_stage; /* conn_stop() flag: *
77 * stop to recover, *
78 * stop to terminate */
79 /* control data */
80 struct iscsi_tcp_recv in; /* TCP receive context */
81 struct iscsi_tcp_send out; /* TCP send context */
82 41
42 struct iscsi_sw_tcp_send out;
83 /* old values for socket callbacks */ 43 /* old values for socket callbacks */
84 void (*old_data_ready)(struct sock *, int); 44 void (*old_data_ready)(struct sock *, int);
85 void (*old_state_change)(struct sock *); 45 void (*old_state_change)(struct sock *);
@@ -93,41 +53,13 @@ struct iscsi_tcp_conn {
93 uint32_t sendpage_failures_cnt; 53 uint32_t sendpage_failures_cnt;
94 uint32_t discontiguous_hdr_cnt; 54 uint32_t discontiguous_hdr_cnt;
95 55
96 int error;
97
98 ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); 56 ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
99}; 57};
100 58
101struct iscsi_data_task { 59struct iscsi_sw_tcp_hdrbuf {
102 struct iscsi_data hdr; /* PDU */ 60 struct iscsi_hdr hdrbuf;
103 char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */ 61 char hdrextbuf[ISCSI_MAX_AHS_SIZE +
104};
105
106struct iscsi_r2t_info {
107 __be32 ttt; /* copied from R2T */
108 __be32 exp_statsn; /* copied from R2T */
109 uint32_t data_length; /* copied from R2T */
110 uint32_t data_offset; /* copied from R2T */
111 int sent; /* R2T sequence progress */
112 int data_count; /* DATA-Out payload progress */
113 int solicit_datasn;
114 struct iscsi_data_task dtask; /* Data-Out header buf */
115};
116
117struct iscsi_tcp_task {
118 struct iscsi_hdr_buff {
119 struct iscsi_cmd cmd_hdr;
120 char hdrextbuf[ISCSI_MAX_AHS_SIZE +
121 ISCSI_DIGEST_SIZE]; 62 ISCSI_DIGEST_SIZE];
122 } hdr;
123
124 int sent;
125 uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
126 int data_offset;
127 struct iscsi_r2t_info *r2t; /* in progress R2T */
128 struct iscsi_pool r2tpool;
129 struct kfifo *r2tqueue;
130 struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
131}; 63};
132 64
133#endif /* ISCSI_H */ 65#endif /* ISCSI_SW_TCP_H */
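The new struct iscsi_sw_tcp_hdrbuf above reserves space for the basic header segment plus the maximum AHS and a header digest, and the session-create hunk earlier in this patch sizes each task's dd_data as sizeof(struct iscsi_tcp_task) + sizeof(struct iscsi_sw_tcp_hdrbuf), so the per-task header buffer sits directly behind the generic TCP task state. A minimal user-space sketch of that sizing; the constants below are stand-ins for the real values in the iSCSI headers and are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Stand-in sizes; the real definitions come from the iSCSI protocol and
 * libiscsi_tcp headers. */
#define BHS_SIZE            48   /* basic header segment (struct iscsi_hdr) */
#define ISCSI_MAX_AHS_SIZE  256  /* assumed value, for illustration */
#define ISCSI_DIGEST_SIZE   4    /* CRC32C header digest */

struct sw_tcp_hdrbuf {                  /* mirrors iscsi_sw_tcp_hdrbuf */
	uint8_t bhs[BHS_SIZE];
	uint8_t hdrext[ISCSI_MAX_AHS_SIZE + ISCSI_DIGEST_SIZE];
};

int main(void)
{
	/* Per-task private data = generic iscsi_tcp_task state (not shown)
	 * followed by this header buffer, allocated back to back. */
	printf("header buffer per task: %zu bytes\n",
	       sizeof(struct sw_tcp_hdrbuf));
	return 0;
}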
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 000000000000..55f982de3a9a
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,12 @@
1# $Id: Makefile
2
3obj-$(CONFIG_LIBFC) += libfc.o
4
5libfc-objs := \
6 fc_disc.o \
7 fc_exch.o \
8 fc_elsct.o \
9 fc_frame.o \
10 fc_lport.o \
11 fc_rport.o \
12 fc_fcp.o
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
new file mode 100644
index 000000000000..dd1564c9e04a
--- /dev/null
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -0,0 +1,845 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Target Discovery
22 *
23 * This block discovers all FC-4 remote ports, including FCP initiators. It
24 * also handles RSCN events and re-discovery if necessary.
25 */
26
27/*
28 * DISC LOCKING
29 *
 30 * The disc mutex can be locked when acquiring rport locks, but may not
31 * be held when acquiring the lport lock. Refer to fc_lport.c for more
32 * details.
33 */
34
35#include <linux/timer.h>
36#include <linux/err.h>
37#include <asm/unaligned.h>
38
39#include <scsi/fc/fc_gs.h>
40
41#include <scsi/libfc.h>
42
43#define FC_DISC_RETRY_LIMIT 3 /* max retries */
44#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
45
46#define FC_DISC_DELAY 3
47
48static int fc_disc_debug;
49
50#define FC_DEBUG_DISC(fmt...) \
51 do { \
52 if (fc_disc_debug) \
53 FC_DBG(fmt); \
54 } while (0)
55
56static void fc_disc_gpn_ft_req(struct fc_disc *);
57static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
58static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
59 struct fc_rport_identifiers *);
60static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
61static void fc_disc_done(struct fc_disc *);
62static void fc_disc_timeout(struct work_struct *);
63static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
64static void fc_disc_restart(struct fc_disc *);
65
66/**
67 * fc_disc_lookup_rport - lookup a remote port by port_id
68 * @lport: Fibre Channel host port instance
69 * @port_id: remote port port_id to match
70 */
71struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
72 u32 port_id)
73{
74 const struct fc_disc *disc = &lport->disc;
75 struct fc_rport *rport, *found = NULL;
76 struct fc_rport_libfc_priv *rdata;
77 int disc_found = 0;
78
79 list_for_each_entry(rdata, &disc->rports, peers) {
80 rport = PRIV_TO_RPORT(rdata);
81 if (rport->port_id == port_id) {
82 disc_found = 1;
83 found = rport;
84 break;
85 }
86 }
87
88 if (!disc_found)
89 found = NULL;
90
91 return found;
92}
93
94/**
95 * fc_disc_stop_rports - delete all the remote ports associated with the lport
96 * @disc: The discovery job to stop rports on
97 *
98 * Locking Note: This function expects that the lport mutex is locked before
99 * calling it.
100 */
101void fc_disc_stop_rports(struct fc_disc *disc)
102{
103 struct fc_lport *lport;
104 struct fc_rport *rport;
105 struct fc_rport_libfc_priv *rdata, *next;
106
107 lport = disc->lport;
108
109 mutex_lock(&disc->disc_mutex);
110 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
111 rport = PRIV_TO_RPORT(rdata);
112 list_del(&rdata->peers);
113 lport->tt.rport_logoff(rport);
114 }
115
116 mutex_unlock(&disc->disc_mutex);
117}
118
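The DISC LOCKING note at the top of this file, which fc_disc_stop_rports() above follows, boils down to one rule: the disc mutex may be held while taking per-rport locks, but must not be held while taking the lport lock. A small pthread-based user-space analogue of that ordering; the struct and lock names here are made up for illustration.

#include <pthread.h>
#include <stdio.h>

struct demo_lport { pthread_mutex_t lp_lock; };
struct demo_disc  { pthread_mutex_t disc_mutex; pthread_mutex_t rport_lock; };

/* Allowed nesting: rport locks may be taken under the disc mutex. */
static void allowed_nesting(struct demo_disc *disc)
{
	pthread_mutex_lock(&disc->disc_mutex);
	pthread_mutex_lock(&disc->rport_lock);
	pthread_mutex_unlock(&disc->rport_lock);
	pthread_mutex_unlock(&disc->disc_mutex);
}

/* The lport lock is only taken once the disc mutex has been dropped. */
static void lport_lock_path(struct demo_lport *lport)
{
	pthread_mutex_lock(&lport->lp_lock);
	pthread_mutex_unlock(&lport->lp_lock);
}

int main(void)
{
	struct demo_lport lport = { PTHREAD_MUTEX_INITIALIZER };
	struct demo_disc disc = { PTHREAD_MUTEX_INITIALIZER,
				  PTHREAD_MUTEX_INITIALIZER };

	allowed_nesting(&disc);
	lport_lock_path(&lport);
	printf("lock ordering exercised\n");
	return 0;
}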
119/**
120 * fc_disc_rport_callback - Event handler for rport events
121 * @lport: The lport which is receiving the event
122 * @rport: The rport on which the event occurred
123 * @event: The event that occurred
124 *
125 * Locking Note: The rport lock should not be held when calling
126 * this function.
127 */
128static void fc_disc_rport_callback(struct fc_lport *lport,
129 struct fc_rport *rport,
130 enum fc_rport_event event)
131{
132 struct fc_rport_libfc_priv *rdata = rport->dd_data;
133 struct fc_disc *disc = &lport->disc;
134 int found = 0;
135
136 FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
137 rport->port_id);
138
139 if (event == RPORT_EV_CREATED) {
140 if (disc) {
141 found = 1;
142 mutex_lock(&disc->disc_mutex);
143 list_add_tail(&rdata->peers, &disc->rports);
144 mutex_unlock(&disc->disc_mutex);
145 }
146 }
147
148 if (!found)
149 FC_DEBUG_DISC("The rport (%6x) is not maintained "
150 "by the discovery layer\n", rport->port_id);
151}
152
153/**
154 * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
155 * @sp: Current sequence of the RSCN exchange
156 * @fp: RSCN Frame
157 * @disc: FC discovery context
158 *
159 * Locking Note: This function expects that the disc_mutex is locked
160 * before it is called.
161 */
162static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
163 struct fc_disc *disc)
164{
165 struct fc_lport *lport;
166 struct fc_rport *rport;
167 struct fc_rport_libfc_priv *rdata;
168 struct fc_els_rscn *rp;
169 struct fc_els_rscn_page *pp;
170 struct fc_seq_els_data rjt_data;
171 unsigned int len;
172 int redisc = 0;
173 enum fc_els_rscn_ev_qual ev_qual;
174 enum fc_els_rscn_addr_fmt fmt;
175 LIST_HEAD(disc_ports);
176 struct fc_disc_port *dp, *next;
177
178 lport = disc->lport;
179
180 FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
181 fc_host_port_id(lport->host));
182
183 /* make sure the frame contains an RSCN message */
184 rp = fc_frame_payload_get(fp, sizeof(*rp));
185 if (!rp)
186 goto reject;
187 /* make sure the page length is as expected (4 bytes) */
188 if (rp->rscn_page_len != sizeof(*pp))
189 goto reject;
190 /* get the RSCN payload length */
191 len = ntohs(rp->rscn_plen);
192 if (len < sizeof(*rp))
193 goto reject;
194 /* make sure the frame contains the expected payload */
195 rp = fc_frame_payload_get(fp, len);
196 if (!rp)
197 goto reject;
198 /* payload must be a multiple of the RSCN page size */
199 len -= sizeof(*rp);
200 if (len % sizeof(*pp))
201 goto reject;
202
203 for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
204 ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
205 ev_qual &= ELS_RSCN_EV_QUAL_MASK;
206 fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
207 fmt &= ELS_RSCN_ADDR_FMT_MASK;
208 /*
209 * if we get an address format other than port
210 * (area, domain, fabric), then do a full discovery
211 */
212 switch (fmt) {
213 case ELS_ADDR_FMT_PORT:
214 FC_DEBUG_DISC("Port address format for port (%6x)\n",
215 ntoh24(pp->rscn_fid));
216 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
217 if (!dp) {
218 redisc = 1;
219 break;
220 }
221 dp->lp = lport;
222 dp->ids.port_id = ntoh24(pp->rscn_fid);
223 dp->ids.port_name = -1;
224 dp->ids.node_name = -1;
225 dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
226 list_add_tail(&dp->peers, &disc_ports);
227 break;
228 case ELS_ADDR_FMT_AREA:
229 case ELS_ADDR_FMT_DOM:
230 case ELS_ADDR_FMT_FAB:
231 default:
232 FC_DEBUG_DISC("Address format is (%d)\n", fmt);
233 redisc = 1;
234 break;
235 }
236 }
237 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
238 if (redisc) {
239 FC_DEBUG_DISC("RSCN received: rediscovering\n");
240 fc_disc_restart(disc);
241 } else {
242 FC_DEBUG_DISC("RSCN received: not rediscovering. "
243 "redisc %d state %d in_prog %d\n",
244 redisc, lport->state, disc->pending);
245 list_for_each_entry_safe(dp, next, &disc_ports, peers) {
246 list_del(&dp->peers);
247 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
248 if (rport) {
249 rdata = RPORT_TO_PRIV(rport);
250 list_del(&rdata->peers);
251 lport->tt.rport_logoff(rport);
252 }
253 fc_disc_single(disc, dp);
254 }
255 }
256 fc_frame_free(fp);
257 return;
258reject:
259 FC_DEBUG_DISC("Received a bad RSCN frame\n");
260 rjt_data.fp = NULL;
261 rjt_data.reason = ELS_RJT_LOGIC;
262 rjt_data.explan = ELS_EXPL_NONE;
263 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
264 fc_frame_free(fp);
265}
266
267/**
268 * fc_disc_recv_req - Handle incoming requests
269 * @sp: Current sequence of the request exchange
270 * @fp: The frame
271 * @lport: The FC local port
272 *
273 * Locking Note: This function is called from the EM and will lock
274 * the disc_mutex before calling the handler for the
275 * request.
276 */
277static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
278 struct fc_lport *lport)
279{
280 u8 op;
281 struct fc_disc *disc = &lport->disc;
282
283 op = fc_frame_payload_op(fp);
284 switch (op) {
285 case ELS_RSCN:
286 mutex_lock(&disc->disc_mutex);
287 fc_disc_recv_rscn_req(sp, fp, disc);
288 mutex_unlock(&disc->disc_mutex);
289 break;
290 default:
291 FC_DBG("Received an unsupported request. opcode (%x)\n", op);
292 break;
293 }
294}
295
296/**
297 * fc_disc_restart - Restart discovery
298 * @disc: FC discovery context
299 *
300 * Locking Note: This function expects that the disc mutex
301 * is already locked.
302 */
303static void fc_disc_restart(struct fc_disc *disc)
304{
305 struct fc_rport *rport;
306 struct fc_rport_libfc_priv *rdata, *next;
307 struct fc_lport *lport = disc->lport;
308
309 FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
310 fc_host_port_id(lport->host));
311
312 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
313 rport = PRIV_TO_RPORT(rdata);
314 FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
315 list_del(&rdata->peers);
316 lport->tt.rport_logoff(rport);
317 }
318
319 disc->requested = 1;
320 if (!disc->pending)
321 fc_disc_gpn_ft_req(disc);
322}
323
324/**
325 * fc_disc_start - Fibre Channel Target discovery
326 * @disc_callback: Callback to invoke when discovery completes or fails
327 * @lport: FC local port
328 * If discovery is already pending, only the request flag is recorded.
329 */
330static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
331 enum fc_disc_event),
332 struct fc_lport *lport)
333{
334 struct fc_rport *rport;
335 struct fc_rport_identifiers ids;
336 struct fc_disc *disc = &lport->disc;
337
338 /*
339 * At this point we may have a new disc job or an existing
340 * one. Either way, let's lock when we make changes to it
341 * and send the GPN_FT request.
342 */
343 mutex_lock(&disc->disc_mutex);
344
345 disc->disc_callback = disc_callback;
346
347 /*
348 * If not ready, or already running discovery, just set request flag.
349 */
350 disc->requested = 1;
351
352 if (disc->pending) {
353 mutex_unlock(&disc->disc_mutex);
354 return;
355 }
356
357 /*
358 * Handle point-to-point mode as a simple discovery
359 * of the remote port. Yucky, yucky, yuck, yuck!
360 */
361 rport = disc->lport->ptp_rp;
362 if (rport) {
363 ids.port_id = rport->port_id;
364 ids.port_name = rport->port_name;
365 ids.node_name = rport->node_name;
366 ids.roles = FC_RPORT_ROLE_UNKNOWN;
367 get_device(&rport->dev);
368
369 if (!fc_disc_new_target(disc, rport, &ids)) {
370 disc->event = DISC_EV_SUCCESS;
371 fc_disc_done(disc);
372 }
373 put_device(&rport->dev);
374 } else {
375 fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
376 }
377
378 mutex_unlock(&disc->disc_mutex);
379}
380
381static struct fc_rport_operations fc_disc_rport_ops = {
382 .event_callback = fc_disc_rport_callback,
383};
384
385/**
386 * fc_disc_new_target - Handle new target found by discovery
387 * @disc: FC discovery context
388 * @rport: The previous FC remote port (NULL if new remote port)
389 * @ids: Identifiers for the new FC remote port
390 *
391 * Locking Note: This function expects that the disc_mutex is locked
392 * before it is called.
393 */
394static int fc_disc_new_target(struct fc_disc *disc,
395 struct fc_rport *rport,
396 struct fc_rport_identifiers *ids)
397{
398 struct fc_lport *lport = disc->lport;
399 struct fc_rport_libfc_priv *rp;
400 int error = 0;
401
402 if (rport && ids->port_name) {
403 if (rport->port_name == -1) {
404 /*
405 * Set WWN and fall through to notify of create.
406 */
407 fc_rport_set_name(rport, ids->port_name,
408 rport->node_name);
409 } else if (rport->port_name != ids->port_name) {
410 /*
411 * This is a new port with the same FCID as
412 * a previously-discovered port. Presumably the old
413 * port logged out and a new port logged in and was
414 * assigned the same FCID. This should be rare.
415 * Delete the old one and fall thru to re-create.
416 */
417 fc_disc_del_target(disc, rport);
418 rport = NULL;
419 }
420 }
421 if (((ids->port_name != -1) || (ids->port_id != -1)) &&
422 ids->port_id != fc_host_port_id(lport->host) &&
423 ids->port_name != lport->wwpn) {
424 if (!rport) {
425 rport = lport->tt.rport_lookup(lport, ids->port_id);
426 if (!rport) {
427 struct fc_disc_port dp;
428 dp.lp = lport;
429 dp.ids.port_id = ids->port_id;
430 dp.ids.port_name = ids->port_name;
431 dp.ids.node_name = ids->node_name;
432 dp.ids.roles = ids->roles;
433 rport = fc_rport_rogue_create(&dp);
434 }
435 if (!rport)
436 error = -ENOMEM;
437 }
438 if (rport) {
439 rp = rport->dd_data;
440 rp->ops = &fc_disc_rport_ops;
441 rp->rp_state = RPORT_ST_INIT;
442 lport->tt.rport_login(rport);
443 }
444 }
445 return error;
446}
447
448/**
449 * fc_disc_del_target - Delete a target
450 * @disc: FC discovery context
451 * @rport: The remote port to be removed
452 */
453static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
454{
455 struct fc_lport *lport = disc->lport;
456 struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
457 list_del(&rdata->peers);
458 lport->tt.rport_logoff(rport);
459}
460
461/**
462 * fc_disc_done - Discovery has been completed
463 * @disc: FC discovery context
464 */
465static void fc_disc_done(struct fc_disc *disc)
466{
467 struct fc_lport *lport = disc->lport;
468
469 FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
470 fc_host_port_id(lport->host));
471
472 disc->disc_callback(lport, disc->event);
473 disc->event = DISC_EV_NONE;
474
475 if (disc->requested)
476 fc_disc_gpn_ft_req(disc);
477 else
478 disc->pending = 0;
479}
480
481/**
482 * fc_disc_error - Handle error on dNS request
483 * @disc: FC discovery context
484 * @fp: The frame pointer
485 */
486static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
487{
488 struct fc_lport *lport = disc->lport;
489 unsigned long delay = 0;
490 if (fc_disc_debug)
491 FC_DBG("Error %ld, retries %d/%d\n",
492 PTR_ERR(fp), disc->retry_count,
493 FC_DISC_RETRY_LIMIT);
494
495 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
496 /*
497 * Memory allocation failure, or the exchange timed out,
498 * retry after delay.
499 */
500 if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
501 /* go ahead and retry */
502 if (!fp)
503 delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
504 else {
505 delay = msecs_to_jiffies(lport->e_d_tov);
506
507 /* timeout faster first time */
508 if (!disc->retry_count)
509 delay /= 4;
510 }
511 disc->retry_count++;
512 schedule_delayed_work(&disc->disc_work, delay);
513 } else {
514 /* exceeded retries */
515 disc->event = DISC_EV_FAILED;
516 fc_disc_done(disc);
517 }
518 }
519}
520
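fc_disc_error() above retries up to FC_DISC_RETRY_LIMIT times: after an allocation failure it waits a fixed FC_DISC_RETRY_DELAY, and after an exchange timeout it waits e_d_tov, quartered on the first retry so the first recovery attempt happens sooner. A standalone sketch of that delay selection:

#include <stdio.h>

#define FC_DISC_RETRY_LIMIT	3	/* max retries */
#define FC_DISC_RETRY_DELAY	500UL	/* (msecs) delay */

/* Mirror of the delay selection in fc_disc_error(); the allocation-failure
 * flag and e_d_tov are plain inputs here rather than lport state. */
static unsigned long disc_retry_delay(int alloc_failed, unsigned long e_d_tov,
				      int retry_count)
{
	unsigned long delay;

	if (alloc_failed)
		return FC_DISC_RETRY_DELAY;

	delay = e_d_tov;
	if (!retry_count)		/* time out faster the first time */
		delay /= 4;
	return delay;
}

int main(void)
{
	int i;

	for (i = 0; i < FC_DISC_RETRY_LIMIT; i++)
		printf("retry %d after timeout: %lu ms\n", i,
		       disc_retry_delay(0, 2000UL, i));
	return 0;
}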
521/**
522 * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
523 * @disc: FC discovery context
524 *
525 * Locking Note: This function expects that the disc_mutex is locked
526 * before it is called.
527 */
528static void fc_disc_gpn_ft_req(struct fc_disc *disc)
529{
530 struct fc_frame *fp;
531 struct fc_lport *lport = disc->lport;
532
533 WARN_ON(!fc_lport_test_ready(lport));
534
535 disc->pending = 1;
536 disc->requested = 0;
537
538 disc->buf_len = 0;
539 disc->seq_count = 0;
540 fp = fc_frame_alloc(lport,
541 sizeof(struct fc_ct_hdr) +
542 sizeof(struct fc_ns_gid_ft));
543 if (!fp)
544 goto err;
545
546 if (lport->tt.elsct_send(lport, NULL, fp,
547 FC_NS_GPN_FT,
548 fc_disc_gpn_ft_resp,
549 disc, lport->e_d_tov))
550 return;
551err:
552 fc_disc_error(disc, fp);
553}
554
555/**
556 * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
557 * @disc: FC discovery context
558 * @buf: GPN_FT response buffer
559 * @len: size of response buffer
560 */
561static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
562{
563 struct fc_lport *lport;
564 struct fc_gpn_ft_resp *np;
565 char *bp;
566 size_t plen;
567 size_t tlen;
568 int error = 0;
569 struct fc_disc_port dp;
570 struct fc_rport *rport;
571 struct fc_rport_libfc_priv *rdata;
572
573 lport = disc->lport;
574
575 /*
576 * Handle partial name record left over from previous call.
577 */
578 bp = buf;
579 plen = len;
580 np = (struct fc_gpn_ft_resp *)bp;
581 tlen = disc->buf_len;
582 if (tlen) {
583 WARN_ON(tlen >= sizeof(*np));
584 plen = sizeof(*np) - tlen;
585 WARN_ON(plen <= 0);
586 WARN_ON(plen >= sizeof(*np));
587 if (plen > len)
588 plen = len;
589 np = &disc->partial_buf;
590 memcpy((char *)np + tlen, bp, plen);
591
592 /*
593 * Set bp so that the loop below will advance it to the
594 * first valid full name element.
595 */
596 bp -= tlen;
597 len += tlen;
598 plen += tlen;
599 disc->buf_len = (unsigned char) plen;
600 if (plen == sizeof(*np))
601 disc->buf_len = 0;
602 }
603
604 /*
605 * Handle full name records, including the one filled from above.
606 * Normally, np == bp and plen == len, but from the partial case above,
607 * bp, len describe the overall buffer, and np, plen describe the
608 * partial buffer, which would usually be full now.
609 * After the first time through the loop, things return to "normal".
610 */
611 while (plen >= sizeof(*np)) {
612 dp.lp = lport;
613 dp.ids.port_id = ntoh24(np->fp_fid);
614 dp.ids.port_name = ntohll(np->fp_wwpn);
615 dp.ids.node_name = -1;
616 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
617
618 if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
619 (dp.ids.port_name != lport->wwpn)) {
620 rport = fc_rport_rogue_create(&dp);
621 if (rport) {
622 rdata = rport->dd_data;
623 rdata->ops = &fc_disc_rport_ops;
624 rdata->local_port = lport;
625 lport->tt.rport_login(rport);
626 } else
627 FC_DBG("Failed to allocate memory for "
628 "the newly discovered port (%6x)\n",
629 dp.ids.port_id);
630 }
631
632 if (np->fp_flags & FC_NS_FID_LAST) {
633 disc->event = DISC_EV_SUCCESS;
634 fc_disc_done(disc);
635 len = 0;
636 break;
637 }
638 len -= sizeof(*np);
639 bp += sizeof(*np);
640 np = (struct fc_gpn_ft_resp *)bp;
641 plen = len;
642 }
643
644 /*
645 * Save any partial record at the end of the buffer for next time.
646 */
647 if (error == 0 && len > 0 && len < sizeof(*np)) {
648 if (np != &disc->partial_buf) {
649 FC_DEBUG_DISC("Partial buffer remains "
650 "for discovery by (%6x)\n",
651 fc_host_port_id(lport->host));
652 memcpy(&disc->partial_buf, np, len);
653 }
654 disc->buf_len = (unsigned char) len;
655 } else {
656 disc->buf_len = 0;
657 }
658 return error;
659}
660
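A GPN_FT accept may split a fixed-size name record across two response buffers, so fc_disc_gpn_ft_parse() above stashes the partial tail in disc->partial_buf / disc->buf_len and completes it from the head of the next buffer. A simplified user-space sketch of the same accumulation; it restates the logic without the bp/tlen pointer adjustment used above, and the 8-byte record type is a stand-in for struct fc_gpn_ft_resp.

#include <stdio.h>
#include <string.h>

struct rec {				/* stand-in fixed-size name record */
	unsigned char bytes[8];
};

static struct rec partial;		/* like disc->partial_buf */
static size_t partial_len;		/* like disc->buf_len */

static void handle_record(const struct rec *r)
{
	printf("record: %.8s\n", (const char *)r->bytes);
}

/* Feed one response buffer: finish any saved partial record first, then
 * walk the whole records, then save a new partial tail for next time. */
static void parse(const unsigned char *buf, size_t len)
{
	if (partial_len) {
		size_t need = sizeof(struct rec) - partial_len;

		if (need > len)
			need = len;
		memcpy((unsigned char *)&partial + partial_len, buf, need);
		partial_len += need;
		buf += need;
		len -= need;
		if (partial_len == sizeof(struct rec)) {
			handle_record(&partial);
			partial_len = 0;
		}
	}
	while (len >= sizeof(struct rec)) {
		handle_record((const struct rec *)buf);
		buf += sizeof(struct rec);
		len -= sizeof(struct rec);
	}
	if (len) {
		memcpy(&partial, buf, len);
		partial_len = len;
	}
}

int main(void)
{
	unsigned char a[] = "AAAAAAAABBBB";	/* one record + half of the next */
	unsigned char b[] = "BBBBCCCCCCCC";	/* rest of it + one more record */

	parse(a, sizeof(a) - 1);
	parse(b, sizeof(b) - 1);
	return 0;
}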
661/*
662 * Handle retry of memory allocation for remote ports.
663 */
664static void fc_disc_timeout(struct work_struct *work)
665{
666 struct fc_disc *disc = container_of(work,
667 struct fc_disc,
668 disc_work.work);
669 mutex_lock(&disc->disc_mutex);
670 if (disc->requested && !disc->pending)
671 fc_disc_gpn_ft_req(disc);
672 mutex_unlock(&disc->disc_mutex);
673}
674
675/**
676 * fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
677 * @sp: Current sequence of GPN_FT exchange
678 * @fp: response frame
679 * @disc_arg: FC discovery context
680 *
681 * Locking Note: This function expects that the disc_mutex is locked
682 * before it is called.
683 */
684static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
685 void *disc_arg)
686{
687 struct fc_disc *disc = disc_arg;
688 struct fc_ct_hdr *cp;
689 struct fc_frame_header *fh;
690 unsigned int seq_cnt;
691 void *buf = NULL;
692 unsigned int len;
693 int error;
694
695 FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
696 fc_host_port_id(disc->lport->host));
697
698 if (IS_ERR(fp)) {
699 fc_disc_error(disc, fp);
700 return;
701 }
702
703 WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
704 fh = fc_frame_header_get(fp);
705 len = fr_len(fp) - sizeof(*fh);
706 seq_cnt = ntohs(fh->fh_seq_cnt);
707 if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
708 disc->seq_count == 0) {
709 cp = fc_frame_payload_get(fp, sizeof(*cp));
710 if (!cp) {
711 FC_DBG("GPN_FT response too short, len %d\n",
712 fr_len(fp));
713 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
714
715 /*
716 * Accepted. Parse response.
717 */
718 buf = cp + 1;
719 len -= sizeof(*cp);
720 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
721 FC_DBG("GPN_FT rejected reason %x exp %x "
722 "(check zoning)\n", cp->ct_reason,
723 cp->ct_explan);
724 disc->event = DISC_EV_FAILED;
725 fc_disc_done(disc);
726 } else {
727 FC_DBG("GPN_FT unexpected response code %x\n",
728 ntohs(cp->ct_cmd));
729 }
730 } else if (fr_sof(fp) == FC_SOF_N3 &&
731 seq_cnt == disc->seq_count) {
732 buf = fh + 1;
733 } else {
734 FC_DBG("GPN_FT unexpected frame - out of sequence? "
735 "seq_cnt %x expected %x sof %x eof %x\n",
736 seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
737 }
738 if (buf) {
739 error = fc_disc_gpn_ft_parse(disc, buf, len);
740 if (error)
741 fc_disc_error(disc, fp);
742 else
743 disc->seq_count++;
744 }
745 fc_frame_free(fp);
746}
747
748/**
749 * fc_disc_single - Discover the directory information for a single target
750 * @disc: FC discovery context
751 * @dp: The port to rediscover
752 *
753 * Locking Note: This function expects that the disc_mutex is locked
754 * before it is called.
755 */
756static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
757{
758 struct fc_lport *lport;
759 struct fc_rport *rport;
760 struct fc_rport *new_rport;
761 struct fc_rport_libfc_priv *rdata;
762
763 lport = disc->lport;
764
765 if (dp->ids.port_id == fc_host_port_id(lport->host))
766 goto out;
767
768 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
769 if (rport)
770 fc_disc_del_target(disc, rport);
771
772 new_rport = fc_rport_rogue_create(dp);
773 if (new_rport) {
774 rdata = new_rport->dd_data;
775 rdata->ops = &fc_disc_rport_ops;
776 kfree(dp);
777 lport->tt.rport_login(new_rport);
778 }
779 return;
780out:
781 kfree(dp);
782}
783
784/**
785 * fc_disc_stop - Stop discovery for a given lport
786 * @lport: The lport that discovery should stop for
787 */
788void fc_disc_stop(struct fc_lport *lport)
789{
790 struct fc_disc *disc = &lport->disc;
791
792 if (disc) {
793 cancel_delayed_work_sync(&disc->disc_work);
794 fc_disc_stop_rports(disc);
795 }
796}
797
798/**
799 * fc_disc_stop_final - Stop discovery for a given lport
800 * @lport: The lport that discovery should stop for
801 *
802 * This function will block until discovery has been
803 * completely stopped and all rports have been deleted.
804 */
805void fc_disc_stop_final(struct fc_lport *lport)
806{
807 fc_disc_stop(lport);
808 lport->tt.rport_flush_queue();
809}
810
811/**
812 * fc_disc_init - Initialize the discovery block
813 * @lport: FC local port
814 */
815int fc_disc_init(struct fc_lport *lport)
816{
817 struct fc_disc *disc;
818
819 if (!lport->tt.disc_start)
820 lport->tt.disc_start = fc_disc_start;
821
822 if (!lport->tt.disc_stop)
823 lport->tt.disc_stop = fc_disc_stop;
824
825 if (!lport->tt.disc_stop_final)
826 lport->tt.disc_stop_final = fc_disc_stop_final;
827
828 if (!lport->tt.disc_recv_req)
829 lport->tt.disc_recv_req = fc_disc_recv_req;
830
831 if (!lport->tt.rport_lookup)
832 lport->tt.rport_lookup = fc_disc_lookup_rport;
833
834 disc = &lport->disc;
835 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
836 mutex_init(&disc->disc_mutex);
837 INIT_LIST_HEAD(&disc->rports);
838
839 disc->lport = lport;
840 disc->delay = FC_DISC_DELAY;
841 disc->event = DISC_EV_NONE;
842
843 return 0;
844}
845EXPORT_SYMBOL(fc_disc_init);
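fc_disc_init() above installs the discovery handlers into lport->tt only when the LLD has not already provided its own. A tiny user-space sketch of that fill-in-defaults pattern; the ops struct and function names are made up for illustration.

#include <stdio.h>

struct demo_ops {
	void (*disc_start)(void);
	void (*disc_stop)(void);
};

static void default_start(void) { printf("default start\n"); }
static void default_stop(void)  { printf("default stop\n"); }
static void lld_start(void)     { printf("LLD-provided start\n"); }

/* Same shape as fc_disc_init(): only fill in what the LLD left NULL. */
static void demo_disc_init(struct demo_ops *tt)
{
	if (!tt->disc_start)
		tt->disc_start = default_start;
	if (!tt->disc_stop)
		tt->disc_stop = default_stop;
}

int main(void)
{
	struct demo_ops tt = { .disc_start = lld_start };  /* LLD override */

	demo_disc_init(&tt);
	tt.disc_start();	/* keeps the LLD-provided handler */
	tt.disc_stop();		/* falls back to the default */
	return 0;
}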
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
new file mode 100644
index 000000000000..dd47fe619d1e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright(c) 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Provide interface to send ELS/CT FC frames
22 */
23
24#include <asm/unaligned.h>
25#include <scsi/fc/fc_gs.h>
26#include <scsi/fc/fc_ns.h>
27#include <scsi/fc/fc_els.h>
28#include <scsi/libfc.h>
29#include <scsi/fc_encode.h>
30
31/*
32 * fc_elsct_send - sends ELS/CT frame
33 */
34static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
35 struct fc_rport *rport,
36 struct fc_frame *fp,
37 unsigned int op,
38 void (*resp)(struct fc_seq *,
39 struct fc_frame *fp,
40 void *arg),
41 void *arg, u32 timer_msec)
42{
43 enum fc_rctl r_ctl;
44 u32 did;
45 enum fc_fh_type fh_type;
46 int rc;
47
48 /* ELS requests */
49 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
50 rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
51 else
52 /* CT requests */
53 rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
54
55 if (rc)
56 return NULL;
57
58 fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
59 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
60
61 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
62}
63
64int fc_elsct_init(struct fc_lport *lport)
65{
66 if (!lport->tt.elsct_send)
67 lport->tt.elsct_send = fc_elsct_send;
68
69 return 0;
70}
71EXPORT_SYMBOL(fc_elsct_init);
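fc_elsct_send() above picks the fill routine by opcode range: opcodes between ELS_LS_RJT and ELS_AUTH_ELS go through fc_els_fill(), everything else is treated as a CT request and goes through fc_ct_fill(). A small sketch of that dispatch; the numeric opcode values below are assumptions chosen to match the usual FC-ELS and FC-GS numbering and are for illustration only.

#include <stdio.h>

#define ELS_LS_RJT	0x01	/* assumed lower bound of the ELS range */
#define ELS_AUTH_ELS	0x90	/* assumed upper bound of the ELS range */

/* Same rule as fc_elsct_send(): ELS if the opcode falls in the range,
 * otherwise a CT (name server) request. */
static const char *req_kind(unsigned int op)
{
	return (op >= ELS_LS_RJT && op <= ELS_AUTH_ELS) ? "ELS" : "CT";
}

int main(void)
{
	printf("op 0x0003 -> %s request\n", req_kind(0x0003));	/* e.g. PLOGI */
	printf("op 0x0172 -> %s request\n", req_kind(0x0172));	/* e.g. GPN_FT */
	return 0;
}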
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 000000000000..66db08a5f27f
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,1970 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22/*
23 * Fibre Channel exchange and sequence handling.
24 */
25
26#include <linux/timer.h>
27#include <linux/gfp.h>
28#include <linux/err.h>
29
30#include <scsi/fc/fc_fc2.h>
31
32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h>
34
35#define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
36
37/*
38 * fc_exch_debug can be set in debugger or at compile time to get more logs.
39 */
40static int fc_exch_debug;
41
42#define FC_DEBUG_EXCH(fmt...) \
43 do { \
44 if (fc_exch_debug) \
45 FC_DBG(fmt); \
46 } while (0)
47
48static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
49
50/*
51 * Structure and function definitions for managing Fibre Channel Exchanges
52 * and Sequences.
53 *
54 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
55 *
56 * fc_exch_mgr holds the exchange state for an N port
57 *
58 * fc_exch holds state for one exchange and links to its active sequence.
59 *
60 * fc_seq holds the state for an individual sequence.
61 */
62
63/*
64 * Exchange manager.
65 *
66 * This structure is the center for creating exchanges and sequences.
67 * It manages the allocation of exchange IDs.
68 */
69struct fc_exch_mgr {
70 enum fc_class class; /* default class for sequences */
71 spinlock_t em_lock; /* exchange manager lock,
72 must be taken before ex_lock */
73 u16 last_xid; /* last allocated exchange ID */
74 u16 min_xid; /* min exchange ID */
75 u16 max_xid; /* max exchange ID */
76 u16 max_read; /* max exchange ID for read */
77 u16 last_read; /* last xid allocated for read */
78 u32 total_exches; /* total allocated exchanges */
79 struct list_head ex_list; /* allocated exchanges list */
80 struct fc_lport *lp; /* fc device instance */
81 mempool_t *ep_pool; /* reserve ep's */
82
83 /*
 84 * Currently the exchange mgr stats are updated but not used.
 85 * They could either be exposed via sysfs or removed
 86 * altogether if not used. XXX
87 */
88 struct {
89 atomic_t no_free_exch;
90 atomic_t no_free_exch_xid;
91 atomic_t xid_not_found;
92 atomic_t xid_busy;
93 atomic_t seq_not_found;
94 atomic_t non_bls_resp;
95 } stats;
96 struct fc_exch **exches; /* for exch pointers indexed by xid */
97};
98#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
99
100static void fc_exch_rrq(struct fc_exch *);
101static void fc_seq_ls_acc(struct fc_seq *);
102static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
103 enum fc_els_rjt_explan);
104static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
105static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
106static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
107
108/*
109 * Internal implementation notes.
110 *
111 * There is one exchange manager by default in libfc, but an LLD may
112 * choose to have one per CPU. There is one sequence manager per
113 * exchange manager and the two are currently never separated.
114 *
115 * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
116 * assigned by the Sequence Initiator that shall be unique for a specific
117 * D_ID and S_ID pair while the Sequence is open." Note that it isn't
118 * qualified by exchange ID, which one might think it would be.
119 * In practice this limits the number of open sequences and exchanges to 256
120 * per session. For most targets we could treat this limit as per exchange.
121 *
122 * The exchange and its sequence are freed when the last sequence is received.
123 * It's possible for the remote port to leave an exchange open without
124 * sending any sequences.
125 *
126 * Notes on reference counts:
127 *
128 * Exchanges are reference counted and an exchange gets freed when its
129 * reference count becomes zero.
130 *
131 * Timeouts:
132 * Sequences are timed out for E_D_TOV and R_A_TOV.
133 *
134 * Sequence event handling:
135 *
136 * The following events may occur on initiator sequences:
137 *
138 * Send.
139 * For now, the whole thing is sent.
140 * Receive ACK
141 * This applies only to class F.
142 * The sequence is marked complete.
143 * ULP completion.
144 * The upper layer calls fc_exch_done() when done
145 * with exchange and sequence tuple.
146 * RX-inferred completion.
147 * When we receive the next sequence on the same exchange, we can
148 * retire the previous sequence ID. (XXX not implemented).
149 * Timeout.
150 * R_A_TOV frees the sequence ID. If we're waiting for ACK,
151 * E_D_TOV causes abort and calls upper layer response handler
152 * with FC_EX_TIMEOUT error.
153 * Receive RJT
154 * XXX defer.
155 * Send ABTS
156 * On timeout.
157 *
158 * The following events may occur on recipient sequences:
159 *
160 * Receive
161 * Allocate sequence for first frame received.
162 * Hold during receive handler.
163 * Release when final frame received.
164 * Keep status of last N of these for the ELS RES command. XXX TBD.
165 * Receive ABTS
166 * Deallocate sequence
167 * Send RJT
168 * Deallocate
169 *
170 * For now, we neglect conditions where only part of a sequence was
171 * received or transmitted, or where out-of-order receipt is detected.
172 */
173
174/*
175 * Locking notes:
176 *
177 * The EM code runs in a per-CPU worker thread.
178 *
179 * To protect against concurrency between the worker thread code and timers,
180 * sequence allocation and deallocation must be locked.
181 * - exchange refcnt can be done atomically without locks.
182 * - sequence allocation must be locked by exch lock.
183 * - If the em_lock and ex_lock must be taken at the same time, then the
184 * em_lock must be taken before the ex_lock.
185 */
186
187/*
188 * opcode names for debugging.
189 */
190static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
191
192#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
193
194static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
195 unsigned int max_index)
196{
197 const char *name = NULL;
198
199 if (op < max_index)
200 name = table[op];
201 if (!name)
202 name = "unknown";
203 return name;
204}
205
206static const char *fc_exch_rctl_name(unsigned int op)
207{
208 return fc_exch_name_lookup(op, fc_exch_rctl_names,
209 FC_TABLE_SIZE(fc_exch_rctl_names));
210}
211
212/*
213 * Hold an exchange - keep it from being freed.
214 */
215static void fc_exch_hold(struct fc_exch *ep)
216{
217 atomic_inc(&ep->ex_refcnt);
218}
219
220/*
221 * Set up the FC header by initializing a few more FC header fields and sof/eof.
222 * Fields initialized by this function:
223 * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
224 * - sof and eof
225 */
226static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
227 u32 f_ctl)
228{
229 struct fc_frame_header *fh = fc_frame_header_get(fp);
230 u16 fill;
231
232 fr_sof(fp) = ep->class;
233 if (ep->seq.cnt)
234 fr_sof(fp) = fc_sof_normal(ep->class);
235
236 if (f_ctl & FC_FC_END_SEQ) {
237 fr_eof(fp) = FC_EOF_T;
238 if (fc_sof_needs_ack(ep->class))
239 fr_eof(fp) = FC_EOF_N;
240 /*
241 * Form f_ctl.
242 * The number of fill bytes to make the length a 4-byte
243 * multiple is the low order 2-bits of the f_ctl.
244 * The fill itself will have been cleared by the frame
245 * allocation.
246 * After this, the length will be even, as expected by
247 * the transport.
248 */
249 fill = fr_len(fp) & 3;
250 if (fill) {
251 fill = 4 - fill;
252 /* TODO, this may be a problem with fragmented skb */
253 skb_put(fp_skb(fp), fill);
254 hton24(fh->fh_f_ctl, f_ctl | fill);
255 }
256 } else {
257 WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
258 fr_eof(fp) = FC_EOF_N;
259 }
260
261 /*
262 * Initialize remaining fh fields
263 * from fc_fill_fc_hdr
264 */
265 fh->fh_ox_id = htons(ep->oxid);
266 fh->fh_rx_id = htons(ep->rxid);
267 fh->fh_seq_id = ep->seq.id;
268 fh->fh_seq_cnt = htons(ep->seq.cnt);
269}
270
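The END_SEQ branch of fc_exch_setup_hdr() above pads the last frame of a sequence out to a 4-byte multiple and records the pad count in the low two bits of F_CTL. A standalone sketch of that arithmetic:

#include <stdio.h>

/* Same calculation as above: pad the frame length to a 4-byte multiple
 * and carry the pad count in the low-order 2 bits of f_ctl. */
static unsigned int add_fill(unsigned int len, unsigned int *f_ctl)
{
	unsigned int fill = len & 3;

	if (fill) {
		fill = 4 - fill;
		*f_ctl |= fill;
	}
	return len + fill;
}

int main(void)
{
	unsigned int f_ctl = 0;
	unsigned int padded = add_fill(53, &f_ctl);

	printf("padded length %u, fill bits %u\n", padded, f_ctl & 3);
	return 0;
}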
271
272/*
273 * Release a reference to an exchange.
274 * If the refcnt goes to zero and the exchange is complete, it is freed.
275 */
276static void fc_exch_release(struct fc_exch *ep)
277{
278 struct fc_exch_mgr *mp;
279
280 if (atomic_dec_and_test(&ep->ex_refcnt)) {
281 mp = ep->em;
282 if (ep->destructor)
283 ep->destructor(&ep->seq, ep->arg);
284 if (ep->lp->tt.exch_put)
285 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
286 WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
287 mempool_free(ep, mp->ep_pool);
288 }
289}
290
291static int fc_exch_done_locked(struct fc_exch *ep)
292{
293 int rc = 1;
294
295 /*
296 * We must check for completion in case there are two threads
297 * tyring to complete this. But the rrq code will reuse the
298 * ep, and in that case we only clear the resp and set it as
299 * complete, so it can be reused by the timer to send the rrq.
300 */
301 ep->resp = NULL;
302 if (ep->state & FC_EX_DONE)
303 return rc;
304 ep->esb_stat |= ESB_ST_COMPLETE;
305
306 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
307 ep->state |= FC_EX_DONE;
308 if (cancel_delayed_work(&ep->timeout_work))
309 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
310 rc = 0;
311 }
312 return rc;
313}
314
315static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
316{
317 struct fc_exch_mgr *mp;
318
319 mp = ep->em;
320 spin_lock_bh(&mp->em_lock);
321 WARN_ON(mp->total_exches <= 0);
322 mp->total_exches--;
323 mp->exches[ep->xid - mp->min_xid] = NULL;
324 list_del(&ep->ex_list);
325 spin_unlock_bh(&mp->em_lock);
326 fc_exch_release(ep); /* drop hold for exch in mp */
327}
328
329/*
330 * Internal version of fc_exch_timer_set - used with lock held.
331 */
332static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
333 unsigned int timer_msec)
334{
335 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
336 return;
337
338 FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
339 ep->xid);
340 if (schedule_delayed_work(&ep->timeout_work,
341 msecs_to_jiffies(timer_msec)))
342 fc_exch_hold(ep); /* hold for timer */
343}
344
345/*
346 * Set timer for an exchange.
347 * The time is a minimum delay in milliseconds until the timer fires.
348 * Used for upper level protocols to time out the exchange.
349 * The timer is cancelled when it fires or when the exchange completes.
350 *
351 */
352static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
353{
354 spin_lock_bh(&ep->ex_lock);
355 fc_exch_timer_set_locked(ep, timer_msec);
356 spin_unlock_bh(&ep->ex_lock);
357}
358
359int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
360{
361 struct fc_seq *sp;
362 struct fc_exch *ep;
363 struct fc_frame *fp;
364 int error;
365
366 ep = fc_seq_exch(req_sp);
367
368 spin_lock_bh(&ep->ex_lock);
369 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
370 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
371 spin_unlock_bh(&ep->ex_lock);
372 return -ENXIO;
373 }
374
375 /*
376 * Send the abort on a new sequence if possible.
377 */
378 sp = fc_seq_start_next_locked(&ep->seq);
379 if (!sp) {
380 spin_unlock_bh(&ep->ex_lock);
381 return -ENOMEM;
382 }
383
384 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
385 if (timer_msec)
386 fc_exch_timer_set_locked(ep, timer_msec);
387 spin_unlock_bh(&ep->ex_lock);
388
389 /*
390 * If not logged into the fabric, don't send ABTS but leave
391 * sequence active until next timeout.
392 */
393 if (!ep->sid)
394 return 0;
395
396 /*
397 * Send an abort for the sequence that timed out.
398 */
399 fp = fc_frame_alloc(ep->lp, 0);
400 if (fp) {
401 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
402 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
403 error = fc_seq_send(ep->lp, sp, fp);
404 } else
405 error = -ENOBUFS;
406 return error;
407}
408EXPORT_SYMBOL(fc_seq_exch_abort);
409
410/*
411 * Exchange timeout - handle exchange timer expiration.
412 * The timer will have been cancelled before this is called.
413 */
414static void fc_exch_timeout(struct work_struct *work)
415{
416 struct fc_exch *ep = container_of(work, struct fc_exch,
417 timeout_work.work);
418 struct fc_seq *sp = &ep->seq;
419 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
420 void *arg;
421 u32 e_stat;
422 int rc = 1;
423
424 spin_lock_bh(&ep->ex_lock);
425 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
426 goto unlock;
427
428 e_stat = ep->esb_stat;
429 if (e_stat & ESB_ST_COMPLETE) {
430 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
431 if (e_stat & ESB_ST_REC_QUAL)
432 fc_exch_rrq(ep);
433 spin_unlock_bh(&ep->ex_lock);
434 goto done;
435 } else {
436 resp = ep->resp;
437 arg = ep->arg;
438 ep->resp = NULL;
439 if (e_stat & ESB_ST_ABNORMAL)
440 rc = fc_exch_done_locked(ep);
441 spin_unlock_bh(&ep->ex_lock);
442 if (!rc)
443 fc_exch_mgr_delete_ep(ep);
444 if (resp)
445 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
446 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
447 goto done;
448 }
449unlock:
450 spin_unlock_bh(&ep->ex_lock);
451done:
452 /*
453 * This release matches the hold taken when the timer was set.
454 */
455 fc_exch_release(ep);
456}
457
458/*
459 * Allocate a sequence.
460 *
461 * We don't support multiple originated sequences on the same exchange.
462 * By implication, any previously originated sequence on this exchange
463 * is complete, and we reallocate the same sequence.
464 */
465static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
466{
467 struct fc_seq *sp;
468
469 sp = &ep->seq;
470 sp->ssb_stat = 0;
471 sp->cnt = 0;
472 sp->id = seq_id;
473 return sp;
474}
475
476/*
477 * fc_em_alloc_xid - returns an xid based on request type
478 * @mp : ptr to the associated exchange manager
479 * @fp : ptr to the associated frame
480 *
481 * Check the associated fc_fcp_pkt to get the SCSI command type and
482 * direction, and decide from which range this exchange id
483 * will be allocated.
484 *
485 * Returns : 0 or a valid xid
486 */
487static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
488{
489 u16 xid, min, max;
490 u16 *plast;
491 struct fc_exch *ep = NULL;
492
493 if (mp->max_read) {
494 if (fc_frame_is_read(fp)) {
495 min = mp->min_xid;
496 max = mp->max_read;
497 plast = &mp->last_read;
498 } else {
499 min = mp->max_read + 1;
500 max = mp->max_xid;
501 plast = &mp->last_xid;
502 }
503 } else {
504 min = mp->min_xid;
505 max = mp->max_xid;
506 plast = &mp->last_xid;
507 }
508 xid = *plast;
509 do {
510 xid = (xid == max) ? min : xid + 1;
511 ep = mp->exches[xid - mp->min_xid];
512 } while ((ep != NULL) && (xid != *plast));
513
514 if (unlikely(ep))
515 xid = 0;
516 else
517 *plast = xid;
518
519 return xid;
520}
521
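fc_em_alloc_xid() above hands out exchange IDs round-robin within the chosen range (read range or general range), skipping IDs whose slot in mp->exches[] is still occupied and returning 0 once the whole range is busy. A user-space sketch of that scan over a tiny range:

#include <stdio.h>

#define MIN_XID	1
#define MAX_XID	8

static int in_use[MAX_XID - MIN_XID + 1];	/* stands in for mp->exches[] */
static unsigned short last_xid = MIN_XID;	/* stands in for *plast */

/* Round-robin scan starting after the previous allocation; returns 0 when
 * every xid in the range is busy, just as fc_em_alloc_xid() does. */
static unsigned short alloc_xid(void)
{
	unsigned short xid = last_xid;

	do {
		xid = (xid == MAX_XID) ? MIN_XID : xid + 1;
		if (!in_use[xid - MIN_XID]) {
			in_use[xid - MIN_XID] = 1;
			last_xid = xid;
			return xid;
		}
	} while (xid != last_xid);

	return 0;	/* range exhausted */
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		printf("allocated xid %u\n", alloc_xid());
	return 0;
}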
522/*
523 * fc_exch_alloc - allocate an exchange.
524 * @mp : ptr to the exchange manager
525 * @xid: input xid
526 *
527 * If the supplied xid is zero, assign the next free exchange ID
528 * from the exchange manager; otherwise use the supplied xid.
529 * Returns with exch lock held.
530 */
531struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
532 struct fc_frame *fp, u16 xid)
533{
534 struct fc_exch *ep;
535
536 /* allocate memory for exchange */
537 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
538 if (!ep) {
539 atomic_inc(&mp->stats.no_free_exch);
540 goto out;
541 }
542 memset(ep, 0, sizeof(*ep));
543
544 spin_lock_bh(&mp->em_lock);
545 /* alloc xid if input xid 0 */
546 if (!xid) {
547 /* alloc a new xid */
548 xid = fc_em_alloc_xid(mp, fp);
549 if (!xid) {
550 printk(KERN_ERR "fc_em_alloc_xid() failed\n");
551 goto err;
552 }
553 }
554
555 fc_exch_hold(ep); /* hold for exch in mp */
556 spin_lock_init(&ep->ex_lock);
557 /*
558 * Hold exch lock for caller to prevent fc_exch_reset()
559 * from releasing exch while fc_exch_alloc() caller is
560 * still working on exch.
561 */
562 spin_lock_bh(&ep->ex_lock);
563
564 mp->exches[xid - mp->min_xid] = ep;
565 list_add_tail(&ep->ex_list, &mp->ex_list);
566 fc_seq_alloc(ep, ep->seq_id++);
567 mp->total_exches++;
568 spin_unlock_bh(&mp->em_lock);
569
570 /*
571 * update exchange
572 */
573 ep->oxid = ep->xid = xid;
574 ep->em = mp;
575 ep->lp = mp->lp;
576 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
577 ep->rxid = FC_XID_UNKNOWN;
578 ep->class = mp->class;
579 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
580out:
581 return ep;
582err:
583 spin_unlock_bh(&mp->em_lock);
584 atomic_inc(&mp->stats.no_free_exch_xid);
585 mempool_free(ep, mp->ep_pool);
586 return NULL;
587}
588EXPORT_SYMBOL(fc_exch_alloc);
589
590/*
591 * Lookup and hold an exchange.
592 */
593static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
594{
595 struct fc_exch *ep = NULL;
596
597 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
598 spin_lock_bh(&mp->em_lock);
599 ep = mp->exches[xid - mp->min_xid];
600 if (ep) {
601 fc_exch_hold(ep);
602 WARN_ON(ep->xid != xid);
603 }
604 spin_unlock_bh(&mp->em_lock);
605 }
606 return ep;
607}
608
609void fc_exch_done(struct fc_seq *sp)
610{
611 struct fc_exch *ep = fc_seq_exch(sp);
612 int rc;
613
614 spin_lock_bh(&ep->ex_lock);
615 rc = fc_exch_done_locked(ep);
616 spin_unlock_bh(&ep->ex_lock);
617 if (!rc)
618 fc_exch_mgr_delete_ep(ep);
619}
620EXPORT_SYMBOL(fc_exch_done);
621
622/*
623 * Allocate a new exchange as responder.
624 * Sets the responder ID in the frame header.
625 */
626static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
627{
628 struct fc_exch *ep;
629 struct fc_frame_header *fh;
630 u16 rxid;
631
632 ep = mp->lp->tt.exch_get(mp->lp, fp);
633 if (ep) {
634 ep->class = fc_frame_class(fp);
635
636 /*
637 * Set EX_CTX indicating we're responding on this exchange.
638 */
639 ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
640 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
641 fh = fc_frame_header_get(fp);
642 ep->sid = ntoh24(fh->fh_d_id);
643 ep->did = ntoh24(fh->fh_s_id);
644 ep->oid = ep->did;
645
646 /*
647 * Allocated exchange has placed the XID in the
648 * originator field. Move it to the responder field,
649 * and set the originator XID from the frame.
650 */
651 ep->rxid = ep->xid;
652 ep->oxid = ntohs(fh->fh_ox_id);
653 ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
654 if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
655 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
656
657 /*
658 * Set the responder ID in the frame header.
659 * The old one should've been 0xffff.
660 * If it isn't, don't assign one.
661 * Incoming basic link service frames may specify
662 * a referenced RX_ID.
663 */
664 if (fh->fh_type != FC_TYPE_BLS) {
665 rxid = ntohs(fh->fh_rx_id);
666 WARN_ON(rxid != FC_XID_UNKNOWN);
667 fh->fh_rx_id = htons(ep->rxid);
668 }
669 fc_exch_hold(ep); /* hold for caller */
670 spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */
671 }
672 return ep;
673}
674
675/*
676 * Find a sequence for receive where the other end is originating the sequence.
677 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
678 * on the ep that should be released by the caller.
679 */
680static enum fc_pf_rjt_reason
681fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
682{
683 struct fc_frame_header *fh = fc_frame_header_get(fp);
684 struct fc_exch *ep = NULL;
685 struct fc_seq *sp = NULL;
686 enum fc_pf_rjt_reason reject = FC_RJT_NONE;
687 u32 f_ctl;
688 u16 xid;
689
690 f_ctl = ntoh24(fh->fh_f_ctl);
691 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
692
693 /*
694 * Lookup or create the exchange if we will be creating the sequence.
695 */
696 if (f_ctl & FC_FC_EX_CTX) {
697 xid = ntohs(fh->fh_ox_id); /* we originated exch */
698 ep = fc_exch_find(mp, xid);
699 if (!ep) {
700 atomic_inc(&mp->stats.xid_not_found);
701 reject = FC_RJT_OX_ID;
702 goto out;
703 }
704 if (ep->rxid == FC_XID_UNKNOWN)
705 ep->rxid = ntohs(fh->fh_rx_id);
706 else if (ep->rxid != ntohs(fh->fh_rx_id)) {
707 reject = FC_RJT_OX_ID;
708 goto rel;
709 }
710 } else {
711 xid = ntohs(fh->fh_rx_id); /* we are the responder */
712
713 /*
714 * Special case for MDS issuing an ELS TEST with a
715 * bad rxid of 0.
716 * XXX take this out once we do the proper reject.
717 */
718 if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
719 fc_frame_payload_op(fp) == ELS_TEST) {
720 fh->fh_rx_id = htons(FC_XID_UNKNOWN);
721 xid = FC_XID_UNKNOWN;
722 }
723
724 /*
725 * new sequence - find the exchange
726 */
727 ep = fc_exch_find(mp, xid);
728 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
729 if (ep) {
730 atomic_inc(&mp->stats.xid_busy);
731 reject = FC_RJT_RX_ID;
732 goto rel;
733 }
734 ep = fc_exch_resp(mp, fp);
735 if (!ep) {
736 reject = FC_RJT_EXCH_EST; /* XXX */
737 goto out;
738 }
739 xid = ep->xid; /* get our XID */
740 } else if (!ep) {
741 atomic_inc(&mp->stats.xid_not_found);
742 reject = FC_RJT_RX_ID; /* XID not found */
743 goto out;
744 }
745 }
746
747 /*
748 * At this point, we have the exchange held.
749 * Find or create the sequence.
750 */
751 if (fc_sof_is_init(fr_sof(fp))) {
752 sp = fc_seq_start_next(&ep->seq);
753 if (!sp) {
754 reject = FC_RJT_SEQ_XS; /* exchange shortage */
755 goto rel;
756 }
757 sp->id = fh->fh_seq_id;
758 sp->ssb_stat |= SSB_ST_RESP;
759 } else {
760 sp = &ep->seq;
761 if (sp->id != fh->fh_seq_id) {
762 atomic_inc(&mp->stats.seq_not_found);
763 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
764 goto rel;
765 }
766 }
767 WARN_ON(ep != fc_seq_exch(sp));
768
769 if (f_ctl & FC_FC_SEQ_INIT)
770 ep->esb_stat |= ESB_ST_SEQ_INIT;
771
772 fr_seq(fp) = sp;
773out:
774 return reject;
775rel:
776 fc_exch_done(&ep->seq);
777 fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
778 return reject;
779}
780
781/*
782 * Find the sequence for a frame being received.
783 * We originated the sequence, so it should be found.
784 * We may or may not have originated the exchange.
785 * Does not hold the sequence for the caller.
786 */
787static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
788 struct fc_frame *fp)
789{
790 struct fc_frame_header *fh = fc_frame_header_get(fp);
791 struct fc_exch *ep;
792 struct fc_seq *sp = NULL;
793 u32 f_ctl;
794 u16 xid;
795
796 f_ctl = ntoh24(fh->fh_f_ctl);
797 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
798 xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
799 ep = fc_exch_find(mp, xid);
800 if (!ep)
801 return NULL;
802 if (ep->seq.id == fh->fh_seq_id) {
803 /*
804 * Save the RX_ID if we didn't previously know it.
805 */
806 sp = &ep->seq;
807 if ((f_ctl & FC_FC_EX_CTX) != 0 &&
808 ep->rxid == FC_XID_UNKNOWN) {
809 ep->rxid = ntohs(fh->fh_rx_id);
810 }
811 }
812 fc_exch_release(ep);
813 return sp;
814}
815
816/*
817 * Set addresses for an exchange.
818 * Note this must be done before the first sequence of the exchange is sent.
819 */
820static void fc_exch_set_addr(struct fc_exch *ep,
821 u32 orig_id, u32 resp_id)
822{
823 ep->oid = orig_id;
824 if (ep->esb_stat & ESB_ST_RESP) {
825 ep->sid = resp_id;
826 ep->did = orig_id;
827 } else {
828 ep->sid = orig_id;
829 ep->did = resp_id;
830 }
831}
832
833static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
834{
835 struct fc_exch *ep = fc_seq_exch(sp);
836
837 sp = fc_seq_alloc(ep, ep->seq_id++);
838 FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
839 ep->xid, ep->f_ctl, sp->id);
840 return sp;
841}
842/*
843 * Allocate a new sequence on the same exchange as the supplied sequence.
844 * This will never return NULL.
845 */
846struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
847{
848 struct fc_exch *ep = fc_seq_exch(sp);
849
850 spin_lock_bh(&ep->ex_lock);
851 WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
852 sp = fc_seq_start_next_locked(sp);
853 spin_unlock_bh(&ep->ex_lock);
854
855 return sp;
856}
857EXPORT_SYMBOL(fc_seq_start_next);
858
859int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
860{
861 struct fc_exch *ep;
862 struct fc_frame_header *fh = fc_frame_header_get(fp);
863 int error;
864 u32 f_ctl;
865
866 ep = fc_seq_exch(sp);
867 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
868
869 f_ctl = ntoh24(fh->fh_f_ctl);
870 fc_exch_setup_hdr(ep, fp, f_ctl);
871
872	/*
873	 * Update the sequence count if this frame carries multiple
874	 * FC frames, which can happen when sequence offload is
875	 * enabled by the LLD.
876	 */
877 if (fr_max_payload(fp))
878 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
879 fr_max_payload(fp));
880 else
881 sp->cnt++;
882
883 /*
884 * Send the frame.
885 */
886 error = lp->tt.frame_send(lp, fp);
887
888 /*
889 * Update the exchange and sequence flags,
890 * assuming all frames for the sequence have been sent.
891 * We can only be called to send once for each sequence.
892 */
893 spin_lock_bh(&ep->ex_lock);
894 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
895 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
896 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
897 spin_unlock_bh(&ep->ex_lock);
898 return error;
899}
900EXPORT_SYMBOL(fc_seq_send);
901
902void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
903 struct fc_seq_els_data *els_data)
904{
905 switch (els_cmd) {
906 case ELS_LS_RJT:
907 fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
908 break;
909 case ELS_LS_ACC:
910 fc_seq_ls_acc(sp);
911 break;
912 case ELS_RRQ:
913 fc_exch_els_rrq(sp, els_data->fp);
914 break;
915 case ELS_REC:
916 fc_exch_els_rec(sp, els_data->fp);
917 break;
918 default:
919 FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
920 }
921}
922EXPORT_SYMBOL(fc_seq_els_rsp_send);
923
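Editorial illustration, not part of this patch: a caller that has received a
request on sequence sp could reject it through the template hook this function
backs, assuming lp is the local port. The reason/explanation codes below are
arbitrary examples.

	struct fc_seq_els_data rjt_data;

	rjt_data.fp = NULL;			/* no frame needed for LS_RJT */
	rjt_data.reason = ELS_RJT_LOGIC;
	rjt_data.explan = ELS_EXPL_OXID_RXID;
	lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
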
924/*
925 * Send a sequence, which is also the last sequence in the exchange.
926 */
927static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
928 enum fc_rctl rctl, enum fc_fh_type fh_type)
929{
930 u32 f_ctl;
931 struct fc_exch *ep = fc_seq_exch(sp);
932
933 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
934 f_ctl |= ep->f_ctl;
935 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
936 fc_seq_send(ep->lp, sp, fp);
937}
938
939/*
940 * Send ACK_1 (or equiv.) indicating we received something.
941 * The frame we're acking is supplied.
942 */
943static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
944{
945 struct fc_frame *fp;
946 struct fc_frame_header *rx_fh;
947 struct fc_frame_header *fh;
948 struct fc_exch *ep = fc_seq_exch(sp);
949 struct fc_lport *lp = ep->lp;
950 unsigned int f_ctl;
951
952 /*
953 * Don't send ACKs for class 3.
954 */
955 if (fc_sof_needs_ack(fr_sof(rx_fp))) {
956 fp = fc_frame_alloc(lp, 0);
957 if (!fp)
958 return;
959
960 fh = fc_frame_header_get(fp);
961 fh->fh_r_ctl = FC_RCTL_ACK_1;
962 fh->fh_type = FC_TYPE_BLS;
963
964 /*
965 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
966 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
967 * Bits 9-8 are meaningful (retransmitted or unidirectional).
968 * Last ACK uses bits 7-6 (continue sequence),
969 * bits 5-4 are meaningful (what kind of ACK to use).
970 */
971 rx_fh = fc_frame_header_get(rx_fp);
972 f_ctl = ntoh24(rx_fh->fh_f_ctl);
973 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
974 FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
975 FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
976 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
977 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
978 hton24(fh->fh_f_ctl, f_ctl);
979
980 fc_exch_setup_hdr(ep, fp, f_ctl);
981 fh->fh_seq_id = rx_fh->fh_seq_id;
982 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
983 fh->fh_parm_offset = htonl(1); /* ack single frame */
984
985 fr_sof(fp) = fr_sof(rx_fp);
986 if (f_ctl & FC_FC_END_SEQ)
987 fr_eof(fp) = FC_EOF_T;
988 else
989 fr_eof(fp) = FC_EOF_N;
990
991 (void) lp->tt.frame_send(lp, fp);
992 }
993}
994
995/*
996 * Send BLS Reject.
997 * This is for rejecting BA_ABTS only.
998 */
999static void
1000fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
1001 enum fc_ba_rjt_explan explan)
1002{
1003 struct fc_frame *fp;
1004 struct fc_frame_header *rx_fh;
1005 struct fc_frame_header *fh;
1006 struct fc_ba_rjt *rp;
1007 struct fc_lport *lp;
1008 unsigned int f_ctl;
1009
1010 lp = fr_dev(rx_fp);
1011 fp = fc_frame_alloc(lp, sizeof(*rp));
1012 if (!fp)
1013 return;
1014 fh = fc_frame_header_get(fp);
1015 rx_fh = fc_frame_header_get(rx_fp);
1016
1017 memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1018
1019 rp = fc_frame_payload_get(fp, sizeof(*rp));
1020 rp->br_reason = reason;
1021 rp->br_explan = explan;
1022
1023 /*
1024 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1025 */
1026 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1027 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1028 fh->fh_ox_id = rx_fh->fh_rx_id;
1029 fh->fh_rx_id = rx_fh->fh_ox_id;
1030 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1031 fh->fh_r_ctl = FC_RCTL_BA_RJT;
1032 fh->fh_type = FC_TYPE_BLS;
1033
1034 /*
1035 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1036 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1037 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1038 * Last ACK uses bits 7-6 (continue sequence),
1039 * bits 5-4 are meaningful (what kind of ACK to use).
1040 * Always set LAST_SEQ, END_SEQ.
1041 */
1042 f_ctl = ntoh24(rx_fh->fh_f_ctl);
1043 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1044 FC_FC_END_CONN | FC_FC_SEQ_INIT |
1045 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1046 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1047 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1048 f_ctl &= ~FC_FC_FIRST_SEQ;
1049 hton24(fh->fh_f_ctl, f_ctl);
1050
1051 fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1052 fr_eof(fp) = FC_EOF_T;
1053 if (fc_sof_needs_ack(fr_sof(fp)))
1054 fr_eof(fp) = FC_EOF_N;
1055
1056 (void) lp->tt.frame_send(lp, fp);
1057}
1058
1059/*
1060 * Handle an incoming ABTS. This would be for target mode usually,
1061 * but could be due to lost FCP transfer ready, confirm or RRQ.
1062 * We always handle this as an exchange abort, ignoring the parameter.
1063 */
1064static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1065{
1066 struct fc_frame *fp;
1067 struct fc_ba_acc *ap;
1068 struct fc_frame_header *fh;
1069 struct fc_seq *sp;
1070
1071 if (!ep)
1072 goto reject;
1073 spin_lock_bh(&ep->ex_lock);
1074 if (ep->esb_stat & ESB_ST_COMPLETE) {
1075 spin_unlock_bh(&ep->ex_lock);
1076 goto reject;
1077 }
1078 if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1079 fc_exch_hold(ep); /* hold for REC_QUAL */
1080 ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1081 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1082
1083 fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1084 if (!fp) {
1085 spin_unlock_bh(&ep->ex_lock);
1086 goto free;
1087 }
1088 fh = fc_frame_header_get(fp);
1089 ap = fc_frame_payload_get(fp, sizeof(*ap));
1090 memset(ap, 0, sizeof(*ap));
1091 sp = &ep->seq;
1092 ap->ba_high_seq_cnt = htons(0xffff);
1093 if (sp->ssb_stat & SSB_ST_RESP) {
1094 ap->ba_seq_id = sp->id;
1095 ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1096 ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1097 ap->ba_low_seq_cnt = htons(sp->cnt);
1098 }
1099 sp = fc_seq_start_next(sp);
1100 spin_unlock_bh(&ep->ex_lock);
1101 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1102 fc_frame_free(rx_fp);
1103 return;
1104
1105reject:
1106 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1107free:
1108 fc_frame_free(rx_fp);
1109}
1110
1111/*
1112 * Handle receive where the other end is originating the sequence.
1113 */
1114static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1115 struct fc_frame *fp)
1116{
1117 struct fc_frame_header *fh = fc_frame_header_get(fp);
1118 struct fc_seq *sp = NULL;
1119 struct fc_exch *ep = NULL;
1120 enum fc_sof sof;
1121 enum fc_eof eof;
1122 u32 f_ctl;
1123 enum fc_pf_rjt_reason reject;
1124
1125 fr_seq(fp) = NULL;
1126 reject = fc_seq_lookup_recip(mp, fp);
1127 if (reject == FC_RJT_NONE) {
1128 sp = fr_seq(fp); /* sequence will be held */
1129 ep = fc_seq_exch(sp);
1130 sof = fr_sof(fp);
1131 eof = fr_eof(fp);
1132 f_ctl = ntoh24(fh->fh_f_ctl);
1133 fc_seq_send_ack(sp, fp);
1134
1135 /*
1136 * Call the receive function.
1137 *
1138 * The receive function may allocate a new sequence
1139 * over the old one, so we shouldn't change the
1140 * sequence after this.
1141 *
1142 * The frame will be freed by the receive function.
1143 * If new exch resp handler is valid then call that
1144 * first.
1145 */
1146 if (ep->resp)
1147 ep->resp(sp, fp, ep->arg);
1148 else
1149 lp->tt.lport_recv(lp, sp, fp);
1150 fc_exch_release(ep); /* release from lookup */
1151 } else {
1152 FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
1153 fc_frame_free(fp);
1154 }
1155}
1156
1157/*
1158 * Handle receive where the other end is originating the sequence in
1159 * response to our exchange.
1160 */
1161static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1162{
1163 struct fc_frame_header *fh = fc_frame_header_get(fp);
1164 struct fc_seq *sp;
1165 struct fc_exch *ep;
1166 enum fc_sof sof;
1167 u32 f_ctl;
1168 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1169 void *ex_resp_arg;
1170 int rc;
1171
1172 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1173 if (!ep) {
1174 atomic_inc(&mp->stats.xid_not_found);
1175 goto out;
1176 }
1177 if (ep->rxid == FC_XID_UNKNOWN)
1178 ep->rxid = ntohs(fh->fh_rx_id);
1179 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1180 atomic_inc(&mp->stats.xid_not_found);
1181 goto rel;
1182 }
1183 if (ep->did != ntoh24(fh->fh_s_id) &&
1184 ep->did != FC_FID_FLOGI) {
1185 atomic_inc(&mp->stats.xid_not_found);
1186 goto rel;
1187 }
1188 sof = fr_sof(fp);
1189 if (fc_sof_is_init(sof)) {
1190 sp = fc_seq_start_next(&ep->seq);
1191 sp->id = fh->fh_seq_id;
1192 sp->ssb_stat |= SSB_ST_RESP;
1193 } else {
1194 sp = &ep->seq;
1195 if (sp->id != fh->fh_seq_id) {
1196 atomic_inc(&mp->stats.seq_not_found);
1197 goto rel;
1198 }
1199 }
1200 f_ctl = ntoh24(fh->fh_f_ctl);
1201 fr_seq(fp) = sp;
1202 if (f_ctl & FC_FC_SEQ_INIT)
1203 ep->esb_stat |= ESB_ST_SEQ_INIT;
1204
1205 if (fc_sof_needs_ack(sof))
1206 fc_seq_send_ack(sp, fp);
1207 resp = ep->resp;
1208 ex_resp_arg = ep->arg;
1209
1210 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1211 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1212 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1213 spin_lock_bh(&ep->ex_lock);
1214 rc = fc_exch_done_locked(ep);
1215 WARN_ON(fc_seq_exch(sp) != ep);
1216 spin_unlock_bh(&ep->ex_lock);
1217 if (!rc)
1218 fc_exch_mgr_delete_ep(ep);
1219 }
1220
1221 /*
1222 * Call the receive function.
1223 * The sequence is held (has a refcnt) for us,
1224 * but not for the receive function.
1225 *
1226 * The receive function may allocate a new sequence
1227 * over the old one, so we shouldn't change the
1228 * sequence after this.
1229 *
1230 * The frame will be freed by the receive function.
1231 * If new exch resp handler is valid then call that
1232 * first.
1233 */
1234 if (resp)
1235 resp(sp, fp, ex_resp_arg);
1236 else
1237 fc_frame_free(fp);
1238 fc_exch_release(ep);
1239 return;
1240rel:
1241 fc_exch_release(ep);
1242out:
1243 fc_frame_free(fp);
1244}
1245
1246/*
1247 * Handle receive for a sequence where other end is responding to our sequence.
1248 */
1249static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1250{
1251 struct fc_seq *sp;
1252
1253 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1254 if (!sp) {
1255 atomic_inc(&mp->stats.xid_not_found);
1256 FC_DEBUG_EXCH("seq lookup failed\n");
1257 } else {
1258 atomic_inc(&mp->stats.non_bls_resp);
1259 FC_DEBUG_EXCH("non-BLS response to sequence");
1260 }
1261 fc_frame_free(fp);
1262}
1263
1264/*
1265 * Handle the response to an ABTS for exchange or sequence.
1266 * This can be BA_ACC or BA_RJT.
1267 */
1268static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1269{
1270 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1271 void *ex_resp_arg;
1272 struct fc_frame_header *fh;
1273 struct fc_ba_acc *ap;
1274 struct fc_seq *sp;
1275 u16 low;
1276 u16 high;
1277 int rc = 1, has_rec = 0;
1278
1279 fh = fc_frame_header_get(fp);
1280 FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
1281 fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
1282
1283 if (cancel_delayed_work_sync(&ep->timeout_work))
1284 fc_exch_release(ep); /* release from pending timer hold */
1285
1286 spin_lock_bh(&ep->ex_lock);
1287 switch (fh->fh_r_ctl) {
1288 case FC_RCTL_BA_ACC:
1289 ap = fc_frame_payload_get(fp, sizeof(*ap));
1290 if (!ap)
1291 break;
1292
1293 /*
1294 * Decide whether to establish a Recovery Qualifier.
1295 * We do this if there is a non-empty SEQ_CNT range and
1296 * SEQ_ID is the same as the one we aborted.
1297 */
1298 low = ntohs(ap->ba_low_seq_cnt);
1299 high = ntohs(ap->ba_high_seq_cnt);
1300 if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1301 (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1302 ap->ba_seq_id == ep->seq_id) && low != high) {
1303 ep->esb_stat |= ESB_ST_REC_QUAL;
1304 fc_exch_hold(ep); /* hold for recovery qualifier */
1305 has_rec = 1;
1306 }
1307 break;
1308 case FC_RCTL_BA_RJT:
1309 break;
1310 default:
1311 break;
1312 }
1313
1314 resp = ep->resp;
1315 ex_resp_arg = ep->arg;
1316
1317	/* XXX: do we need additional checks here? Can we reuse more of
1318	 * fc_exch_recv_seq_resp()?
1319	 */
1320 sp = &ep->seq;
1321 /*
1322 * do we want to check END_SEQ as well as LAST_SEQ here?
1323 */
1324 if (ep->fh_type != FC_TYPE_FCP &&
1325 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1326 rc = fc_exch_done_locked(ep);
1327 spin_unlock_bh(&ep->ex_lock);
1328 if (!rc)
1329 fc_exch_mgr_delete_ep(ep);
1330
1331 if (resp)
1332 resp(sp, fp, ex_resp_arg);
1333 else
1334 fc_frame_free(fp);
1335
1336 if (has_rec)
1337 fc_exch_timer_set(ep, ep->r_a_tov);
1338
1339}
1340
1341/*
1342 * Receive BLS sequence.
1343 * This is always a sequence initiated by the remote side.
1344 * We may be either the originator or recipient of the exchange.
1345 */
1346static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1347{
1348 struct fc_frame_header *fh;
1349 struct fc_exch *ep;
1350 u32 f_ctl;
1351
1352 fh = fc_frame_header_get(fp);
1353 f_ctl = ntoh24(fh->fh_f_ctl);
1354 fr_seq(fp) = NULL;
1355
1356 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1357 ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1358 if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1359 spin_lock_bh(&ep->ex_lock);
1360 ep->esb_stat |= ESB_ST_SEQ_INIT;
1361 spin_unlock_bh(&ep->ex_lock);
1362 }
1363 if (f_ctl & FC_FC_SEQ_CTX) {
1364 /*
1365 * A response to a sequence we initiated.
1366 * This should only be ACKs for class 2 or F.
1367 */
1368 switch (fh->fh_r_ctl) {
1369 case FC_RCTL_ACK_1:
1370 case FC_RCTL_ACK_0:
1371 break;
1372 default:
1373 FC_DEBUG_EXCH("BLS rctl %x - %s received",
1374 fh->fh_r_ctl,
1375 fc_exch_rctl_name(fh->fh_r_ctl));
1376 break;
1377 }
1378 fc_frame_free(fp);
1379 } else {
1380 switch (fh->fh_r_ctl) {
1381 case FC_RCTL_BA_RJT:
1382 case FC_RCTL_BA_ACC:
1383 if (ep)
1384 fc_exch_abts_resp(ep, fp);
1385 else
1386 fc_frame_free(fp);
1387 break;
1388 case FC_RCTL_BA_ABTS:
1389 fc_exch_recv_abts(ep, fp);
1390 break;
1391 default: /* ignore junk */
1392 fc_frame_free(fp);
1393 break;
1394 }
1395 }
1396 if (ep)
1397 fc_exch_release(ep); /* release hold taken by fc_exch_find */
1398}
1399
1400/*
1401 * Accept sequence with LS_ACC.
1402 * If this fails due to allocation or transmit congestion, assume the
1403 * originator will repeat the sequence.
1404 */
1405static void fc_seq_ls_acc(struct fc_seq *req_sp)
1406{
1407 struct fc_seq *sp;
1408 struct fc_els_ls_acc *acc;
1409 struct fc_frame *fp;
1410
1411 sp = fc_seq_start_next(req_sp);
1412 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1413 if (fp) {
1414 acc = fc_frame_payload_get(fp, sizeof(*acc));
1415 memset(acc, 0, sizeof(*acc));
1416 acc->la_cmd = ELS_LS_ACC;
1417 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1418 }
1419}
1420
1421/*
1422 * Reject sequence with ELS LS_RJT.
1423 * If this fails due to allocation or transmit congestion, assume the
1424 * originator will repeat the sequence.
1425 */
1426static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
1427 enum fc_els_rjt_explan explan)
1428{
1429 struct fc_seq *sp;
1430 struct fc_els_ls_rjt *rjt;
1431 struct fc_frame *fp;
1432
1433 sp = fc_seq_start_next(req_sp);
1434 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
1435 if (fp) {
1436 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1437 memset(rjt, 0, sizeof(*rjt));
1438 rjt->er_cmd = ELS_LS_RJT;
1439 rjt->er_reason = reason;
1440 rjt->er_explan = explan;
1441 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1442 }
1443}
1444
1445static void fc_exch_reset(struct fc_exch *ep)
1446{
1447 struct fc_seq *sp;
1448 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1449 void *arg;
1450 int rc = 1;
1451
1452 spin_lock_bh(&ep->ex_lock);
1453 ep->state |= FC_EX_RST_CLEANUP;
1454 /*
1455 * we really want to call del_timer_sync, but cannot due
1456 * to the lport calling with the lport lock held (some resp
1457 * functions can also grab the lport lock which could cause
1458 * a deadlock).
1459 */
1460 if (cancel_delayed_work(&ep->timeout_work))
1461 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1462 resp = ep->resp;
1463 ep->resp = NULL;
1464 if (ep->esb_stat & ESB_ST_REC_QUAL)
1465 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1466 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1467 arg = ep->arg;
1468 sp = &ep->seq;
1469 rc = fc_exch_done_locked(ep);
1470 spin_unlock_bh(&ep->ex_lock);
1471 if (!rc)
1472 fc_exch_mgr_delete_ep(ep);
1473
1474 if (resp)
1475 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1476}
1477
1478/*
1479 * Reset an exchange manager, releasing all sequences and exchanges.
1480 * If sid is non-zero, reset only exchanges we source from that FID.
1481 * If did is non-zero, reset only exchanges destined to that FID.
1482 */
1483void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
1484{
1485 struct fc_exch *ep;
1486 struct fc_exch *next;
1487
1488 spin_lock_bh(&mp->em_lock);
1489restart:
1490 list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
1491 if ((sid == 0 || sid == ep->sid) &&
1492 (did == 0 || did == ep->did)) {
1493 fc_exch_hold(ep);
1494 spin_unlock_bh(&mp->em_lock);
1495
1496 fc_exch_reset(ep);
1497
1498 fc_exch_release(ep);
1499 spin_lock_bh(&mp->em_lock);
1500
1501			/*
1502			 * Must restart the loop in case multiple eps were
1503			 * released while the lock was dropped.
1504			 */
1505 goto restart;
1506 }
1507 }
1508 spin_unlock_bh(&mp->em_lock);
1509}
1510EXPORT_SYMBOL(fc_exch_mgr_reset);
1511
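Illustrative sketch only, not part of the patch: a local port that wants to
drop every outstanding exchange (for example on a link reset) could call the
reset hook with wildcard FIDs, assuming lp->emp is the manager allocated for
that port.

	/* sid == 0 and did == 0 match all exchanges */
	lp->tt.exch_mgr_reset(lp->emp, 0, 0);
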
1512/*
1513 * Handle incoming ELS REC - Read Exchange Concise.
1514 * Note that the requesting port may be different than the S_ID in the request.
1515 */
1516static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1517{
1518 struct fc_frame *fp;
1519 struct fc_exch *ep;
1520 struct fc_exch_mgr *em;
1521 struct fc_els_rec *rp;
1522 struct fc_els_rec_acc *acc;
1523 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1524 enum fc_els_rjt_explan explan;
1525 u32 sid;
1526 u16 rxid;
1527 u16 oxid;
1528
1529 rp = fc_frame_payload_get(rfp, sizeof(*rp));
1530 explan = ELS_EXPL_INV_LEN;
1531 if (!rp)
1532 goto reject;
1533 sid = ntoh24(rp->rec_s_id);
1534 rxid = ntohs(rp->rec_rx_id);
1535 oxid = ntohs(rp->rec_ox_id);
1536
1537 /*
1538 * Currently it's hard to find the local S_ID from the exchange
1539 * manager. This will eventually be fixed, but for now it's easier
1540	 * to look up the subject exchange twice, once as if we were
1541 * the initiator, and then again if we weren't.
1542 */
1543 em = fc_seq_exch(sp)->em;
1544 ep = fc_exch_find(em, oxid);
1545 explan = ELS_EXPL_OXID_RXID;
1546 if (ep && ep->oid == sid) {
1547 if (ep->rxid != FC_XID_UNKNOWN &&
1548 rxid != FC_XID_UNKNOWN &&
1549 ep->rxid != rxid)
1550 goto rel;
1551 } else {
1552 if (ep)
1553 fc_exch_release(ep);
1554 ep = NULL;
1555 if (rxid != FC_XID_UNKNOWN)
1556 ep = fc_exch_find(em, rxid);
1557 if (!ep)
1558 goto reject;
1559 }
1560
1561 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1562 if (!fp) {
1563 fc_exch_done(sp);
1564 goto out;
1565 }
1566 sp = fc_seq_start_next(sp);
1567 acc = fc_frame_payload_get(fp, sizeof(*acc));
1568 memset(acc, 0, sizeof(*acc));
1569 acc->reca_cmd = ELS_LS_ACC;
1570 acc->reca_ox_id = rp->rec_ox_id;
1571 memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1572 acc->reca_rx_id = htons(ep->rxid);
1573 if (ep->sid == ep->oid)
1574 hton24(acc->reca_rfid, ep->did);
1575 else
1576 hton24(acc->reca_rfid, ep->sid);
1577 acc->reca_fc4value = htonl(ep->seq.rec_data);
1578 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1579 ESB_ST_SEQ_INIT |
1580 ESB_ST_COMPLETE));
1581 sp = fc_seq_start_next(sp);
1582 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1583out:
1584 fc_exch_release(ep);
1585 fc_frame_free(rfp);
1586 return;
1587
1588rel:
1589 fc_exch_release(ep);
1590reject:
1591 fc_seq_ls_rjt(sp, reason, explan);
1592 fc_frame_free(rfp);
1593}
1594
1595/*
1596 * Handle response from RRQ.
1597 * Not much to do here, really.
1598 * Should report errors.
1599 *
1600 * TODO: fix error handler.
1601 */
1602static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1603{
1604 struct fc_exch *aborted_ep = arg;
1605 unsigned int op;
1606
1607 if (IS_ERR(fp)) {
1608 int err = PTR_ERR(fp);
1609
1610 if (err == -FC_EX_CLOSED)
1611 goto cleanup;
1612 FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
1613 return;
1614 }
1615
1616 op = fc_frame_payload_op(fp);
1617 fc_frame_free(fp);
1618
1619 switch (op) {
1620 case ELS_LS_RJT:
1621 FC_DBG("LS_RJT for RRQ");
1622 /* fall through */
1623 case ELS_LS_ACC:
1624 goto cleanup;
1625 default:
1626 FC_DBG("unexpected response op %x for RRQ", op);
1627 return;
1628 }
1629
1630cleanup:
1631 fc_exch_done(&aborted_ep->seq);
1632 /* drop hold for rec qual */
1633 fc_exch_release(aborted_ep);
1634}
1635
1636/*
1637 * Send ELS RRQ - Reinstate Recovery Qualifier.
1638 * This tells the remote port to stop blocking the use of
1639 * the exchange and the seq_cnt range.
1640 */
1641static void fc_exch_rrq(struct fc_exch *ep)
1642{
1643 struct fc_lport *lp;
1644 struct fc_els_rrq *rrq;
1645 struct fc_frame *fp;
1646 struct fc_seq *rrq_sp;
1647 u32 did;
1648
1649 lp = ep->lp;
1650
1651 fp = fc_frame_alloc(lp, sizeof(*rrq));
1652 if (!fp)
1653 return;
1654 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1655 memset(rrq, 0, sizeof(*rrq));
1656 rrq->rrq_cmd = ELS_RRQ;
1657 hton24(rrq->rrq_s_id, ep->sid);
1658 rrq->rrq_ox_id = htons(ep->oxid);
1659 rrq->rrq_rx_id = htons(ep->rxid);
1660
1661 did = ep->did;
1662 if (ep->esb_stat & ESB_ST_RESP)
1663 did = ep->sid;
1664
1665 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
1666 fc_host_port_id(lp->host), FC_TYPE_ELS,
1667 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1668
1669 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
1670 lp->e_d_tov);
1671 if (!rrq_sp) {
1672 ep->esb_stat |= ESB_ST_REC_QUAL;
1673 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1674 return;
1675 }
1676}
1677
1678
1679/*
1680 * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
1681 */
1682static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1683{
1684 struct fc_exch *ep; /* request or subject exchange */
1685 struct fc_els_rrq *rp;
1686 u32 sid;
1687 u16 xid;
1688 enum fc_els_rjt_explan explan;
1689
1690 rp = fc_frame_payload_get(fp, sizeof(*rp));
1691 explan = ELS_EXPL_INV_LEN;
1692 if (!rp)
1693 goto reject;
1694
1695 /*
1696	 * Look up the subject exchange.
1697 */
1698 ep = fc_seq_exch(sp);
1699 sid = ntoh24(rp->rrq_s_id); /* subject source */
1700 xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
1701 ep = fc_exch_find(ep->em, xid);
1702
1703 explan = ELS_EXPL_OXID_RXID;
1704 if (!ep)
1705 goto reject;
1706 spin_lock_bh(&ep->ex_lock);
1707 if (ep->oxid != ntohs(rp->rrq_ox_id))
1708 goto unlock_reject;
1709 if (ep->rxid != ntohs(rp->rrq_rx_id) &&
1710 ep->rxid != FC_XID_UNKNOWN)
1711 goto unlock_reject;
1712 explan = ELS_EXPL_SID;
1713 if (ep->sid != sid)
1714 goto unlock_reject;
1715
1716 /*
1717 * Clear Recovery Qualifier state, and cancel timer if complete.
1718 */
1719 if (ep->esb_stat & ESB_ST_REC_QUAL) {
1720 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1721 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
1722 }
1723 if (ep->esb_stat & ESB_ST_COMPLETE) {
1724 if (cancel_delayed_work(&ep->timeout_work))
1725 atomic_dec(&ep->ex_refcnt); /* drop timer hold */
1726 }
1727
1728 spin_unlock_bh(&ep->ex_lock);
1729
1730 /*
1731 * Send LS_ACC.
1732 */
1733 fc_seq_ls_acc(sp);
1734 fc_frame_free(fp);
1735 return;
1736
1737unlock_reject:
1738 spin_unlock_bh(&ep->ex_lock);
1739 fc_exch_release(ep); /* drop hold from fc_exch_find */
1740reject:
1741 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
1742 fc_frame_free(fp);
1743}
1744
1745struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1746 enum fc_class class,
1747 u16 min_xid, u16 max_xid)
1748{
1749 struct fc_exch_mgr *mp;
1750 size_t len;
1751
1752 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
1753		FC_DBG("Invalid min_xid 0x%x and max_xid 0x%x\n",
1754 min_xid, max_xid);
1755 return NULL;
1756 }
1757
1758 /*
1759	 * Memory needed for the EM
1760 */
1761#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
1762 len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
1763 len += sizeof(struct fc_exch_mgr);
1764
1765 mp = kzalloc(len, GFP_ATOMIC);
1766 if (!mp)
1767 return NULL;
1768
1769 mp->class = class;
1770 mp->total_exches = 0;
1771 mp->exches = (struct fc_exch **)(mp + 1);
1772 mp->lp = lp;
1773 /* adjust em exch xid range for offload */
1774 mp->min_xid = min_xid;
1775 mp->max_xid = max_xid;
1776 mp->last_xid = min_xid - 1;
1777 mp->max_read = 0;
1778 mp->last_read = 0;
1779 if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
1780 mp->max_read = lp->lro_xid;
1781 mp->last_read = min_xid - 1;
1782 mp->last_xid = mp->max_read;
1783 } else {
1784 /* disable lro if no xid control over read */
1785 lp->lro_enabled = 0;
1786 }
1787
1788 INIT_LIST_HEAD(&mp->ex_list);
1789 spin_lock_init(&mp->em_lock);
1790
1791 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
1792 if (!mp->ep_pool)
1793 goto free_mp;
1794
1795 return mp;
1796
1797free_mp:
1798 kfree(mp);
1799 return NULL;
1800}
1801EXPORT_SYMBOL(fc_exch_mgr_alloc);
1802
1803void fc_exch_mgr_free(struct fc_exch_mgr *mp)
1804{
1805 WARN_ON(!mp);
1806 /*
1807 * The total exch count must be zero
1808 * before freeing exchange manager.
1809 */
1810 WARN_ON(mp->total_exches != 0);
1811 mempool_destroy(mp->ep_pool);
1812 kfree(mp);
1813}
1814EXPORT_SYMBOL(fc_exch_mgr_free);
1815
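A minimal allocation sketch (editorial, not part of the patch): pairing
fc_exch_mgr_alloc() with fc_exch_mgr_free() for a local port during setup.
The class and XID range are arbitrary examples; FC_CLASS_3 is assumed to be
available from fc_fs.h.

	struct fc_exch_mgr *mp;

	mp = fc_exch_mgr_alloc(lp, FC_CLASS_3, 1, 4095);
	if (!mp)
		return -ENOMEM;
	lp->emp = mp;

	/* later, once every exchange has completed */
	fc_exch_mgr_free(lp->emp);
	lp->emp = NULL;
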
1816struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
1817{
1818 if (!lp || !lp->emp)
1819 return NULL;
1820
1821 return fc_exch_alloc(lp->emp, fp, 0);
1822}
1823EXPORT_SYMBOL(fc_exch_get);
1824
1825struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1826 struct fc_frame *fp,
1827 void (*resp)(struct fc_seq *,
1828 struct fc_frame *fp,
1829 void *arg),
1830 void (*destructor)(struct fc_seq *, void *),
1831 void *arg, u32 timer_msec)
1832{
1833 struct fc_exch *ep;
1834 struct fc_seq *sp = NULL;
1835 struct fc_frame_header *fh;
1836 int rc = 1;
1837
1838 ep = lp->tt.exch_get(lp, fp);
1839 if (!ep) {
1840 fc_frame_free(fp);
1841 return NULL;
1842 }
1843 ep->esb_stat |= ESB_ST_SEQ_INIT;
1844 fh = fc_frame_header_get(fp);
1845 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1846 ep->resp = resp;
1847 ep->destructor = destructor;
1848 ep->arg = arg;
1849 ep->r_a_tov = FC_DEF_R_A_TOV;
1850 ep->lp = lp;
1851 sp = &ep->seq;
1852
1853	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
1854 ep->f_ctl = ntoh24(fh->fh_f_ctl);
1855 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1856 sp->cnt++;
1857
1858 if (unlikely(lp->tt.frame_send(lp, fp)))
1859 goto err;
1860
1861 if (timer_msec)
1862 fc_exch_timer_set_locked(ep, timer_msec);
1863 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1864
1865 if (ep->f_ctl & FC_FC_SEQ_INIT)
1866 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1867 spin_unlock_bh(&ep->ex_lock);
1868 return sp;
1869err:
1870 rc = fc_exch_done_locked(ep);
1871 spin_unlock_bh(&ep->ex_lock);
1872 if (!rc)
1873 fc_exch_mgr_delete_ep(ep);
1874 return NULL;
1875}
1876EXPORT_SYMBOL(fc_exch_seq_send);
1877
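An illustrative sketch, not part of this patch, of how an upper layer might
start a request exchange through the exch_seq_send hook. The names
example_resp() and example_send_req() are invented for this example; the
header fields mirror the fc_fill_fc_hdr() usage elsewhere in this file.

static void example_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	if (IS_ERR(fp))
		return;			/* exchange timed out or was reset */
	/* examine the response payload here */
	fc_frame_free(fp);		/* receive callbacks free the frame */
}

static struct fc_seq *example_send_req(struct fc_lport *lp, u32 did,
				       size_t payload_len)
{
	struct fc_frame *fp;

	fp = fc_frame_alloc(lp, payload_len);
	if (!fp)
		return NULL;
	/* fill the payload via fc_frame_payload_get(fp, payload_len) */
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       fc_host_port_id(lp->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	return lp->tt.exch_seq_send(lp, fp, example_resp, NULL, NULL,
				    lp->e_d_tov);
}
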
1878/*
1879 * Receive a frame
1880 */
1881void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1882 struct fc_frame *fp)
1883{
1884 struct fc_frame_header *fh = fc_frame_header_get(fp);
1885 u32 f_ctl;
1886
1887 /* lport lock ? */
1888 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
1889 FC_DBG("fc_lport or EM is not allocated and configured");
1890 fc_frame_free(fp);
1891 return;
1892 }
1893
1894 /*
1895 * If frame is marked invalid, just drop it.
1896 */
1897 f_ctl = ntoh24(fh->fh_f_ctl);
1898 switch (fr_eof(fp)) {
1899 case FC_EOF_T:
1900 if (f_ctl & FC_FC_END_SEQ)
1901 skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
1902 /* fall through */
1903 case FC_EOF_N:
1904 if (fh->fh_type == FC_TYPE_BLS)
1905 fc_exch_recv_bls(mp, fp);
1906 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
1907 FC_FC_EX_CTX)
1908 fc_exch_recv_seq_resp(mp, fp);
1909 else if (f_ctl & FC_FC_SEQ_CTX)
1910 fc_exch_recv_resp(mp, fp);
1911 else
1912 fc_exch_recv_req(lp, mp, fp);
1913 break;
1914 default:
1915 FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
1916 fc_frame_free(fp);
1917 break;
1918 }
1919}
1920EXPORT_SYMBOL(fc_exch_recv);
1921
1922int fc_exch_init(struct fc_lport *lp)
1923{
1924 if (!lp->tt.exch_get) {
1925 /*
1926 * exch_put() should be NULL if
1927 * exch_get() is NULL
1928 */
1929 WARN_ON(lp->tt.exch_put);
1930 lp->tt.exch_get = fc_exch_get;
1931 }
1932
1933 if (!lp->tt.seq_start_next)
1934 lp->tt.seq_start_next = fc_seq_start_next;
1935
1936 if (!lp->tt.exch_seq_send)
1937 lp->tt.exch_seq_send = fc_exch_seq_send;
1938
1939 if (!lp->tt.seq_send)
1940 lp->tt.seq_send = fc_seq_send;
1941
1942 if (!lp->tt.seq_els_rsp_send)
1943 lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
1944
1945 if (!lp->tt.exch_done)
1946 lp->tt.exch_done = fc_exch_done;
1947
1948 if (!lp->tt.exch_mgr_reset)
1949 lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
1950
1951 if (!lp->tt.seq_exch_abort)
1952 lp->tt.seq_exch_abort = fc_seq_exch_abort;
1953
1954 return 0;
1955}
1956EXPORT_SYMBOL(fc_exch_init);
1957
1958int fc_setup_exch_mgr(void)
1959{
1960 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
1961 0, SLAB_HWCACHE_ALIGN, NULL);
1962 if (!fc_em_cachep)
1963 return -ENOMEM;
1964 return 0;
1965}
1966
1967void fc_destroy_exch_mgr(void)
1968{
1969 kmem_cache_destroy(fc_em_cachep);
1970}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 000000000000..404e63ff46b8
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2131 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/kernel.h>
25#include <linux/types.h>
26#include <linux/spinlock.h>
27#include <linux/scatterlist.h>
28#include <linux/err.h>
29#include <linux/crc32.h>
30
31#include <scsi/scsi_tcq.h>
32#include <scsi/scsi.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_cmnd.h>
36
37#include <scsi/fc/fc_fc2.h>
38
39#include <scsi/libfc.h>
40#include <scsi/fc_encode.h>
41
42MODULE_AUTHOR("Open-FCoE.org");
43MODULE_DESCRIPTION("libfc");
44MODULE_LICENSE("GPL");
45
46static int fc_fcp_debug;
47
48#define FC_DEBUG_FCP(fmt...) \
49 do { \
50 if (fc_fcp_debug) \
51 FC_DBG(fmt); \
52 } while (0)
53
54static struct kmem_cache *scsi_pkt_cachep;
55
56/* SRB state definitions */
57#define FC_SRB_FREE 0 /* cmd is free */
58#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
59#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
60#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
61#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
62#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
63#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
64#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
65#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of memory */
66
67#define FC_SRB_READ (1 << 1)
68#define FC_SRB_WRITE (1 << 0)
69
70/*
71 * The SCp.ptr should be tested and set under the host lock. NULL indicates
72 * that the command has been returned to the scsi layer.
73 */
74#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
75#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
76#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
77#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
78#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
79
80struct fc_fcp_internal {
81 mempool_t *scsi_pkt_pool;
82 struct list_head scsi_pkt_queue;
83 u8 throttled;
84};
85
86#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
87
88/*
89 * function prototypes
90 * FC scsi I/O related functions
91 */
92static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
93static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
94static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
95static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
96static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
97static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
98static void fc_timeout_error(struct fc_fcp_pkt *);
99static void fc_fcp_timeout(unsigned long data);
100static void fc_fcp_rec(struct fc_fcp_pkt *);
101static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
102static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
103static void fc_io_compl(struct fc_fcp_pkt *);
104
105static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
106static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
107static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
108
109/*
110 * command status codes
111 */
112#define FC_COMPLETE 0
113#define FC_CMD_ABORTED 1
114#define FC_CMD_RESET 2
115#define FC_CMD_PLOGO 3
116#define FC_SNS_RCV 4
117#define FC_TRANS_ERR 5
118#define FC_DATA_OVRRUN 6
119#define FC_DATA_UNDRUN 7
120#define FC_ERROR 8
121#define FC_HRD_ERROR 9
122#define FC_CMD_TIME_OUT 10
123
124/*
125 * Error recovery timeout values.
126 */
127#define FC_SCSI_ER_TIMEOUT (10 * HZ)
128#define FC_SCSI_TM_TOV (10 * HZ)
129#define FC_SCSI_REC_TOV (2 * HZ)
130#define FC_HOST_RESET_TIMEOUT (30 * HZ)
131
132#define FC_MAX_ERROR_CNT 5
133#define FC_MAX_RECOV_RETRY 3
134
135#define FC_FCP_DFLT_QUEUE_DEPTH 32
136
137/**
138 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
139 * @lp: fc lport struct
140 * @gfp: gfp flags for allocation
141 *
142 * This is used by upper layer scsi driver.
143 * Return Value : scsi_pkt structure or null on allocation failure.
144 * Context : call from process context. no locking required.
145 */
146static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
147{
148 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
149 struct fc_fcp_pkt *fsp;
150
151 fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
152 if (fsp) {
153 memset(fsp, 0, sizeof(*fsp));
154 fsp->lp = lp;
155 atomic_set(&fsp->ref_cnt, 1);
156 init_timer(&fsp->timer);
157 INIT_LIST_HEAD(&fsp->list);
158 spin_lock_init(&fsp->scsi_pkt_lock);
159 }
160 return fsp;
161}
162
163/**
164 * fc_fcp_pkt_release - release hold on scsi_pkt packet
165 * @fsp: fcp packet struct
166 *
167 * This is used by upper layer scsi driver.
168 * Context : call from process and interrupt context.
169 * no locking required
170 */
171static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
172{
173 if (atomic_dec_and_test(&fsp->ref_cnt)) {
174 struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
175
176 mempool_free(fsp, si->scsi_pkt_pool);
177 }
178}
179
180static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
181{
182 atomic_inc(&fsp->ref_cnt);
183}
184
185/**
186 * fc_fcp_pkt_destroy - release hold on scsi_pkt packet
187 *
188 * @seq: exchange sequence
189 * @fsp: fcp packet struct
190 *
191 * Release the hold on the scsi_pkt packet that was taken to keep
192 * the scsi_pkt around until the EM layer exch resource is freed.
193 * Context : called from the EM layer.
194 * no locking required
195 */
196static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
197{
198 fc_fcp_pkt_release(fsp);
199}
200
201/**
202 * fc_fcp_lock_pkt - lock a packet and get a ref to it.
203 * @fsp: fcp packet
204 *
205 * We should only return an error if we return a command to scsi-ml before
206 * getting a response. This can happen in cases where we send an abort but
207 * do not wait for the response, and the abort and the command end up
208 * passing each other on the wire/network layer.
209 *
210 * Note: this function locks the packet and gets a reference to allow
211 * callers to call the completion function while the lock is held and
212 * not have to worry about the packets refcount.
213 *
214 * TODO: Maybe we should just have callers grab/release the lock and
215 * have a function that they call to verify the fsp and grab a ref if
216 * needed.
217 */
218static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
219{
220 spin_lock_bh(&fsp->scsi_pkt_lock);
221 if (fsp->state & FC_SRB_COMPL) {
222 spin_unlock_bh(&fsp->scsi_pkt_lock);
223 return -EPERM;
224 }
225
226 fc_fcp_pkt_hold(fsp);
227 return 0;
228}
229
230static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
231{
232 spin_unlock_bh(&fsp->scsi_pkt_lock);
233 fc_fcp_pkt_release(fsp);
234}
235
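Editorial sketch, not part of the patch: the lock/reference pattern described
above for fc_fcp_lock_pkt(), mirroring how fc_fcp_cleanup_each_cmd() uses it
later in this file. example_fail_io() is an invented name.

static void example_fail_io(struct fc_fcp_pkt *fsp)
{
	if (fc_fcp_lock_pkt(fsp))	/* fails once FC_SRB_COMPL is set */
		return;
	fc_fcp_cleanup_cmd(fsp, FC_ERROR);
	fc_io_compl(fsp);		/* called with lock and reference held */
	fc_fcp_unlock_pkt(fsp);		/* drops the lock and the reference */
}
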
236static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
237{
238 if (!(fsp->state & FC_SRB_COMPL))
239 mod_timer(&fsp->timer, jiffies + delay);
240}
241
242static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
243{
244 if (!fsp->seq_ptr)
245 return -EINVAL;
246
247 fsp->state |= FC_SRB_ABORT_PENDING;
248 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
249}
250
251/*
252 * Retry command.
253 * An abort isn't needed.
254 */
255static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
256{
257 if (fsp->seq_ptr) {
258 fsp->lp->tt.exch_done(fsp->seq_ptr);
259 fsp->seq_ptr = NULL;
260 }
261
262 fsp->state &= ~FC_SRB_ABORT_PENDING;
263 fsp->io_status = SUGGEST_RETRY << 24;
264 fsp->status_code = FC_ERROR;
265 fc_fcp_complete_locked(fsp);
266}
267
268/*
269 * Receive SCSI data from target.
270 * Called after receiving solicited data.
271 */
272static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
273{
274 struct scsi_cmnd *sc = fsp->cmd;
275 struct fc_lport *lp = fsp->lp;
276 struct fcoe_dev_stats *stats;
277 struct fc_frame_header *fh;
278 size_t start_offset;
279 size_t offset;
280 u32 crc;
281 u32 copy_len = 0;
282 size_t len;
283 void *buf;
284 struct scatterlist *sg;
285 size_t remaining;
286
287 fh = fc_frame_header_get(fp);
288 offset = ntohl(fh->fh_parm_offset);
289 start_offset = offset;
290 len = fr_len(fp) - sizeof(*fh);
291 buf = fc_frame_payload_get(fp, 0);
292
293 if (offset + len > fsp->data_len) {
294 /*
295 * this should never happen
296 */
297 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
298 fc_frame_crc_check(fp))
299 goto crc_err;
300 FC_DEBUG_FCP("data received past end. len %zx offset %zx "
301 "data_len %x\n", len, offset, fsp->data_len);
302 fc_fcp_retry_cmd(fsp);
303 return;
304 }
305 if (offset != fsp->xfer_len)
306 fsp->state |= FC_SRB_DISCONTIG;
307
308 crc = 0;
309 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
310 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
311
312 sg = scsi_sglist(sc);
313 remaining = len;
314
315 while (remaining > 0 && sg) {
316 size_t off;
317 void *page_addr;
318 size_t sg_bytes;
319
320 if (offset >= sg->length) {
321 offset -= sg->length;
322 sg = sg_next(sg);
323 continue;
324 }
325 sg_bytes = min(remaining, sg->length - offset);
326
327 /*
328 * The scatterlist item may be bigger than PAGE_SIZE,
329 * but we are limited to mapping PAGE_SIZE at a time.
330 */
331 off = offset + sg->offset;
332 sg_bytes = min(sg_bytes, (size_t)
333 (PAGE_SIZE - (off & ~PAGE_MASK)));
334 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
335 KM_SOFTIRQ0);
336 if (!page_addr)
337 break; /* XXX panic? */
338
339 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
340 crc = crc32(crc, buf, sg_bytes);
341 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
342 sg_bytes);
343
344 kunmap_atomic(page_addr, KM_SOFTIRQ0);
345 buf += sg_bytes;
346 offset += sg_bytes;
347 remaining -= sg_bytes;
348 copy_len += sg_bytes;
349 }
350
351 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
352 buf = fc_frame_payload_get(fp, 0);
353 if (len % 4) {
354 crc = crc32(crc, buf + len, 4 - (len % 4));
355 len += 4 - (len % 4);
356 }
357
358 if (~crc != le32_to_cpu(fr_crc(fp))) {
359crc_err:
360 stats = lp->dev_stats[smp_processor_id()];
361 stats->ErrorFrames++;
362 if (stats->InvalidCRCCount++ < 5)
363 FC_DBG("CRC error on data frame\n");
364 /*
365 * Assume the frame is total garbage.
366 * We may have copied it over the good part
367 * of the buffer.
368 * If so, we need to retry the entire operation.
369 * Otherwise, ignore it.
370 */
371 if (fsp->state & FC_SRB_DISCONTIG)
372 fc_fcp_retry_cmd(fsp);
373 return;
374 }
375 }
376
377 if (fsp->xfer_contig_end == start_offset)
378 fsp->xfer_contig_end += copy_len;
379 fsp->xfer_len += copy_len;
380
381 /*
382 * In the very rare event that this data arrived after the response
383 * and completes the transfer, call the completion handler.
384 */
385 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
386 fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
387 fc_fcp_complete_locked(fsp);
388}
389
390/*
391 * fc_fcp_send_data - Send SCSI data to target.
392 * @fsp: ptr to fc_fcp_pkt
393 * @sp: ptr to this sequence
394 * @offset: starting offset for this data request
395 * @seq_blen: the burst length for this data request
396 *
397 * Called after receiving a Transfer Ready data descriptor.
398 * If the LLD is capable of sequence offload, send down seq_blen
399 * bytes of data in a single frame; otherwise send multiple FC
400 * frames of at most the max FC frame payload supported by the target port.
401 *
402 * Returns : 0 for success.
403 */
404static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
405 size_t offset, size_t seq_blen)
406{
407 struct fc_exch *ep;
408 struct scsi_cmnd *sc;
409 struct scatterlist *sg;
410 struct fc_frame *fp = NULL;
411 struct fc_lport *lp = fsp->lp;
412 size_t remaining;
413 size_t t_blen;
414 size_t tlen;
415 size_t sg_bytes;
416 size_t frame_offset, fh_parm_offset;
417 int error;
418 void *data = NULL;
419 void *page_addr;
420 int using_sg = lp->sg_supp;
421 u32 f_ctl;
422
423 WARN_ON(seq_blen <= 0);
424 if (unlikely(offset + seq_blen > fsp->data_len)) {
425 /* this should never happen */
426 FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
427 seq_blen, offset);
428 fc_fcp_send_abort(fsp);
429 return 0;
430 } else if (offset != fsp->xfer_len) {
431 /* Out of Order Data Request - no problem, but unexpected. */
432 FC_DEBUG_FCP("xfer-ready non-contiguous. "
433 "seq_blen %zx offset %zx\n", seq_blen, offset);
434 }
435
436 /*
437 * if LLD is capable of seq_offload then set transport
438 * burst length (t_blen) to seq_blen, otherwise set t_blen
439 * to max FC frame payload previously set in fsp->max_payload.
440 */
441 t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
442 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
443 if (t_blen > 512)
444 t_blen &= ~(512 - 1); /* round down to block size */
445 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
446 sc = fsp->cmd;
447
448 remaining = seq_blen;
449 fh_parm_offset = frame_offset = offset;
450 tlen = 0;
451 seq = lp->tt.seq_start_next(seq);
452 f_ctl = FC_FC_REL_OFF;
453 WARN_ON(!seq);
454
455 /*
456 * If a get_page()/put_page() will fail, don't use sg lists
457 * in the fc_frame structure.
458 *
459 * The put_page() may be long after the I/O has completed
460 * in the case of FCoE, since the network driver does it
461 * via free_skb(). See the test in free_pages_check().
462 *
463 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
464 */
465 if (using_sg) {
466 for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
467 if (page_count(sg_page(sg)) == 0 ||
468 (sg_page(sg)->flags & (1 << PG_lru |
469 1 << PG_private |
470 1 << PG_locked |
471 1 << PG_active |
472 1 << PG_slab |
473 1 << PG_swapcache |
474 1 << PG_writeback |
475 1 << PG_reserved |
476 1 << PG_buddy))) {
477 using_sg = 0;
478 break;
479 }
480 }
481 }
482 sg = scsi_sglist(sc);
483
484 while (remaining > 0 && sg) {
485 if (offset >= sg->length) {
486 offset -= sg->length;
487 sg = sg_next(sg);
488 continue;
489 }
490 if (!fp) {
491 tlen = min(t_blen, remaining);
492
493 /*
494 * TODO. Temporary workaround. fc_seq_send() can't
495 * handle odd lengths in non-linear skbs.
496 * This will be the final fragment only.
497 */
498 if (tlen % 4)
499 using_sg = 0;
500 if (using_sg) {
501 fp = _fc_frame_alloc(lp, 0);
502 if (!fp)
503 return -ENOMEM;
504 } else {
505 fp = fc_frame_alloc(lp, tlen);
506 if (!fp)
507 return -ENOMEM;
508
509 data = (void *)(fr_hdr(fp)) +
510 sizeof(struct fc_frame_header);
511 }
512 fh_parm_offset = frame_offset;
513 fr_max_payload(fp) = fsp->max_payload;
514 }
515 sg_bytes = min(tlen, sg->length - offset);
516 if (using_sg) {
517 WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
518 FC_FRAME_SG_LEN);
519 get_page(sg_page(sg));
520 skb_fill_page_desc(fp_skb(fp),
521 skb_shinfo(fp_skb(fp))->nr_frags,
522 sg_page(sg), sg->offset + offset,
523 sg_bytes);
524 fp_skb(fp)->data_len += sg_bytes;
525 fr_len(fp) += sg_bytes;
526 fp_skb(fp)->truesize += PAGE_SIZE;
527 } else {
528 size_t off = offset + sg->offset;
529
530 /*
531 * The scatterlist item may be bigger than PAGE_SIZE,
532 * but we must not cross pages inside the kmap.
533 */
534 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
535 (off & ~PAGE_MASK)));
536 page_addr = kmap_atomic(sg_page(sg) +
537 (off >> PAGE_SHIFT),
538 KM_SOFTIRQ0);
539 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
540 sg_bytes);
541 kunmap_atomic(page_addr, KM_SOFTIRQ0);
542 data += sg_bytes;
543 }
544 offset += sg_bytes;
545 frame_offset += sg_bytes;
546 tlen -= sg_bytes;
547 remaining -= sg_bytes;
548
549 if (tlen)
550 continue;
551
552 /*
553 * Send sequence with transfer sequence initiative in case
554 * this is last FCP frame of the sequence.
555 */
556 if (remaining == 0)
557 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
558
559 ep = fc_seq_exch(seq);
560 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
561 FC_TYPE_FCP, f_ctl, fh_parm_offset);
562
563 /*
564		 * Send this fragment as part of the sequence.
565 */
566 error = lp->tt.seq_send(lp, seq, fp);
567 if (error) {
568 WARN_ON(1); /* send error should be rare */
569 fc_fcp_retry_cmd(fsp);
570 return 0;
571 }
572 fp = NULL;
573 }
574 fsp->xfer_len += seq_blen; /* premature count? */
575 return 0;
576}
577
578static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
579{
580 int ba_done = 1;
581 struct fc_ba_rjt *brp;
582 struct fc_frame_header *fh;
583
584 fh = fc_frame_header_get(fp);
585 switch (fh->fh_r_ctl) {
586 case FC_RCTL_BA_ACC:
587 break;
588 case FC_RCTL_BA_RJT:
589 brp = fc_frame_payload_get(fp, sizeof(*brp));
590 if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
591 break;
592 /* fall thru */
593 default:
594 /*
595		 * we will let the command time out
596		 * and scsi-ml recover in this case,
597		 * so clear the ba_done flag.
598 */
599 ba_done = 0;
600 }
601
602 if (ba_done) {
603 fsp->state |= FC_SRB_ABORTED;
604 fsp->state &= ~FC_SRB_ABORT_PENDING;
605
606 if (fsp->wait_for_comp)
607 complete(&fsp->tm_done);
608 else
609 fc_fcp_complete_locked(fsp);
610 }
611}
612
613/*
614 * fc_fcp_reduce_can_queue - drop can_queue
615 * @lp: lport to drop queueing for
616 *
617 * If we are getting memory allocation failures, then we may
618 * be trying to execute too many commands. We let the running
619 * commands complete or time out, then try again with a reduced
620 * can_queue. Eventually we will hit the point where we run
621 * entirely on reserved structs.
622 */
623static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
624{
625 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
626 unsigned long flags;
627 int can_queue;
628
629 spin_lock_irqsave(lp->host->host_lock, flags);
630 if (si->throttled)
631 goto done;
632 si->throttled = 1;
633
634 can_queue = lp->host->can_queue;
635 can_queue >>= 1;
636 if (!can_queue)
637 can_queue = 1;
638 lp->host->can_queue = can_queue;
639 shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
640 "Reducing can_queue to %d.\n", can_queue);
641done:
642 spin_unlock_irqrestore(lp->host->host_lock, flags);
643}
644
645/*
646 * exch mgr calls this routine to process scsi
647 * exchanges.
648 *
649 * Return : None
650 * Context : called from Soft IRQ context
651 *	      cannot be called while holding the list lock
652 */
653static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
654{
655 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
656 struct fc_lport *lp;
657 struct fc_frame_header *fh;
658 struct fcp_txrdy *dd;
659 u8 r_ctl;
660 int rc = 0;
661
662 if (IS_ERR(fp))
663 goto errout;
664
665 fh = fc_frame_header_get(fp);
666 r_ctl = fh->fh_r_ctl;
667 lp = fsp->lp;
668
669 if (!(lp->state & LPORT_ST_READY))
670 goto out;
671 if (fc_fcp_lock_pkt(fsp))
672 goto out;
673 fsp->last_pkt_time = jiffies;
674
675 if (fh->fh_type == FC_TYPE_BLS) {
676 fc_fcp_abts_resp(fsp, fp);
677 goto unlock;
678 }
679
680 if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
681 goto unlock;
682
683 if (r_ctl == FC_RCTL_DD_DATA_DESC) {
684 /*
685 * received XFER RDY from the target
686 * need to send data to the target
687 */
688 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
689 dd = fc_frame_payload_get(fp, sizeof(*dd));
690 WARN_ON(!dd);
691
692 rc = fc_fcp_send_data(fsp, seq,
693 (size_t) ntohl(dd->ft_data_ro),
694 (size_t) ntohl(dd->ft_burst_len));
695 if (!rc)
696 seq->rec_data = fsp->xfer_len;
697 else if (rc == -ENOMEM)
698 fsp->state |= FC_SRB_NOMEM;
699 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
700 /*
701 * received a DATA frame
702 * next we will copy the data to the system buffer
703 */
704 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
705 fc_fcp_recv_data(fsp, fp);
706 seq->rec_data = fsp->xfer_contig_end;
707 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
708 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
709
710 fc_fcp_resp(fsp, fp);
711 } else {
712 FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
713 }
714unlock:
715 fc_fcp_unlock_pkt(fsp);
716out:
717 fc_frame_free(fp);
718errout:
719 if (IS_ERR(fp))
720 fc_fcp_error(fsp, fp);
721 else if (rc == -ENOMEM)
722 fc_fcp_reduce_can_queue(lp);
723}
724
725static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
726{
727 struct fc_frame_header *fh;
728 struct fcp_resp *fc_rp;
729 struct fcp_resp_ext *rp_ex;
730 struct fcp_resp_rsp_info *fc_rp_info;
731 u32 plen;
732 u32 expected_len;
733 u32 respl = 0;
734 u32 snsl = 0;
735 u8 flags = 0;
736
737 plen = fr_len(fp);
738 fh = (struct fc_frame_header *)fr_hdr(fp);
739 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
740 goto len_err;
741 plen -= sizeof(*fh);
742 fc_rp = (struct fcp_resp *)(fh + 1);
743 fsp->cdb_status = fc_rp->fr_status;
744 flags = fc_rp->fr_flags;
745 fsp->scsi_comp_flags = flags;
746 expected_len = fsp->data_len;
747
748 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
749 rp_ex = (void *)(fc_rp + 1);
750 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
751 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
752 goto len_err;
753 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
754 if (flags & FCP_RSP_LEN_VAL) {
755 respl = ntohl(rp_ex->fr_rsp_len);
756 if (respl != sizeof(*fc_rp_info))
757 goto len_err;
758 if (fsp->wait_for_comp) {
759 /* Abuse cdb_status for rsp code */
760 fsp->cdb_status = fc_rp_info->rsp_code;
761 complete(&fsp->tm_done);
762 /*
763 * tmfs will not have any scsi cmd so
764 * exit here
765 */
766 return;
767 } else
768 goto err;
769 }
770 if (flags & FCP_SNS_LEN_VAL) {
771 snsl = ntohl(rp_ex->fr_sns_len);
772 if (snsl > SCSI_SENSE_BUFFERSIZE)
773 snsl = SCSI_SENSE_BUFFERSIZE;
774 memcpy(fsp->cmd->sense_buffer,
775 (char *)fc_rp_info + respl, snsl);
776 }
777 }
778 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
779 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
780 goto len_err;
781 if (flags & FCP_RESID_UNDER) {
782 fsp->scsi_resid = ntohl(rp_ex->fr_resid);
783 /*
784 * The cmnd->underflow is the minimum number of
784				 * bytes that must be transferred for this
786 * command. Provided a sense condition is not
787 * present, make sure the actual amount
788 * transferred is at least the underflow value
789 * or fail.
790 */
791 if (!(flags & FCP_SNS_LEN_VAL) &&
792 (fc_rp->fr_status == 0) &&
793 (scsi_bufflen(fsp->cmd) -
794 fsp->scsi_resid) < fsp->cmd->underflow)
795 goto err;
796 expected_len -= fsp->scsi_resid;
797 } else {
798 fsp->status_code = FC_ERROR;
799 }
800 }
801 }
802 fsp->state |= FC_SRB_RCV_STATUS;
803
804 /*
805 * Check for missing or extra data frames.
806 */
807 if (unlikely(fsp->xfer_len != expected_len)) {
808 if (fsp->xfer_len < expected_len) {
809 /*
810			 * Some data may be queued locally.
811			 * Wait at least one jiffy to see if it is delivered.
812 * If this expires without data, we may do SRR.
813 */
814 fc_fcp_timer_set(fsp, 2);
815 return;
816 }
817 fsp->status_code = FC_DATA_OVRRUN;
818 FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
819 "data len %x\n",
820 fsp->rport->port_id,
821 fsp->xfer_len, expected_len, fsp->data_len);
822 }
823 fc_fcp_complete_locked(fsp);
824 return;
825
826len_err:
827 FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
828 flags, fr_len(fp), respl, snsl);
829err:
830 fsp->status_code = FC_ERROR;
831 fc_fcp_complete_locked(fsp);
832}
833
834/**
835 * fc_fcp_complete_locked - complete processing of a fcp packet
836 * @fsp: fcp packet
837 *
838 * This function may sleep if a timer is pending. The packet lock must be
839 * held, and the host lock must not be held.
840 */
841static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
842{
843 struct fc_lport *lp = fsp->lp;
844 struct fc_seq *seq;
845 struct fc_exch *ep;
846 u32 f_ctl;
847
848 if (fsp->state & FC_SRB_ABORT_PENDING)
849 return;
850
851 if (fsp->state & FC_SRB_ABORTED) {
852 if (!fsp->status_code)
853 fsp->status_code = FC_CMD_ABORTED;
854 } else {
855 /*
856 * Test for transport underrun, independent of response
857 * underrun status.
858 */
859 if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
860 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
861 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
862 fsp->status_code = FC_DATA_UNDRUN;
863 fsp->io_status = SUGGEST_RETRY << 24;
864 }
865 }
866
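 /*
 * If the target requested confirmation (FCP_CONF_REQ), send a
 * confirmation frame on the next sequence before completing the
 * exchange; otherwise just complete the exchange.
 */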
867 seq = fsp->seq_ptr;
868 if (seq) {
869 fsp->seq_ptr = NULL;
870 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
871 struct fc_frame *conf_frame;
872 struct fc_seq *csp;
873
874 csp = lp->tt.seq_start_next(seq);
875 conf_frame = fc_frame_alloc(fsp->lp, 0);
876 if (conf_frame) {
877 f_ctl = FC_FC_SEQ_INIT;
878 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
879 ep = fc_seq_exch(seq);
880 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
881 ep->did, ep->sid,
882 FC_TYPE_FCP, f_ctl, 0);
883 lp->tt.seq_send(lp, csp, conf_frame);
884 }
885 }
886 lp->tt.exch_done(seq);
887 }
888 fc_io_compl(fsp);
889}
890
891static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
892{
893 struct fc_lport *lp = fsp->lp;
894
895 if (fsp->seq_ptr) {
896 lp->tt.exch_done(fsp->seq_ptr);
897 fsp->seq_ptr = NULL;
898 }
899 fsp->status_code = error;
900}
901
902/**
903 * fc_fcp_cleanup_each_cmd - run fn on each active command
904 * @lp: logical port
905 * @id: target id
906 * @lun: lun
907 * @error: fsp status code
908 *
909 * If lun or id is -1, they are ignored.
910 */
911static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
912 unsigned int lun, int error)
913{
914 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
915 struct fc_fcp_pkt *fsp;
916 struct scsi_cmnd *sc_cmd;
917 unsigned long flags;
918
919 spin_lock_irqsave(lp->host->host_lock, flags);
920restart:
921 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
922 sc_cmd = fsp->cmd;
923 if (id != -1 && scmd_id(sc_cmd) != id)
924 continue;
925
926 if (lun != -1 && sc_cmd->device->lun != lun)
927 continue;
928
929 fc_fcp_pkt_hold(fsp);
930 spin_unlock_irqrestore(lp->host->host_lock, flags);
931
932 if (!fc_fcp_lock_pkt(fsp)) {
933 fc_fcp_cleanup_cmd(fsp, error);
934 fc_io_compl(fsp);
935 fc_fcp_unlock_pkt(fsp);
936 }
937
938 fc_fcp_pkt_release(fsp);
939 spin_lock_irqsave(lp->host->host_lock, flags);
940 /*
941 * while we dropped the lock multiple pkts could
942 * have been released, so we have to start over.
943 */
944 goto restart;
945 }
946 spin_unlock_irqrestore(lp->host->host_lock, flags);
947}
948
949static void fc_fcp_abort_io(struct fc_lport *lp)
950{
951 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
952}
953
954/**
955 * fc_fcp_pkt_send - send a fcp packet to the lower level.
956 * @lp: fc lport
957 * @fsp: fc packet.
958 *
959 * This is called by the upper layer protocol.
960 * Return : zero for success and -1 for failure
961 * Context : called from queuecommand which can be called from process
962 * or scsi soft irq.
963 * Locks : called with the host lock and irqs disabled.
964 */
965static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
966{
967 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
968 int rc;
969
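 /*
 * Stash the fsp pointer in the scsi_cmnd scratch area so that
 * completion and error-handling paths can find it later via CMD_SP().
 */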
970 fsp->cmd->SCp.ptr = (char *)fsp;
971 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
972 fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
973
974 int_to_scsilun(fsp->cmd->device->lun,
975 (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
976 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
977 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
978
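 /*
 * Drop the host lock while handing the command to the transport;
 * presumably this avoids holding it across frame allocation and
 * transmission. On failure, take the packet back off the queue.
 */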
979 spin_unlock_irq(lp->host->host_lock);
980 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
981 spin_lock_irq(lp->host->host_lock);
982 if (rc)
983 list_del(&fsp->list);
984
985 return rc;
986}
987
988static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
989 void (*resp)(struct fc_seq *,
990 struct fc_frame *fp,
991 void *arg))
992{
993 struct fc_frame *fp;
994 struct fc_seq *seq;
995 struct fc_rport *rport;
996 struct fc_rport_libfc_priv *rp;
997 const size_t len = sizeof(fsp->cdb_cmd);
998 int rc = 0;
999
1000 if (fc_fcp_lock_pkt(fsp))
1001 return 0;
1002
1003 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
1004 if (!fp) {
1005 rc = -1;
1006 goto unlock;
1007 }
1008
1009 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
1010 fr_cmd(fp) = fsp->cmd;
1011 rport = fsp->rport;
1012 fsp->max_payload = rport->maxframe_size;
1013 rp = rport->dd_data;
1014
1015 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1016 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
1017 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1018
1019 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
1020 if (!seq) {
1021 fc_frame_free(fp);
1022 rc = -1;
1023 goto unlock;
1024 }
1025 fsp->last_pkt_time = jiffies;
1026 fsp->seq_ptr = seq;
1027 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
1028
1029 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
1030 fc_fcp_timer_set(fsp,
1031 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
1032 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
1033unlock:
1034 fc_fcp_unlock_pkt(fsp);
1035 return rc;
1036}
1037
1038/*
1039 * transport error handler
1040 */
1041static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1042{
1043 int error = PTR_ERR(fp);
1044
1045 if (fc_fcp_lock_pkt(fsp))
1046 return;
1047
1048 switch (error) {
1049 case -FC_EX_CLOSED:
1050 fc_fcp_retry_cmd(fsp);
1051 goto unlock;
1052 default:
1053 FC_DBG("unknown error %ld\n", PTR_ERR(fp));
1054 }
1055 /*
1056 * clear abort pending, because the lower layer
1057 * decided to force completion.
1058 */
1059 fsp->state &= ~FC_SRB_ABORT_PENDING;
1060 fsp->status_code = FC_CMD_PLOGO;
1061 fc_fcp_complete_locked(fsp);
1062unlock:
1063 fc_fcp_unlock_pkt(fsp);
1064}
1065
1066/*
1067 * SCSI abort handler - sends an abort
1068 * and then waits for abort completion
1069 */
1070static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1071{
1072 int rc = FAILED;
1073
1074 if (fc_fcp_send_abort(fsp))
1075 return FAILED;
1076
1077 init_completion(&fsp->tm_done);
1078 fsp->wait_for_comp = 1;
1079
1080 spin_unlock_bh(&fsp->scsi_pkt_lock);
1081 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
1082 spin_lock_bh(&fsp->scsi_pkt_lock);
1083 fsp->wait_for_comp = 0;
1084
1085 if (!rc) {
1086 FC_DBG("target abort cmd failed\n");
1087 rc = FAILED;
1088 } else if (fsp->state & FC_SRB_ABORTED) {
1089 FC_DBG("target abort cmd passed\n");
1090 rc = SUCCESS;
1091 fc_fcp_complete_locked(fsp);
1092 }
1093
1094 return rc;
1095}
1096
1097/*
1098 * Retry LUN reset after resource allocation failed.
1099 */
1100static void fc_lun_reset_send(unsigned long data)
1101{
1102 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1103 struct fc_lport *lp = fsp->lp;
1104 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
1105 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
1106 return;
1107 if (fc_fcp_lock_pkt(fsp))
1108 return;
1109 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
1110 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1111 fc_fcp_unlock_pkt(fsp);
1112 }
1113}
1114
1115/*
1116 * SCSI device reset handler - sends a LUN RESET to the device
1117 * and waits for the reset reply
1118 */
1119static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1120 unsigned int id, unsigned int lun)
1121{
1122 int rc;
1123
1124 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
1125 fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
1126 int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
1127
1128 fsp->wait_for_comp = 1;
1129 init_completion(&fsp->tm_done);
1130
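 /*
 * fc_lun_reset_send() issues the LUN RESET task management command;
 * if sending fails it re-arms itself via the fsp timer to retry.
 */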
1131 fc_lun_reset_send((unsigned long)fsp);
1132
1133 /*
1134 * wait for completion of reset
1135 * after that make sure all commands are terminated
1136 */
1137 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
1138
1139 spin_lock_bh(&fsp->scsi_pkt_lock);
1140 fsp->state |= FC_SRB_COMPL;
1141 spin_unlock_bh(&fsp->scsi_pkt_lock);
1142
1143 del_timer_sync(&fsp->timer);
1144
1145 spin_lock_bh(&fsp->scsi_pkt_lock);
1146 if (fsp->seq_ptr) {
1147 lp->tt.exch_done(fsp->seq_ptr);
1148 fsp->seq_ptr = NULL;
1149 }
1150 fsp->wait_for_comp = 0;
1151 spin_unlock_bh(&fsp->scsi_pkt_lock);
1152
1153 if (!rc) {
1154 FC_DBG("lun reset failed\n");
1155 return FAILED;
1156 }
1157
1158 /* cdb_status holds the tmf's rsp code */
1159 if (fsp->cdb_status != FCP_TMF_CMPL)
1160 return FAILED;
1161
1162 FC_DBG("lun reset to lun %u completed\n", lun);
1163 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
1164 return SUCCESS;
1165}
1166
1167/*
1168 * Task Management response handler
1169 */
1170static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1171{
1172 struct fc_fcp_pkt *fsp = arg;
1173 struct fc_frame_header *fh;
1174
1175 if (IS_ERR(fp)) {
1176 /*
1177 * If there is an error just let it time out or wait
1178 * for the TMF to be aborted if it timed out.
1179 *
1180 * scsi-eh will escalate when either happens.
1181 */
1182 return;
1183 }
1184
1185 if (fc_fcp_lock_pkt(fsp))
1186 return;
1187
1188 /*
1189 * raced with eh timeout handler.
1190 */
1191 if (!fsp->seq_ptr || !fsp->wait_for_comp) {
1192 spin_unlock_bh(&fsp->scsi_pkt_lock);
1193 return;
1194 }
1195
1196 fh = fc_frame_header_get(fp);
1197 if (fh->fh_type != FC_TYPE_BLS)
1198 fc_fcp_resp(fsp, fp);
1199 fsp->seq_ptr = NULL;
1200 fsp->lp->tt.exch_done(seq);
1201 fc_frame_free(fp);
1202 fc_fcp_unlock_pkt(fsp);
1203}
1204
1205static void fc_fcp_cleanup(struct fc_lport *lp)
1206{
1207 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
1208}
1209
1210/*
1211 * fc_fcp_timeout: called by OS timer function.
1212 *
1213 * The timer has been inactivated and must be reactivated if desired
1214 * using fc_fcp_timer_set().
1215 *
1216 * Algorithm:
1217 *
1218 * If REC is supported, just issue it, and return. The REC exchange will
1219 * complete or time out, and recovery can continue at that point.
1220 *
1221 * Otherwise, if the response has been received without all the data,
1222 * it has been ER_TIMEOUT since the response was received.
1223 *
1224 * If the response has not been received, we check whether data was
1225 * received recently. If it has been, we continue waiting; otherwise,
1226 * we abort the command.
1227 */
1228static void fc_fcp_timeout(unsigned long data)
1229{
1230 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1231 struct fc_rport *rport = fsp->rport;
1232 struct fc_rport_libfc_priv *rp = rport->dd_data;
1233
1234 if (fc_fcp_lock_pkt(fsp))
1235 return;
1236
1237 if (fsp->cdb_cmd.fc_tm_flags)
1238 goto unlock;
1239
1240 fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
1241
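 /*
 * Prefer REC-based recovery when the target supports it. Otherwise,
 * if data has arrived within the last half error-timeout period,
 * keep waiting; else complete with what we have or escalate.
 */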
1242 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
1243 fc_fcp_rec(fsp);
1244 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
1245 jiffies))
1246 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1247 else if (fsp->state & FC_SRB_RCV_STATUS)
1248 fc_fcp_complete_locked(fsp);
1249 else
1250 fc_timeout_error(fsp);
1251 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
1252unlock:
1253 fc_fcp_unlock_pkt(fsp);
1254}
1255
1256/*
1257 * Send a REC (Read Exchange Concise) ELS request to query exchange status
1258 */
1259static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1260{
1261 struct fc_lport *lp;
1262 struct fc_frame *fp;
1263 struct fc_rport *rport;
1264 struct fc_rport_libfc_priv *rp;
1265
1266 lp = fsp->lp;
1267 rport = fsp->rport;
1268 rp = rport->dd_data;
1269 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
1270 fsp->status_code = FC_HRD_ERROR;
1271 fsp->io_status = SUGGEST_RETRY << 24;
1272 fc_fcp_complete_locked(fsp);
1273 return;
1274 }
1275 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
1276 if (!fp)
1277 goto retry;
1278
1279 fr_seq(fp) = fsp->seq_ptr;
1280 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1281 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
1282 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1283 if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
1284 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
1285 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1286 return;
1287 }
1288 fc_frame_free(fp);
1289retry:
1290 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1291 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1292 else
1293 fc_timeout_error(fsp);
1294}
1295
1296/*
1297 * Receive handler for REC ELS frame.
1298 * If it is a reject, let the scsi layer handle the timeout.
1299 * If it is an LS_ACC and the I/O is not complete, set the timer
1300 * and return; otherwise complete the exchange and tell the
1301 * scsi layer to restart the I/O.
1302 */
1303static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1304{
1305 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
1306 struct fc_els_rec_acc *recp;
1307 struct fc_els_ls_rjt *rjt;
1308 u32 e_stat;
1309 u8 opcode;
1310 u32 offset;
1311 enum dma_data_direction data_dir;
1312 enum fc_rctl r_ctl;
1313 struct fc_rport_libfc_priv *rp;
1314
1315 if (IS_ERR(fp)) {
1316 fc_fcp_rec_error(fsp, fp);
1317 return;
1318 }
1319
1320 if (fc_fcp_lock_pkt(fsp))
1321 goto out;
1322
1323 fsp->recov_retry = 0;
1324 opcode = fc_frame_payload_op(fp);
1325 if (opcode == ELS_LS_RJT) {
1326 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1327 switch (rjt->er_reason) {
1328 default:
1329 FC_DEBUG_FCP("device %x unexpected REC reject "
1330 "reason %d expl %d\n",
1331 fsp->rport->port_id, rjt->er_reason,
1332 rjt->er_explan);
1333 /* fall through */
1334 case ELS_RJT_UNSUP:
1335 FC_DEBUG_FCP("device does not support REC\n");
1336 rp = fsp->rport->dd_data;
1337 /*
1338 * If we do not support RECs or got some bogus
1339 * reason then re-set the timer so we check for
1340 * making progress.
1341 */
1342 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
1343 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1344 break;
1345 case ELS_RJT_LOGIC:
1346 case ELS_RJT_UNAB:
1347 /*
1348 * If no data transfer, the command frame got dropped
1349 * so we just retry. If data was transferred, we
1350 * lost the response but the target has no record,
1351 * so we abort and retry.
1352 */
1353 if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
1354 fsp->xfer_len == 0) {
1355 fc_fcp_retry_cmd(fsp);
1356 break;
1357 }
1358 fc_timeout_error(fsp);
1359 break;
1360 }
1361 } else if (opcode == ELS_LS_ACC) {
1362 if (fsp->state & FC_SRB_ABORTED)
1363 goto unlock_out;
1364
1365 data_dir = fsp->cmd->sc_data_direction;
1366 recp = fc_frame_payload_get(fp, sizeof(*recp));
1367 offset = ntohl(recp->reca_fc4value);
1368 e_stat = ntohl(recp->reca_e_stat);
1369
1370 if (e_stat & ESB_ST_COMPLETE) {
1371
1372 /*
1373 * The exchange is complete.
1374 *
1375 * For output, we must've lost the response.
1376 * For input, all data must've been sent.
1377 * We may have lost the response
1378 * (and a confirmation was requested) and maybe
1379 * some data.
1380 *
1381 * If all data received, send SRR
1382 * asking for response. If partial data received,
1383 * or gaps, SRR requests data at start of gap.
1384 * Recovery via SRR relies on in-order-delivery.
1385 */
1386 if (data_dir == DMA_TO_DEVICE) {
1387 r_ctl = FC_RCTL_DD_CMD_STATUS;
1388 } else if (fsp->xfer_contig_end == offset) {
1389 r_ctl = FC_RCTL_DD_CMD_STATUS;
1390 } else {
1391 offset = fsp->xfer_contig_end;
1392 r_ctl = FC_RCTL_DD_SOL_DATA;
1393 }
1394 fc_fcp_srr(fsp, r_ctl, offset);
1395 } else if (e_stat & ESB_ST_SEQ_INIT) {
1396
1397 /*
1398 * The remote port has the initiative, so just
1399 * keep waiting for it to complete.
1400 */
1401 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1402 } else {
1403
1404 /*
1405 * The exchange is incomplete, we have seq. initiative.
1406 * Lost response with requested confirmation,
1407 * lost confirmation, lost transfer ready or
1408 * lost write data.
1409 *
1410 * For output, if not all data was received, ask
1411 * for transfer ready to be repeated.
1412 *
1413 * If we received or sent all the data, send SRR to
1414 * request response.
1415 *
1416 * If we lost a response, we may have lost some read
1417 * data as well.
1418 */
1419 r_ctl = FC_RCTL_DD_SOL_DATA;
1420 if (data_dir == DMA_TO_DEVICE) {
1421 r_ctl = FC_RCTL_DD_CMD_STATUS;
1422 if (offset < fsp->data_len)
1423 r_ctl = FC_RCTL_DD_DATA_DESC;
1424 } else if (offset == fsp->xfer_contig_end) {
1425 r_ctl = FC_RCTL_DD_CMD_STATUS;
1426 } else if (fsp->xfer_contig_end < offset) {
1427 offset = fsp->xfer_contig_end;
1428 }
1429 fc_fcp_srr(fsp, r_ctl, offset);
1430 }
1431 }
1432unlock_out:
1433 fc_fcp_unlock_pkt(fsp);
1434out:
1435 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1436 fc_frame_free(fp);
1437}
1438
1439/*
1440 * Handle error response or timeout for REC exchange.
1441 */
1442static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1443{
1444 int error = PTR_ERR(fp);
1445
1446 if (fc_fcp_lock_pkt(fsp))
1447 goto out;
1448
1449 switch (error) {
1450 case -FC_EX_CLOSED:
1451 fc_fcp_retry_cmd(fsp);
1452 break;
1453
1454 default:
1455 FC_DBG("REC %p fid %x error unexpected error %d\n",
1456 fsp, fsp->rport->port_id, error);
1457 fsp->status_code = FC_CMD_PLOGO;
1458 /* fall through */
1459
1460 case -FC_EX_TIMEOUT:
1461 /*
1462 * Assume REC or LS_ACC was lost.
1463 * The exchange manager will have aborted REC, so retry.
1464 */
1465 FC_DBG("REC fid %x error error %d retry %d/%d\n",
1466 fsp->rport->port_id, error, fsp->recov_retry,
1467 FC_MAX_RECOV_RETRY);
1468 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1469 fc_fcp_rec(fsp);
1470 else
1471 fc_timeout_error(fsp);
1472 break;
1473 }
1474 fc_fcp_unlock_pkt(fsp);
1475out:
1476 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1477}
1478
1479/*
1480 * Time out error routine:
1481 * aborts the I/O, closes the exchange and
1482 * sends a completion notification to the scsi layer
1483 */
1484static void fc_timeout_error(struct fc_fcp_pkt *fsp)
1485{
1486 fsp->status_code = FC_CMD_TIME_OUT;
1487 fsp->cdb_status = 0;
1488 fsp->io_status = 0;
1489 /*
1490 * if this fails then we let the scsi command timer fire and
1491 * scsi-ml escalate.
1492 */
1493 fc_fcp_send_abort(fsp);
1494}
1495
1496/*
1497 * Sequence retransmission request.
1498 * This is called after receiving status but insufficient data, or
1499 * when expecting status but the request has timed out.
1500 */
1501static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1502{
1503 struct fc_lport *lp = fsp->lp;
1504 struct fc_rport *rport;
1505 struct fc_rport_libfc_priv *rp;
1506 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
1507 struct fc_seq *seq;
1508 struct fcp_srr *srr;
1509 struct fc_frame *fp;
1510 u8 cdb_op;
1511
1512 rport = fsp->rport;
1513 rp = rport->dd_data;
1514 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1515
1516 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
1517 goto retry; /* shouldn't happen */
1518 fp = fc_frame_alloc(lp, sizeof(*srr));
1519 if (!fp)
1520 goto retry;
1521
1522 srr = fc_frame_payload_get(fp, sizeof(*srr));
1523 memset(srr, 0, sizeof(*srr));
1524 srr->srr_op = ELS_SRR;
1525 srr->srr_ox_id = htons(ep->oxid);
1526 srr->srr_rx_id = htons(ep->rxid);
1527 srr->srr_r_ctl = r_ctl;
1528 srr->srr_rel_off = htonl(offset);
1529
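 /*
 * SRR is an FCP FC-4 link service, so it is sent with R_CTL ELS4_REQ
 * and type FCP rather than as a plain extended link service.
 */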
1530 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
1531 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
1532 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1533
1534 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
1535 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
1536 if (!seq) {
1537 fc_frame_free(fp);
1538 goto retry;
1539 }
1540 fsp->recov_seq = seq;
1541 fsp->xfer_len = offset;
1542 fsp->xfer_contig_end = offset;
1543 fsp->state &= ~FC_SRB_RCV_STATUS;
1544 fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
1545 return;
1546retry:
1547 fc_fcp_retry_cmd(fsp);
1548}
1549
1550/*
1551 * Handle response from SRR.
1552 */
1553static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1554{
1555 struct fc_fcp_pkt *fsp = arg;
1556 struct fc_frame_header *fh;
1557
1558 if (IS_ERR(fp)) {
1559 fc_fcp_srr_error(fsp, fp);
1560 return;
1561 }
1562
1563 if (fc_fcp_lock_pkt(fsp))
1564 goto out;
1565
1566 fh = fc_frame_header_get(fp);
1567 /*
1568 * BUG? fc_fcp_srr_error calls exch_done which would release
1569 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
1570 * then fc_exch_timeout would be sending an abort. The exch_done
1571 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
1572 * an abort response though.
1573 */
1574 if (fh->fh_type == FC_TYPE_BLS) {
1575 fc_fcp_unlock_pkt(fsp);
1576 return;
1577 }
1578
1579 fsp->recov_seq = NULL;
1580 switch (fc_frame_payload_op(fp)) {
1581 case ELS_LS_ACC:
1582 fsp->recov_retry = 0;
1583 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1584 break;
1585 case ELS_LS_RJT:
1586 default:
1587 fc_timeout_error(fsp);
1588 break;
1589 }
1590 fc_fcp_unlock_pkt(fsp);
1591 fsp->lp->tt.exch_done(seq);
1592out:
1593 fc_frame_free(fp);
1594 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1595}
1596
1597static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1598{
1599 if (fc_fcp_lock_pkt(fsp))
1600 goto out;
1601 fsp->lp->tt.exch_done(fsp->recov_seq);
1602 fsp->recov_seq = NULL;
1603 switch (PTR_ERR(fp)) {
1604 case -FC_EX_TIMEOUT:
1605 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1606 fc_fcp_rec(fsp);
1607 else
1608 fc_timeout_error(fsp);
1609 break;
1610 case -FC_EX_CLOSED: /* e.g., link failure */
1611 /* fall through */
1612 default:
1613 fc_fcp_retry_cmd(fsp);
1614 break;
1615 }
1616 fc_fcp_unlock_pkt(fsp);
1617out:
1618 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1619}
1620
1621static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
1622{
1623 /* lock ? */
1624 return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
1625}
1626
1627/**
1628 * fc_queuecommand - The queuecommand function of the scsi template
1629 * @cmd: struct scsi_cmnd to be executed
1630 * @done: Callback function to be called when cmd is completed
1631 *
1632 * This is the I/O strategy routine, called by the scsi layer.
1633 * This routine is called with the host_lock held.
1634 */
1635int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1636{
1637 struct fc_lport *lp;
1638 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1639 struct fc_fcp_pkt *fsp;
1640 struct fc_rport_libfc_priv *rp;
1641 int rval;
1642 int rc = 0;
1643 struct fcoe_dev_stats *stats;
1644
1645 lp = shost_priv(sc_cmd->device->host);
1646
1647 rval = fc_remote_port_chkready(rport);
1648 if (rval) {
1649 sc_cmd->result = rval;
1650 done(sc_cmd);
1651 goto out;
1652 }
1653
1654 if (!*(struct fc_remote_port **)rport->dd_data) {
1655 /*
1656 * rport is transitioning from blocked/deleted to
1657 * online
1658 */
1659 sc_cmd->result = DID_IMM_RETRY << 16;
1660 done(sc_cmd);
1661 goto out;
1662 }
1663
1664 rp = rport->dd_data;
1665
1666 if (!fc_fcp_lport_queue_ready(lp)) {
1667 rc = SCSI_MLQUEUE_HOST_BUSY;
1668 goto out;
1669 }
1670
1671 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
1672 if (fsp == NULL) {
1673 rc = SCSI_MLQUEUE_HOST_BUSY;
1674 goto out;
1675 }
1676
1677 /*
1678 * build the libfc request pkt
1679 */
1680 fsp->cmd = sc_cmd; /* save the cmd */
1681 fsp->lp = lp; /* save the softc ptr */
1682 fsp->rport = rport; /* set the remote port ptr */
1683 sc_cmd->scsi_done = done;
1684
1685 /*
1686 * set up the transfer length
1687 */
1688 fsp->data_len = scsi_bufflen(sc_cmd);
1689 fsp->xfer_len = 0;
1690
1691 /*
1692 * setup the data direction
1693 */
1694 stats = lp->dev_stats[smp_processor_id()];
1695 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1696 fsp->req_flags = FC_SRB_READ;
1697 stats->InputRequests++;
1698 stats->InputMegabytes = fsp->data_len;
1699 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1700 fsp->req_flags = FC_SRB_WRITE;
1701 stats->OutputRequests++;
1702 stats->OutputMegabytes = fsp->data_len;
1703 } else {
1704 fsp->req_flags = 0;
1705 stats->ControlRequests++;
1706 }
1707
1708 fsp->tgt_flags = rp->flags;
1709
1710 init_timer(&fsp->timer);
1711 fsp->timer.data = (unsigned long)fsp;
1712
1713 /*
1714 * Send it to the lower layer.
1715 * If we get a -1 return, ask the scsi midlayer to requeue
1716 * the request (SCSI_MLQUEUE_HOST_BUSY).
1717 */
1718 rval = fc_fcp_pkt_send(lp, fsp);
1719 if (rval != 0) {
1720 fsp->state = FC_SRB_FREE;
1721 fc_fcp_pkt_release(fsp);
1722 rc = SCSI_MLQUEUE_HOST_BUSY;
1723 }
1724out:
1725 return rc;
1726}
1727EXPORT_SYMBOL(fc_queuecommand);
1728
1729/**
1730 * fc_io_compl - Handle responses for completed commands
1731 * @fsp: scsi packet
1732 *
1733 * Translates an error to a Linux SCSI error.
1734 *
1735 * The fcp packet lock must be held when calling.
1736 */
1737static void fc_io_compl(struct fc_fcp_pkt *fsp)
1738{
1739 struct fc_fcp_internal *si;
1740 struct scsi_cmnd *sc_cmd;
1741 struct fc_lport *lp;
1742 unsigned long flags;
1743
1744 fsp->state |= FC_SRB_COMPL;
1745 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
1746 spin_unlock_bh(&fsp->scsi_pkt_lock);
1747 del_timer_sync(&fsp->timer);
1748 spin_lock_bh(&fsp->scsi_pkt_lock);
1749 }
1750
1751 lp = fsp->lp;
1752 si = fc_get_scsi_internal(lp);
1753 spin_lock_irqsave(lp->host->host_lock, flags);
1754 if (!fsp->cmd) {
1755 spin_unlock_irqrestore(lp->host->host_lock, flags);
1756 return;
1757 }
1758
1759 /*
1760 * if a command timed out while we had to try and throttle IO
1761 * and it is now getting cleaned up, then we are about to
1762 * try again so clear the throttled flag in case we get more
1763 * timeouts.
1764 */
1765 if (si->throttled && fsp->state & FC_SRB_NOMEM)
1766 si->throttled = 0;
1767
1768 sc_cmd = fsp->cmd;
1769 fsp->cmd = NULL;
1770
1771 if (!sc_cmd->SCp.ptr) {
1772 spin_unlock_irqrestore(lp->host->host_lock, flags);
1773 return;
1774 }
1775
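 /*
 * Map the libfc transport status to a SCSI midlayer result (DID_*),
 * combined with the SCSI status byte reported by the target.
 */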
1776 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1777 switch (fsp->status_code) {
1778 case FC_COMPLETE:
1779 if (fsp->cdb_status == 0) {
1780 /*
1781 * good I/O status
1782 */
1783 sc_cmd->result = DID_OK << 16;
1784 if (fsp->scsi_resid)
1785 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1786 } else if (fsp->cdb_status == QUEUE_FULL) {
1787 struct scsi_device *tmp_sdev;
1788 struct scsi_device *sdev = sc_cmd->device;
1789
1790 shost_for_each_device(tmp_sdev, sdev->host) {
1791 if (tmp_sdev->id != sdev->id)
1792 continue;
1793
1794 if (tmp_sdev->queue_depth > 1) {
1795 scsi_track_queue_full(tmp_sdev,
1796 tmp_sdev->
1797 queue_depth - 1);
1798 }
1799 }
1800 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1801 } else {
1802 /*
1803 * transport level I/O was ok but scsi
1804 * has non zero status
1805 */
1806 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1807 }
1808 break;
1809 case FC_ERROR:
1810 sc_cmd->result = DID_ERROR << 16;
1811 break;
1812 case FC_DATA_UNDRUN:
1813 if (fsp->cdb_status == 0) {
1814 /*
1815 * scsi status is good but transport level
1816 * underrun. for read it should be an error??
1817 */
1818 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1819 } else {
1820 /*
1821 * scsi got underrun, this is an error
1822 */
1823 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1824 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1825 }
1826 break;
1827 case FC_DATA_OVRRUN:
1828 /*
1829 * overrun is an error
1830 */
1831 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1832 break;
1833 case FC_CMD_ABORTED:
1834 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
1835 break;
1836 case FC_CMD_TIME_OUT:
1837 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
1838 break;
1839 case FC_CMD_RESET:
1840 sc_cmd->result = (DID_RESET << 16);
1841 break;
1842 case FC_HRD_ERROR:
1843 sc_cmd->result = (DID_NO_CONNECT << 16);
1844 break;
1845 default:
1846 sc_cmd->result = (DID_ERROR << 16);
1847 break;
1848 }
1849
1850 list_del(&fsp->list);
1851 sc_cmd->SCp.ptr = NULL;
1852 sc_cmd->scsi_done(sc_cmd);
1853 spin_unlock_irqrestore(lp->host->host_lock, flags);
1854
1855 /* release ref from initial allocation in queue command */
1856 fc_fcp_pkt_release(fsp);
1857}
1858
1859/**
1860 * fc_fcp_complete - complete processing of a fcp packet
1861 * @fsp: fcp packet
1862 *
1863 * This function may sleep if a fsp timer is pending.
1864 * The host lock must not be held by caller.
1865 */
1866void fc_fcp_complete(struct fc_fcp_pkt *fsp)
1867{
1868 if (fc_fcp_lock_pkt(fsp))
1869 return;
1870
1871 fc_fcp_complete_locked(fsp);
1872 fc_fcp_unlock_pkt(fsp);
1873}
1874EXPORT_SYMBOL(fc_fcp_complete);
1875
1876/**
1877 * fc_eh_abort - Abort a command...from scsi host template
1878 * @sc_cmd: scsi command to abort
1879 *
1880 * send ABTS to the target device and wait for the response
1881 * sc_cmd is the pointer to the command to be aborted.
1882 */
1883int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1884{
1885 struct fc_fcp_pkt *fsp;
1886 struct fc_lport *lp;
1887 int rc = FAILED;
1888 unsigned long flags;
1889
1890 lp = shost_priv(sc_cmd->device->host);
1891 if (lp->state != LPORT_ST_READY)
1892 return rc;
1893 else if (!(lp->link_status & FC_LINK_UP))
1894 return rc;
1895
1896 spin_lock_irqsave(lp->host->host_lock, flags);
1897 fsp = CMD_SP(sc_cmd);
1898 if (!fsp) {
1899 /* command completed while scsi eh was setting up */
1900 spin_unlock_irqrestore(lp->host->host_lock, flags);
1901 return SUCCESS;
1902 }
1903 /* grab a ref so the fsp and sc_cmd cannot be released from under us */
1904 fc_fcp_pkt_hold(fsp);
1905 spin_unlock_irqrestore(lp->host->host_lock, flags);
1906
1907 if (fc_fcp_lock_pkt(fsp)) {
1908 /* completed while we were waiting for timer to be deleted */
1909 rc = SUCCESS;
1910 goto release_pkt;
1911 }
1912
1913 rc = fc_fcp_pkt_abort(lp, fsp);
1914 fc_fcp_unlock_pkt(fsp);
1915
1916release_pkt:
1917 fc_fcp_pkt_release(fsp);
1918 return rc;
1919}
1920EXPORT_SYMBOL(fc_eh_abort);
1921
1922/**
1923 * fc_eh_device_reset - Reset a single LUN
1924 * @sc_cmd: scsi command
1925 *
1926 * Set from the scsi host template to send a TMF command to the target and
1927 * wait for the response.
1928 */
1929int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1930{
1931 struct fc_lport *lp;
1932 struct fc_fcp_pkt *fsp;
1933 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1934 int rc = FAILED;
1935 struct fc_rport_libfc_priv *rp;
1936 int rval;
1937
1938 rval = fc_remote_port_chkready(rport);
1939 if (rval)
1940 goto out;
1941
1942 rp = rport->dd_data;
1943 lp = shost_priv(sc_cmd->device->host);
1944
1945 if (lp->state != LPORT_ST_READY)
1946 return rc;
1947
1948 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
1949 if (fsp == NULL) {
1950 FC_DBG("could not allocate scsi_pkt\n");
1951 sc_cmd->result = DID_NO_CONNECT << 16;
1952 goto out;
1953 }
1954
1955 /*
1956 * Build the libfc request pkt. Do not set the scsi cmnd, because
1957 * the sc passed in is not set up for execution like when sent
1958 * through the queuecommand callout.
1959 */
1960 fsp->lp = lp; /* save the softc ptr */
1961 fsp->rport = rport; /* set the remote port ptr */
1962
1963 /*
1964 * flush outstanding commands
1965 */
1966 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
1967 fsp->state = FC_SRB_FREE;
1968 fc_fcp_pkt_release(fsp);
1969
1970out:
1971 return rc;
1972}
1973EXPORT_SYMBOL(fc_eh_device_reset);
1974
1975/**
1976 * fc_eh_host_reset - The reset function will reset the ports on the host.
1977 * @sc_cmd: scsi command
1978 */
1979int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
1980{
1981 struct Scsi_Host *shost = sc_cmd->device->host;
1982 struct fc_lport *lp = shost_priv(shost);
1983 unsigned long wait_tmo;
1984
1985 lp->tt.lport_reset(lp);
1986 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
1987 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
1988 msleep(1000);
1989
1990 if (fc_fcp_lport_queue_ready(lp)) {
1991 shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
1992 return SUCCESS;
1993 } else {
1994 shost_printk(KERN_INFO, shost, "Host reset failed. "
1995 "lport not ready.\n");
1996 return FAILED;
1997 }
1998}
1999EXPORT_SYMBOL(fc_eh_host_reset);
2000
2001/**
2002 * fc_slave_alloc - configure queue depth
2003 * @sdev: scsi device
2004 *
2005 * Configures queue depth based on host's cmd_per_lun. If not set
2006 * then we use the libfc default.
2007 */
2008int fc_slave_alloc(struct scsi_device *sdev)
2009{
2010 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2011 int queue_depth;
2012
2013 if (!rport || fc_remote_port_chkready(rport))
2014 return -ENXIO;
2015
2016 if (sdev->tagged_supported) {
2017 if (sdev->host->hostt->cmd_per_lun)
2018 queue_depth = sdev->host->hostt->cmd_per_lun;
2019 else
2020 queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
2021 scsi_activate_tcq(sdev, queue_depth);
2022 }
2023 return 0;
2024}
2025EXPORT_SYMBOL(fc_slave_alloc);
2026
2027int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2028{
2029 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2030 return sdev->queue_depth;
2031}
2032EXPORT_SYMBOL(fc_change_queue_depth);
2033
2034int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2035{
2036 if (sdev->tagged_supported) {
2037 scsi_set_tag_type(sdev, tag_type);
2038 if (tag_type)
2039 scsi_activate_tcq(sdev, sdev->queue_depth);
2040 else
2041 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2042 } else
2043 tag_type = 0;
2044
2045 return tag_type;
2046}
2047EXPORT_SYMBOL(fc_change_queue_type);
2048
2049void fc_fcp_destroy(struct fc_lport *lp)
2050{
2051 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
2052
2053 if (!list_empty(&si->scsi_pkt_queue))
2054 printk(KERN_ERR "Leaked scsi packets.\n");
2055
2056 mempool_destroy(si->scsi_pkt_pool);
2057 kfree(si);
2058 lp->scsi_priv = NULL;
2059}
2060EXPORT_SYMBOL(fc_fcp_destroy);
2061
2062int fc_fcp_init(struct fc_lport *lp)
2063{
2064 int rc;
2065 struct fc_fcp_internal *si;
2066
2067 if (!lp->tt.fcp_cmd_send)
2068 lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
2069
2070 if (!lp->tt.fcp_cleanup)
2071 lp->tt.fcp_cleanup = fc_fcp_cleanup;
2072
2073 if (!lp->tt.fcp_abort_io)
2074 lp->tt.fcp_abort_io = fc_fcp_abort_io;
2075
2076 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
2077 if (!si)
2078 return -ENOMEM;
2079 lp->scsi_priv = si;
2080 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2081
2082 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2083 if (!si->scsi_pkt_pool) {
2084 rc = -ENOMEM;
2085 goto free_internal;
2086 }
2087 return 0;
2088
2089free_internal:
2090 kfree(si);
2091 return rc;
2092}
2093EXPORT_SYMBOL(fc_fcp_init);
2094
2095static int __init libfc_init(void)
2096{
2097 int rc;
2098
2099 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
2100 sizeof(struct fc_fcp_pkt),
2101 0, SLAB_HWCACHE_ALIGN, NULL);
2102 if (scsi_pkt_cachep == NULL) {
2103 FC_DBG("Unable to allocate SRB cache...module load failed!");
2104 return -ENOMEM;
2105 }
2106
2107 rc = fc_setup_exch_mgr();
2108 if (rc)
2109 goto destroy_pkt_cache;
2110
2111 rc = fc_setup_rport();
2112 if (rc)
2113 goto destroy_em;
2114
2115 return rc;
2116destroy_em:
2117 fc_destroy_exch_mgr();
2118destroy_pkt_cache:
2119 kmem_cache_destroy(scsi_pkt_cachep);
2120 return rc;
2121}
2122
2123static void __exit libfc_exit(void)
2124{
2125 kmem_cache_destroy(scsi_pkt_cachep);
2126 fc_destroy_exch_mgr();
2127 fc_destroy_rport();
2128}
2129
2130module_init(libfc_init);
2131module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 000000000000..63fe00cfe667
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Frame allocation.
22 */
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/skbuff.h>
26#include <linux/crc32.h>
27
28#include <scsi/fc_frame.h>
29
30/*
31 * Check the CRC in a frame.
32 */
33u32 fc_frame_crc_check(struct fc_frame *fp)
34{
35 u32 crc;
36 u32 error;
37 const u8 *bp;
38 unsigned int len;
39
40 WARN_ON(!fc_frame_is_linear(fp));
41 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
42 len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
43 bp = (const u8 *) fr_hdr(fp);
44 crc = ~crc32(~0, bp, len);
45 error = crc ^ fr_crc(fp);
46 return error;
47}
48EXPORT_SYMBOL(fc_frame_crc_check);
49
50/*
51 * Allocate a frame intended to be sent via fcoe_xmit.
52 * Get an sk_buff for the frame and set the length.
53 */
54struct fc_frame *__fc_frame_alloc(size_t len)
55{
56 struct fc_frame *fp;
57 struct sk_buff *skb;
58
59 WARN_ON((len % sizeof(u32)) != 0);
60 len += sizeof(struct fc_frame_header);
61 skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
62 if (!skb)
63 return NULL;
64 fp = (struct fc_frame *) skb;
65 fc_frame_init(fp);
66 skb_reserve(skb, FC_FRAME_HEADROOM);
67 skb_put(skb, len);
68 return fp;
69}
70EXPORT_SYMBOL(__fc_frame_alloc);
71
72
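/*
 * Allocate a frame whose payload length need not be a multiple of four.
 * The allocation is rounded up, the rounding area is zeroed, and the skb
 * is trimmed so its length covers only the header plus the payload.
 */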
73struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
74{
75 struct fc_frame *fp;
76 size_t fill;
77
78 fill = payload_len % 4;
79 if (fill != 0)
80 fill = 4 - fill;
81 fp = __fc_frame_alloc(payload_len + fill);
82 if (fp) {
83 memset((char *) fr_hdr(fp) + payload_len, 0, fill);
84 /* trim is OK, we just allocated it so there are no fragments */
85 skb_trim(fp_skb(fp),
86 payload_len + sizeof(struct fc_frame_header));
87 }
88 return fp;
89}
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 000000000000..0b9bdb1fb807
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,1604 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport reset from happening just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy:
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
63
64/*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine can transition between states (i.e. _enter_* functions)
78 * while always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
89
90#include <linux/timer.h>
91#include <asm/unaligned.h>
92
93#include <scsi/fc/fc_gs.h>
94
95#include <scsi/libfc.h>
96#include <scsi/fc_encode.h>
97
98/* Fabric IDs to use for point-to-point mode, chosen on whims. */
99#define FC_LOCAL_PTP_FID_LO 0x010101
100#define FC_LOCAL_PTP_FID_HI 0x010102
101
102#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
103
104static int fc_lport_debug;
105
106#define FC_DEBUG_LPORT(fmt...) \
107 do { \
108 if (fc_lport_debug) \
109 FC_DBG(fmt); \
110 } while (0)
111
112static void fc_lport_error(struct fc_lport *, struct fc_frame *);
113
114static void fc_lport_enter_reset(struct fc_lport *);
115static void fc_lport_enter_flogi(struct fc_lport *);
116static void fc_lport_enter_dns(struct fc_lport *);
117static void fc_lport_enter_rpn_id(struct fc_lport *);
118static void fc_lport_enter_rft_id(struct fc_lport *);
119static void fc_lport_enter_scr(struct fc_lport *);
120static void fc_lport_enter_ready(struct fc_lport *);
121static void fc_lport_enter_logo(struct fc_lport *);
122
123static const char *fc_lport_state_names[] = {
124 [LPORT_ST_NONE] = "none",
125 [LPORT_ST_FLOGI] = "FLOGI",
126 [LPORT_ST_DNS] = "dNS",
127 [LPORT_ST_RPN_ID] = "RPN_ID",
128 [LPORT_ST_RFT_ID] = "RFT_ID",
129 [LPORT_ST_SCR] = "SCR",
130 [LPORT_ST_READY] = "Ready",
131 [LPORT_ST_LOGO] = "LOGO",
132 [LPORT_ST_RESET] = "reset",
133};
134
135static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
136{
137 fc_frame_free(fp);
138 return 0;
139}
140
141/**
142 * fc_lport_rport_callback - Event handler for rport events
143 * @lport: The lport which is receiving the event
144 * @rport: The rport which the event has occurred on
145 * @event: The event that occurred
146 *
147 * Locking Note: The rport lock should not be held when calling
148 * this function.
149 */
150static void fc_lport_rport_callback(struct fc_lport *lport,
151 struct fc_rport *rport,
152 enum fc_rport_event event)
153{
154 FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
155 rport->port_id);
156
157 switch (event) {
158 case RPORT_EV_CREATED:
159 if (rport->port_id == FC_FID_DIR_SERV) {
160 mutex_lock(&lport->lp_mutex);
161 if (lport->state == LPORT_ST_DNS) {
162 lport->dns_rp = rport;
163 fc_lport_enter_rpn_id(lport);
164 } else {
165 FC_DEBUG_LPORT("Received an CREATED event on "
166 "port (%6x) for the directory "
167 "server, but the lport is not "
168 "in the DNS state, it's in the "
169 "%d state", rport->port_id,
170 lport->state);
171 lport->tt.rport_logoff(rport);
172 }
173 mutex_unlock(&lport->lp_mutex);
174 } else
175 FC_DEBUG_LPORT("Received an event for port (%6x) "
176 "which is not the directory server\n",
177 rport->port_id);
178 break;
179 case RPORT_EV_LOGO:
180 case RPORT_EV_FAILED:
181 case RPORT_EV_STOP:
182 if (rport->port_id == FC_FID_DIR_SERV) {
183 mutex_lock(&lport->lp_mutex);
184 lport->dns_rp = NULL;
185 mutex_unlock(&lport->lp_mutex);
186
187 } else
188 FC_DEBUG_LPORT("Received an event for port (%6x) "
189 "which is not the directory server\n",
190 rport->port_id);
191 break;
192 case RPORT_EV_NONE:
193 break;
194 }
195}
196
197/**
198 * fc_lport_state - Return a string which represents the lport's state
199 * @lport: The lport whose state is to be converted to a string
200 */
201static const char *fc_lport_state(struct fc_lport *lport)
202{
203 const char *cp;
204
205 cp = fc_lport_state_names[lport->state];
206 if (!cp)
207 cp = "unknown";
208 return cp;
209}
210
211/**
212 * fc_lport_ptp_setup - Create an rport for point-to-point mode
213 * @lport: The lport to attach the ptp rport to
214 * @fid: The FID of the ptp rport
215 * @remote_wwpn: The WWPN of the ptp rport
216 * @remote_wwnn: The WWNN of the ptp rport
217 */
218static void fc_lport_ptp_setup(struct fc_lport *lport,
219 u32 remote_fid, u64 remote_wwpn,
220 u64 remote_wwnn)
221{
222 struct fc_disc_port dp;
223
224 dp.lp = lport;
225 dp.ids.port_id = remote_fid;
226 dp.ids.port_name = remote_wwpn;
227 dp.ids.node_name = remote_wwnn;
228 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
229
230 if (lport->ptp_rp) {
231 lport->tt.rport_logoff(lport->ptp_rp);
232 lport->ptp_rp = NULL;
233 }
234
235 lport->ptp_rp = fc_rport_rogue_create(&dp);
236
237 lport->tt.rport_login(lport->ptp_rp);
238
239 fc_lport_enter_ready(lport);
240}
241
242void fc_get_host_port_type(struct Scsi_Host *shost)
243{
244 /* TODO - currently just NPORT */
245 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
246}
247EXPORT_SYMBOL(fc_get_host_port_type);
248
249void fc_get_host_port_state(struct Scsi_Host *shost)
250{
251 struct fc_lport *lp = shost_priv(shost);
252
253 if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
254 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
255 else
256 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
257}
258EXPORT_SYMBOL(fc_get_host_port_state);
259
260void fc_get_host_speed(struct Scsi_Host *shost)
261{
262 struct fc_lport *lport = shost_priv(shost);
263
264 fc_host_speed(shost) = lport->link_speed;
265}
266EXPORT_SYMBOL(fc_get_host_speed);
267
268struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
269{
270 int i;
271 struct fc_host_statistics *fcoe_stats;
272 struct fc_lport *lp = shost_priv(shost);
273 struct timespec v0, v1;
274
275 fcoe_stats = &lp->host_stats;
276 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
277
278 jiffies_to_timespec(jiffies, &v0);
279 jiffies_to_timespec(lp->boot_time, &v1);
280 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
281
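 /*
 * Sum the per-CPU counters; statistics the FCoE transport does not
 * maintain are reported as -1 below.
 */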
282 for_each_online_cpu(i) {
283 struct fcoe_dev_stats *stats = lp->dev_stats[i];
284 if (stats == NULL)
285 continue;
286 fcoe_stats->tx_frames += stats->TxFrames;
287 fcoe_stats->tx_words += stats->TxWords;
288 fcoe_stats->rx_frames += stats->RxFrames;
289 fcoe_stats->rx_words += stats->RxWords;
290 fcoe_stats->error_frames += stats->ErrorFrames;
291 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
292 fcoe_stats->fcp_input_requests += stats->InputRequests;
293 fcoe_stats->fcp_output_requests += stats->OutputRequests;
294 fcoe_stats->fcp_control_requests += stats->ControlRequests;
295 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
296 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
297 fcoe_stats->link_failure_count += stats->LinkFailureCount;
298 }
299 fcoe_stats->lip_count = -1;
300 fcoe_stats->nos_count = -1;
301 fcoe_stats->loss_of_sync_count = -1;
302 fcoe_stats->loss_of_signal_count = -1;
303 fcoe_stats->prim_seq_protocol_err_count = -1;
304 fcoe_stats->dumped_frames = -1;
305 return fcoe_stats;
306}
307EXPORT_SYMBOL(fc_get_host_stats);
308
309/*
310 * Fill in FLOGI command for request.
311 */
312static void
313fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
314 unsigned int op)
315{
316 struct fc_els_csp *sp;
317 struct fc_els_cssp *cp;
318
319 memset(flogi, 0, sizeof(*flogi));
320 flogi->fl_cmd = (u8) op;
321 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
322 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
323 sp = &flogi->fl_csp;
324 sp->sp_hi_ver = 0x20;
325 sp->sp_lo_ver = 0x20;
326 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
327 sp->sp_bb_data = htons((u16) lport->mfs);
328 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
329 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
330 if (op != ELS_FLOGI) {
331 sp->sp_features = htons(FC_SP_FT_CIRO);
332 sp->sp_tot_seq = htons(255); /* seq. we accept */
333 sp->sp_rel_off = htons(0x1f);
334 sp->sp_e_d_tov = htonl(lport->e_d_tov);
335
336 cp->cp_rdfs = htons((u16) lport->mfs);
337 cp->cp_con_seq = htons(255);
338 cp->cp_open_seq = 1;
339 }
340}
341
342/*
343 * Add a supported FC-4 type.
344 */
345static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
346{
347 __be32 *mp;
348
349 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
350 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
351}
352
353/**
354 * fc_lport_recv_rlir_req - Handle received Registered Link Incident Report.
355 * @lport: Fibre Channel local port receiving the RLIR
356 * @sp: current sequence in the RLIR exchange
357 * @fp: RLIR request frame
358 *
359 * Locking Note: The lport lock is expected to be held before calling
360 * this function.
361 */
362static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
363 struct fc_lport *lport)
364{
365 FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
366 fc_lport_state(lport));
367
368 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
369 fc_frame_free(fp);
370}
371
372/**
373 * fc_lport_recv_echo_req - Handle received ECHO request
374 * @lport: Fibre Channel local port receiving the ECHO
375 * @sp: current sequence in the ECHO exchange
376 * @fp: ECHO request frame
377 *
378 * Locking Note: The lport lock is expected to be held before calling
379 * this function.
380 */
381static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
382 struct fc_lport *lport)
383{
384 struct fc_frame *fp;
385 struct fc_exch *ep = fc_seq_exch(sp);
386 unsigned int len;
387 void *pp;
388 void *dp;
389 u32 f_ctl;
390
391 FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
392 fc_lport_state(lport));
393
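 /*
 * Echo the received payload back in an LS_ACC, overwriting the first
 * word with the accept command code.
 */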
394 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
395 pp = fc_frame_payload_get(in_fp, len);
396
397 if (len < sizeof(__be32))
398 len = sizeof(__be32);
399
400 fp = fc_frame_alloc(lport, len);
401 if (fp) {
402 dp = fc_frame_payload_get(fp, len);
403 memcpy(dp, pp, len);
404 *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
405 sp = lport->tt.seq_start_next(sp);
406 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
407 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
408 FC_TYPE_ELS, f_ctl, 0);
409 lport->tt.seq_send(lport, sp, fp);
410 }
411 fc_frame_free(in_fp);
412}
413
414/**
415 * fc_lport_recv_rnid_req - Handle received Request Node ID data request
416 * @lport: Fibre Channel local port receiving the RNID
417 * @sp: current sequence in the RNID exchange
418 * @fp: RNID request frame
419 *
420 * Locking Note: The lport lock is expected to be held before calling
421 * this function.
422 */
423static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
424 struct fc_lport *lport)
425{
426 struct fc_frame *fp;
427 struct fc_exch *ep = fc_seq_exch(sp);
428 struct fc_els_rnid *req;
429 struct {
430 struct fc_els_rnid_resp rnid;
431 struct fc_els_rnid_cid cid;
432 struct fc_els_rnid_gen gen;
433 } *rp;
434 struct fc_seq_els_data rjt_data;
435 u8 fmt;
436 size_t len;
437 u32 f_ctl;
438
439 FC_DEBUG_LPORT("Received RNID request while in state %s\n",
440 fc_lport_state(lport));
441
442 req = fc_frame_payload_get(in_fp, sizeof(*req));
443 if (!req) {
444 rjt_data.fp = NULL;
445 rjt_data.reason = ELS_RJT_LOGIC;
446 rjt_data.explan = ELS_EXPL_NONE;
447 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
448 } else {
449 fmt = req->rnid_fmt;
450 len = sizeof(*rp);
451 if (fmt != ELS_RNIDF_GEN ||
452 ntohl(lport->rnid_gen.rnid_atype) == 0) {
453 fmt = ELS_RNIDF_NONE; /* nothing to provide */
454 len -= sizeof(rp->gen);
455 }
456 fp = fc_frame_alloc(lport, len);
457 if (fp) {
458 rp = fc_frame_payload_get(fp, len);
459 memset(rp, 0, len);
460 rp->rnid.rnid_cmd = ELS_LS_ACC;
461 rp->rnid.rnid_fmt = fmt;
462 rp->rnid.rnid_cid_len = sizeof(rp->cid);
463 rp->cid.rnid_wwpn = htonll(lport->wwpn);
464 rp->cid.rnid_wwnn = htonll(lport->wwnn);
465 if (fmt == ELS_RNIDF_GEN) {
466 rp->rnid.rnid_sid_len = sizeof(rp->gen);
467 memcpy(&rp->gen, &lport->rnid_gen,
468 sizeof(rp->gen));
469 }
470 sp = lport->tt.seq_start_next(sp);
471 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
472 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
473 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
474 FC_TYPE_ELS, f_ctl, 0);
475 lport->tt.seq_send(lport, sp, fp);
476 }
477 }
478 fc_frame_free(in_fp);
479}
480
481/**
482 * fc_lport_recv_adisc_req - Handle received Address Discovery Request
483 * @lport: Fibre Channel local port receiving the ADISC
484 * @sp: current sequence in the ADISC exchange
485 * @fp: ADISC request frame
486 *
487 * Locking Note: The lport lock is expected to be held before calling
488 * this function.
489 */
490static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
491 struct fc_lport *lport)
492{
493 struct fc_frame *fp;
494 struct fc_exch *ep = fc_seq_exch(sp);
495 struct fc_els_adisc *req, *rp;
496 struct fc_seq_els_data rjt_data;
497 size_t len;
498 u32 f_ctl;
499
500 FC_DEBUG_LPORT("Received ADISC request while in state %s\n",
501 fc_lport_state(lport));
502
503 req = fc_frame_payload_get(in_fp, sizeof(*req));
504 if (!req) {
505 rjt_data.fp = NULL;
506 rjt_data.reason = ELS_RJT_LOGIC;
507 rjt_data.explan = ELS_EXPL_NONE;
508 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
509 } else {
510 len = sizeof(*rp);
511 fp = fc_frame_alloc(lport, len);
512 if (fp) {
513 rp = fc_frame_payload_get(fp, len);
514 memset(rp, 0, len);
515 rp->adisc_cmd = ELS_LS_ACC;
516 rp->adisc_wwpn = htonll(lport->wwpn);
517 rp->adisc_wwnn = htonll(lport->wwnn);
518 hton24(rp->adisc_port_id,
519 fc_host_port_id(lport->host));
520 sp = lport->tt.seq_start_next(sp);
521 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
522 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
523 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
524 FC_TYPE_ELS, f_ctl, 0);
525 lport->tt.seq_send(lport, sp, fp);
526 }
527 }
528 fc_frame_free(in_fp);
529}
530
531/**
532 * fc_lport_recv_logo_req - Handle received fabric LOGO request
533 * @lport: Fibre Channel local port receiving the LOGO
534 * @sp: current sequence in the LOGO exchange
535 * @fp: LOGO request frame
536 *
537 * Locking Note: The lport lock is expected to be held before calling
538 * this function.
539 */
540static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
541 struct fc_lport *lport)
542{
543 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
544 fc_lport_enter_reset(lport);
545 fc_frame_free(fp);
546}
547
548/**
549 * fc_fabric_login - Start the lport state machine
550 * @lport: The lport that should log into the fabric
551 *
552 * Locking Note: This function should not be called
553 * with the lport lock held.
554 */
555int fc_fabric_login(struct fc_lport *lport)
556{
557 int rc = -1;
558
559 mutex_lock(&lport->lp_mutex);
560 if (lport->state == LPORT_ST_NONE) {
561 fc_lport_enter_reset(lport);
562 rc = 0;
563 }
564 mutex_unlock(&lport->lp_mutex);
565
566 return rc;
567}
568EXPORT_SYMBOL(fc_fabric_login);
569
570/**
571 * fc_linkup - Handler for transport linkup events
572 * @lport: The lport whose link is up
573 */
574void fc_linkup(struct fc_lport *lport)
575{
576 FC_DEBUG_LPORT("Link is up for port (%6x)\n",
577 fc_host_port_id(lport->host));
578
579 mutex_lock(&lport->lp_mutex);
580 if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
581 lport->link_status |= FC_LINK_UP;
582
583 if (lport->state == LPORT_ST_RESET)
584 fc_lport_enter_flogi(lport);
585 }
586 mutex_unlock(&lport->lp_mutex);
587}
588EXPORT_SYMBOL(fc_linkup);
589
590/**
591 * fc_linkdown - Handler for transport linkdown events
592 * @lport: The lport whose link is down
593 */
594void fc_linkdown(struct fc_lport *lport)
595{
596 mutex_lock(&lport->lp_mutex);
597 FC_DEBUG_LPORT("Link is down for port (%6x)\n",
598 fc_host_port_id(lport->host));
599
600 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
601 lport->link_status &= ~(FC_LINK_UP);
602 fc_lport_enter_reset(lport);
603 lport->tt.fcp_cleanup(lport);
604 }
605 mutex_unlock(&lport->lp_mutex);
606}
607EXPORT_SYMBOL(fc_linkdown);
608
609/**
610 * fc_pause - Pause the flow of frames
611 * @lport: The lport to be paused
612 */
613void fc_pause(struct fc_lport *lport)
614{
615 mutex_lock(&lport->lp_mutex);
616 lport->link_status |= FC_PAUSE;
617 mutex_unlock(&lport->lp_mutex);
618}
619EXPORT_SYMBOL(fc_pause);
620
621/**
622 * fc_unpause - Unpause the flow of frames
623 * @lport: The lport to be unpaused
624 */
625void fc_unpause(struct fc_lport *lport)
626{
627 mutex_lock(&lport->lp_mutex);
628 lport->link_status &= ~(FC_PAUSE);
629 mutex_unlock(&lport->lp_mutex);
630}
631EXPORT_SYMBOL(fc_unpause);
632
633/**
634 * fc_fabric_logoff - Logout of the fabric
635 * @lport: fc_lport pointer to logoff the fabric
636 *
637 * Return value:
638 * 0 for success, -1 for failure
639 **/
640int fc_fabric_logoff(struct fc_lport *lport)
641{
642 lport->tt.disc_stop_final(lport);
643 mutex_lock(&lport->lp_mutex);
644 fc_lport_enter_logo(lport);
645 mutex_unlock(&lport->lp_mutex);
646 return 0;
647}
648EXPORT_SYMBOL(fc_fabric_logoff);
649
650/**
651 * fc_lport_destroy - unregister a fc_lport
652 * @lport: fc_lport pointer to unregister
653 *
654 * Return value:
655 * 0 for success
656 * Note:
657 * exit routine for an fc_lport instance;
658 * cleans up all the allocated memory
659 * and frees up other system resources.
660 *
661 **/
662int fc_lport_destroy(struct fc_lport *lport)
663{
664 lport->tt.frame_send = fc_frame_drop;
665 lport->tt.fcp_abort_io(lport);
666 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
667 return 0;
668}
669EXPORT_SYMBOL(fc_lport_destroy);
670
671/**
672 * fc_set_mfs - sets up the mfs for the corresponding fc_lport
673 * @lport: fc_lport pointer to update
674 * @mfs: the new mfs for fc_lport
675 *
676 * Set mfs for the given fc_lport to the new mfs.
677 *
678 * Return: 0 for success
679 *
680 **/
681int fc_set_mfs(struct fc_lport *lport, u32 mfs)
682{
683 unsigned int old_mfs;
684 int rc = -EINVAL;
685
686 mutex_lock(&lport->lp_mutex);
687
688 old_mfs = lport->mfs;
689
690 if (mfs >= FC_MIN_MAX_FRAME) {
691 mfs &= ~3;
692 if (mfs > FC_MAX_FRAME)
693 mfs = FC_MAX_FRAME;
694 mfs -= sizeof(struct fc_frame_header);
695 lport->mfs = mfs;
696 rc = 0;
697 }
698
699 if (!rc && mfs < old_mfs)
700 fc_lport_enter_reset(lport);
701
702 mutex_unlock(&lport->lp_mutex);
703
704 return rc;
705}
706EXPORT_SYMBOL(fc_set_mfs);
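/*
 * Worked example for fc_set_mfs(), with assumed constants used only for the
 * arithmetic (say FC_MAX_FRAME is 2136 and struct fc_frame_header is 24
 * bytes): a caller passing 3000 gets 3000 & ~3 = 3000, capped to 2136, minus
 * the 24-byte header, so lport->mfs becomes 2112.  If the new mfs is smaller
 * than the old one the lport is reset so the reduced size can be
 * renegotiated (reset re-enters FLOGI while the link is up).
 */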
707
708/**
709 * fc_lport_disc_callback - Callback for discovery events
710 * @lport: FC local port
711 * @event: The discovery event
712 */
713void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
714{
715 switch (event) {
716 case DISC_EV_SUCCESS:
717 FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
718 fc_host_port_id(lport->host));
719 break;
720 case DISC_EV_FAILED:
721 FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
722 fc_host_port_id(lport->host));
723 mutex_lock(&lport->lp_mutex);
724 fc_lport_enter_reset(lport);
725 mutex_unlock(&lport->lp_mutex);
726 break;
727 case DISC_EV_NONE:
728 WARN_ON(1);
729 break;
730 }
731}
732
733/**
734 * fc_lport_enter_ready - Enter the ready state and start discovery
735 * @lport: Fibre Channel local port that is ready
736 *
737 * Locking Note: The lport lock is expected to be held before calling
738 * this routine.
739 */
740static void fc_lport_enter_ready(struct fc_lport *lport)
741{
742 FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
743 fc_host_port_id(lport->host), fc_lport_state(lport));
744
745 fc_lport_state_enter(lport, LPORT_ST_READY);
746
747 lport->tt.disc_start(fc_lport_disc_callback, lport);
748}
749
750/**
751 * fc_lport_recv_flogi_req - Receive a FLOGI request
752 * @sp_in: The sequence the FLOGI is on
753 * @rx_fp: The frame the FLOGI is in
754 * @lport: The lport that received the request
755 *
756 * A received FLOGI request indicates a point-to-point connection.
757 * Accept it with the common service parameters indicating our N port.
758 * Set up to do a PLOGI if we have the higher-number WWPN.
759 *
760 * Locking Note: The lport lock is expected to be held before calling
761 * this function.
762 */
763static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
764 struct fc_frame *rx_fp,
765 struct fc_lport *lport)
766{
767 struct fc_frame *fp;
768 struct fc_frame_header *fh;
769 struct fc_seq *sp;
770 struct fc_exch *ep;
771 struct fc_els_flogi *flp;
772 struct fc_els_flogi *new_flp;
773 u64 remote_wwpn;
774 u32 remote_fid;
775 u32 local_fid;
776 u32 f_ctl;
777
778 FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
779 fc_lport_state(lport));
780
781 fh = fc_frame_header_get(rx_fp);
782 remote_fid = ntoh24(fh->fh_s_id);
783 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
784 if (!flp)
785 goto out;
786 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
787 if (remote_wwpn == lport->wwpn) {
788 FC_DBG("FLOGI from port with same WWPN %llx "
789 "possible configuration error\n", remote_wwpn);
790 goto out;
791 }
792 FC_DBG("FLOGI from port WWPN %llx\n", remote_wwpn);
793
794 /*
795 * XXX what is the right thing to do for FIDs?
796 * The originator might expect our S_ID to be 0xfffffe.
797 * But if so, both of us could end up with the same FID.
798 */
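	/*
	 * In the assignment below, the side with the numerically higher
	 * WWPN keeps FC_LOCAL_PTP_FID_HI and the peer is given
	 * FC_LOCAL_PTP_FID_LO (unless it already presented a distinct,
	 * non-zero FID), so the two ends never share an address.
	 */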
799 local_fid = FC_LOCAL_PTP_FID_LO;
800 if (remote_wwpn < lport->wwpn) {
801 local_fid = FC_LOCAL_PTP_FID_HI;
802 if (!remote_fid || remote_fid == local_fid)
803 remote_fid = FC_LOCAL_PTP_FID_LO;
804 } else if (!remote_fid) {
805 remote_fid = FC_LOCAL_PTP_FID_HI;
806 }
807
808 fc_host_port_id(lport->host) = local_fid;
809
810 fp = fc_frame_alloc(lport, sizeof(*flp));
811 if (fp) {
812 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
813 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
814 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
815 new_flp->fl_cmd = (u8) ELS_LS_ACC;
816
817 /*
818 * Send the response. If this fails, the originator should
819 * repeat the sequence.
820 */
821 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
822 ep = fc_seq_exch(sp);
823 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
824 FC_TYPE_ELS, f_ctl, 0);
825 lport->tt.seq_send(lport, sp, fp);
826
827 } else {
828 fc_lport_error(lport, fp);
829 }
830 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
831 get_unaligned_be64(&flp->fl_wwnn));
832
833 lport->tt.disc_start(fc_lport_disc_callback, lport);
834
835out:
836 sp = fr_seq(rx_fp);
837 fc_frame_free(rx_fp);
838}
839
840/**
841 * fc_lport_recv_req - The generic lport request handler
842 * @lport: The lport that received the request
843 * @sp: The sequence the request is on
844 * @fp: The frame the request is in
845 *
846 * This function will see if the lport handles the request or
847 * if an rport should handle the request.
848 *
849 * Locking Note: This function should not be called with the lport
850 * lock held because it will grab the lock.
851 */
852static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
853 struct fc_frame *fp)
854{
855 struct fc_frame_header *fh = fc_frame_header_get(fp);
856 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
857 struct fc_rport *rport;
858 u32 s_id;
859 u32 d_id;
860 struct fc_seq_els_data rjt_data;
861
862 mutex_lock(&lport->lp_mutex);
863
864 /*
865 * Handle special ELS cases like FLOGI, LOGO, and
866 * RSCN here. These don't require a session.
867 * Even if we had a session, it might not be ready.
868 */
869 if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
870 /*
871 * Check opcode.
872 */
873 recv = NULL;
874 switch (fc_frame_payload_op(fp)) {
875 case ELS_FLOGI:
876 recv = fc_lport_recv_flogi_req;
877 break;
878 case ELS_LOGO:
879 fh = fc_frame_header_get(fp);
880 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
881 recv = fc_lport_recv_logo_req;
882 break;
883 case ELS_RSCN:
884 recv = lport->tt.disc_recv_req;
885 break;
886 case ELS_ECHO:
887 recv = fc_lport_recv_echo_req;
888 break;
889 case ELS_RLIR:
890 recv = fc_lport_recv_rlir_req;
891 break;
892 case ELS_RNID:
893 recv = fc_lport_recv_rnid_req;
894 break;
895 case ELS_ADISC:
896 recv = fc_lport_recv_adisc_req;
897 break;
898 }
899
900 if (recv)
901 recv(sp, fp, lport);
902 else {
903 /*
904 * Find session.
905 * If this is a new incoming PLOGI, we won't find it.
906 */
907 s_id = ntoh24(fh->fh_s_id);
908 d_id = ntoh24(fh->fh_d_id);
909
910 rport = lport->tt.rport_lookup(lport, s_id);
911 if (rport)
912 lport->tt.rport_recv_req(sp, fp, rport);
913 else {
914 rjt_data.fp = NULL;
915 rjt_data.reason = ELS_RJT_UNAB;
916 rjt_data.explan = ELS_EXPL_NONE;
917 lport->tt.seq_els_rsp_send(sp,
918 ELS_LS_RJT,
919 &rjt_data);
920 fc_frame_free(fp);
921 }
922 }
923 } else {
924 FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
925 fc_frame_free(fp);
926 }
927 mutex_unlock(&lport->lp_mutex);
928
929 /*
930 * The common exch_done for all request may not be good
931 * if any request requires longer hold on exhange. XXX
932 */
933 lport->tt.exch_done(sp);
934}
935
936/**
937 * fc_lport_reset - Reset an lport
938 * @lport: The lport which should be reset
939 *
940 * Locking Note: This function should not be called with the
941 * lport lock held.
942 */
943int fc_lport_reset(struct fc_lport *lport)
944{
945 mutex_lock(&lport->lp_mutex);
946 fc_lport_enter_reset(lport);
947 mutex_unlock(&lport->lp_mutex);
948 return 0;
949}
950EXPORT_SYMBOL(fc_lport_reset);
951
952/**
953 * fc_lport_enter_reset - Reset the local port
954 * @lport: Fibre Channel local port to be reset
955 *
956 * Locking Note: The lport lock is expected to be held before calling
957 * this routine.
958 */
959static void fc_lport_enter_reset(struct fc_lport *lport)
960{
961 FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
962 fc_host_port_id(lport->host), fc_lport_state(lport));
963
964 fc_lport_state_enter(lport, LPORT_ST_RESET);
965
966 if (lport->dns_rp)
967 lport->tt.rport_logoff(lport->dns_rp);
968
969 if (lport->ptp_rp) {
970 lport->tt.rport_logoff(lport->ptp_rp);
971 lport->ptp_rp = NULL;
972 }
973
974 lport->tt.disc_stop(lport);
975
976 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
977 fc_host_fabric_name(lport->host) = 0;
978 fc_host_port_id(lport->host) = 0;
979
980 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
981 fc_lport_enter_flogi(lport);
982}
983
984/**
985 * fc_lport_error - Handler for any errors
986 * @lport: The fc_lport object
987 * @fp: The frame pointer
988 *
989 * If the error was caused by a resource allocation failure
990 * then wait for half a second and retry, otherwise retry
991 * after the e_d_tov time.
992 */
993static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
994{
995 unsigned long delay = 0;
996 FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
997 PTR_ERR(fp), fc_lport_state(lport),
998 lport->retry_count);
999
1000 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
1001 /*
1002 * Memory allocation failure, or the exchange timed out.
1003 * Retry after delay
1004 */
1005 if (lport->retry_count < lport->max_retry_count) {
1006 lport->retry_count++;
1007 if (!fp)
1008 delay = msecs_to_jiffies(500);
1009 else
1010 delay = msecs_to_jiffies(lport->e_d_tov);
1011
1012 schedule_delayed_work(&lport->retry_work, delay);
1013 } else {
1014 switch (lport->state) {
1015 case LPORT_ST_NONE:
1016 case LPORT_ST_READY:
1017 case LPORT_ST_RESET:
1018 case LPORT_ST_RPN_ID:
1019 case LPORT_ST_RFT_ID:
1020 case LPORT_ST_SCR:
1021 case LPORT_ST_DNS:
1022 case LPORT_ST_FLOGI:
1023 case LPORT_ST_LOGO:
1024 fc_lport_enter_reset(lport);
1025 break;
1026 }
1027 }
1028 }
1029}
1030
1031/**
1032 * fc_lport_rft_id_resp - Handle response to Register Fibre
1033 * Channel Types by ID (RFT_ID) request
1034 * @sp: current sequence in RFT_ID exchange
1035 * @fp: response frame
1036 * @lp_arg: Fibre Channel host port instance
1037 *
1038 * Locking Note: This function will be called without the lport lock
1039 * held, but it will lock, call an _enter_* function or fc_lport_error
1040 * and then unlock the lport.
1041 */
1042static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1043 void *lp_arg)
1044{
1045 struct fc_lport *lport = lp_arg;
1046 struct fc_frame_header *fh;
1047 struct fc_ct_hdr *ct;
1048
1049 if (fp == ERR_PTR(-FC_EX_CLOSED))
1050 return;
1051
1052 mutex_lock(&lport->lp_mutex);
1053
1054 FC_DEBUG_LPORT("Received a RFT_ID response\n");
1055
1056 if (lport->state != LPORT_ST_RFT_ID) {
1057 FC_DBG("Received a RFT_ID response, but in state %s\n",
1058 fc_lport_state(lport));
1059 goto out;
1060 }
1061
1062 if (IS_ERR(fp)) {
1063 fc_lport_error(lport, fp);
1064 goto err;
1065 }
1066
1067 fh = fc_frame_header_get(fp);
1068 ct = fc_frame_payload_get(fp, sizeof(*ct));
1069
1070 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1071 ct->ct_fs_type == FC_FST_DIR &&
1072 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1073 ntohs(ct->ct_cmd) == FC_FS_ACC)
1074 fc_lport_enter_scr(lport);
1075 else
1076 fc_lport_error(lport, fp);
1077out:
1078 fc_frame_free(fp);
1079err:
1080 mutex_unlock(&lport->lp_mutex);
1081}
1082
1083/**
1084 * fc_lport_rpn_id_resp - Handle response to Register Port
1085 * Name by ID (RPN_ID) request
1086 * @sp: current sequence in RPN_ID exchange
1087 * @fp: response frame
1088 * @lp_arg: Fibre Channel host port instance
1089 *
1090 * Locking Note: This function will be called without the lport lock
1091 * held, but it will lock, call an _enter_* function or fc_lport_error
1092 * and then unlock the lport.
1093 */
1094static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1095 void *lp_arg)
1096{
1097 struct fc_lport *lport = lp_arg;
1098 struct fc_frame_header *fh;
1099 struct fc_ct_hdr *ct;
1100
1101 if (fp == ERR_PTR(-FC_EX_CLOSED))
1102 return;
1103
1104 mutex_lock(&lport->lp_mutex);
1105
1106 FC_DEBUG_LPORT("Received a RPN_ID response\n");
1107
1108 if (lport->state != LPORT_ST_RPN_ID) {
1109 FC_DBG("Received a RPN_ID response, but in state %s\n",
1110 fc_lport_state(lport));
1111 goto out;
1112 }
1113
1114 if (IS_ERR(fp)) {
1115 fc_lport_error(lport, fp);
1116 goto err;
1117 }
1118
1119 fh = fc_frame_header_get(fp);
1120 ct = fc_frame_payload_get(fp, sizeof(*ct));
1121 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1122 ct->ct_fs_type == FC_FST_DIR &&
1123 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1124 ntohs(ct->ct_cmd) == FC_FS_ACC)
1125 fc_lport_enter_rft_id(lport);
1126 else
1127 fc_lport_error(lport, fp);
1128
1129out:
1130 fc_frame_free(fp);
1131err:
1132 mutex_unlock(&lport->lp_mutex);
1133}
1134
1135/**
1136 * fc_lport_scr_resp - Handle response to State Change Register (SCR) request
1137 * @sp: current sequence in SCR exchange
1138 * @fp: response frame
1139 * @lp_arg: Fibre Channel local port instance that sent the registration request
1140 *
1141 * Locking Note: This function will be called without the lport lock
1142 * held, but it will lock, call an _enter_* function or fc_lport_error
1143 * and then unlock the lport.
1144 */
1145static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1146 void *lp_arg)
1147{
1148 struct fc_lport *lport = lp_arg;
1149 u8 op;
1150
1151 if (fp == ERR_PTR(-FC_EX_CLOSED))
1152 return;
1153
1154 mutex_lock(&lport->lp_mutex);
1155
1156 FC_DEBUG_LPORT("Received a SCR response\n");
1157
1158 if (lport->state != LPORT_ST_SCR) {
1159 FC_DBG("Received a SCR response, but in state %s\n",
1160 fc_lport_state(lport));
1161 goto out;
1162 }
1163
1164 if (IS_ERR(fp)) {
1165 fc_lport_error(lport, fp);
1166 goto err;
1167 }
1168
1169 op = fc_frame_payload_op(fp);
1170 if (op == ELS_LS_ACC)
1171 fc_lport_enter_ready(lport);
1172 else
1173 fc_lport_error(lport, fp);
1174
1175out:
1176 fc_frame_free(fp);
1177err:
1178 mutex_unlock(&lport->lp_mutex);
1179}
1180
1181/**
1182 * fc_lport_enter_scr - Send a State Change Register (SCR) request
1183 * @lport: Fibre Channel local port to register for state changes
1184 *
1185 * Locking Note: The lport lock is expected to be held before calling
1186 * this routine.
1187 */
1188static void fc_lport_enter_scr(struct fc_lport *lport)
1189{
1190 struct fc_frame *fp;
1191
1192 FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
1193 fc_host_port_id(lport->host), fc_lport_state(lport));
1194
1195 fc_lport_state_enter(lport, LPORT_ST_SCR);
1196
1197 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1198 if (!fp) {
1199 fc_lport_error(lport, fp);
1200 return;
1201 }
1202
1203 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR,
1204 fc_lport_scr_resp, lport, lport->e_d_tov))
1205 fc_lport_error(lport, fp);
1206}
1207
1208/**
1209 * fc_lport_enter_rft_id - Register FC4-types with the name server
1210 * @lport: Fibre Channel local port to register
1211 *
1212 * Locking Note: The lport lock is expected to be held before calling
1213 * this routine.
1214 */
1215static void fc_lport_enter_rft_id(struct fc_lport *lport)
1216{
1217 struct fc_frame *fp;
1218 struct fc_ns_fts *lps;
1219 int i;
1220
1221 FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
1222 fc_host_port_id(lport->host), fc_lport_state(lport));
1223
1224 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1225
1226 lps = &lport->fcts;
1227 i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
1228 while (--i >= 0)
1229 if (ntohl(lps->ff_type_map[i]) != 0)
1230 break;
1231 if (i < 0) {
1232 /* nothing to register, move on to SCR */
1233 fc_lport_enter_scr(lport);
1234 return;
1235 }
1236
1237 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1238 sizeof(struct fc_ns_rft));
1239 if (!fp) {
1240 fc_lport_error(lport, fp);
1241 return;
1242 }
1243
1244 if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID,
1245 fc_lport_rft_id_resp,
1246 lport, lport->e_d_tov))
1247 fc_lport_error(lport, fp);
1248}
1249
1250/**
1251 * fc_lport_enter_rpn_id - Register port name (RPN_ID) with the name server
1252 * @lport: Fibre Channel local port to register
1253 *
1254 * Locking Note: The lport lock is expected to be held before calling
1255 * this routine.
1256 */
1257static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1258{
1259 struct fc_frame *fp;
1260
1261 FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
1262 fc_host_port_id(lport->host), fc_lport_state(lport));
1263
1264 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1265
1266 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1267 sizeof(struct fc_ns_rn_id));
1268 if (!fp) {
1269 fc_lport_error(lport, fp);
1270 return;
1271 }
1272
1273 if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID,
1274 fc_lport_rpn_id_resp,
1275 lport, lport->e_d_tov))
1276 fc_lport_error(lport, fp);
1277}
1278
1279static struct fc_rport_operations fc_lport_rport_ops = {
1280 .event_callback = fc_lport_rport_callback,
1281};
1282
1283/**
1284 * fc_lport_enter_dns - Create an rport for the name server
1285 * @lport: Fibre Channel local port requesting a rport for the name server
1286 *
1287 * Locking Note: The lport lock is expected to be held before calling
1288 * this routine.
1289 */
1290static void fc_lport_enter_dns(struct fc_lport *lport)
1291{
1292 struct fc_rport *rport;
1293 struct fc_rport_libfc_priv *rdata;
1294 struct fc_disc_port dp;
1295
1296 dp.ids.port_id = FC_FID_DIR_SERV;
1297 dp.ids.port_name = -1;
1298 dp.ids.node_name = -1;
1299 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
1300 dp.lp = lport;
1301
1302 FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
1303 fc_host_port_id(lport->host), fc_lport_state(lport));
1304
1305 fc_lport_state_enter(lport, LPORT_ST_DNS);
1306
1307 rport = fc_rport_rogue_create(&dp);
1308 if (!rport)
1309 goto err;
1310
1311 rdata = rport->dd_data;
1312 rdata->ops = &fc_lport_rport_ops;
1313 lport->tt.rport_login(rport);
1314 return;
1315
1316err:
1317 fc_lport_error(lport, NULL);
1318}
1319
1320/**
1321 * fc_lport_timeout - Handler for the retry_work timer.
1322 * @work: The work struct of the fc_lport
1323 */
1324static void fc_lport_timeout(struct work_struct *work)
1325{
1326 struct fc_lport *lport =
1327 container_of(work, struct fc_lport,
1328 retry_work.work);
1329
1330 mutex_lock(&lport->lp_mutex);
1331
1332 switch (lport->state) {
1333 case LPORT_ST_NONE:
1334 case LPORT_ST_READY:
1335 case LPORT_ST_RESET:
1336 WARN_ON(1);
1337 break;
1338 case LPORT_ST_FLOGI:
1339 fc_lport_enter_flogi(lport);
1340 break;
1341 case LPORT_ST_DNS:
1342 fc_lport_enter_dns(lport);
1343 break;
1344 case LPORT_ST_RPN_ID:
1345 fc_lport_enter_rpn_id(lport);
1346 break;
1347 case LPORT_ST_RFT_ID:
1348 fc_lport_enter_rft_id(lport);
1349 break;
1350 case LPORT_ST_SCR:
1351 fc_lport_enter_scr(lport);
1352 break;
1353 case LPORT_ST_LOGO:
1354 fc_lport_enter_logo(lport);
1355 break;
1356 }
1357
1358 mutex_unlock(&lport->lp_mutex);
1359}
1360
1361/**
1362 * fc_lport_logo_resp - Handle response to LOGO request
1363 * @sp: current sequence in LOGO exchange
1364 * @fp: response frame
1365 * @lp_arg: Fibre Channel local port instance that sent the LOGO request
1366 *
1367 * Locking Note: This function will be called without the lport lock
1368 * held, but it will lock, call an _enter_* function or fc_lport_error
1369 * and then unlock the lport.
1370 */
1371static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1372 void *lp_arg)
1373{
1374 struct fc_lport *lport = lp_arg;
1375 u8 op;
1376
1377 if (fp == ERR_PTR(-FC_EX_CLOSED))
1378 return;
1379
1380 mutex_lock(&lport->lp_mutex);
1381
1382 FC_DEBUG_LPORT("Received a LOGO response\n");
1383
1384 if (lport->state != LPORT_ST_LOGO) {
1385 FC_DBG("Received a LOGO response, but in state %s\n",
1386 fc_lport_state(lport));
1387 goto out;
1388 }
1389
1390 if (IS_ERR(fp)) {
1391 fc_lport_error(lport, fp);
1392 goto err;
1393 }
1394
1395 op = fc_frame_payload_op(fp);
1396 if (op == ELS_LS_ACC)
1397 fc_lport_enter_reset(lport);
1398 else
1399 fc_lport_error(lport, fp);
1400
1401out:
1402 fc_frame_free(fp);
1403err:
1404 mutex_unlock(&lport->lp_mutex);
1405}
1406
1407/**
1408 * fc_lport_enter_logo - Logout of the fabric
1409 * @lport: Fibre Channel local port to be logged out
1410 *
1411 * Locking Note: The lport lock is expected to be held before calling
1412 * this routine.
1413 */
1414static void fc_lport_enter_logo(struct fc_lport *lport)
1415{
1416 struct fc_frame *fp;
1417 struct fc_els_logo *logo;
1418
1419 FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
1420 fc_host_port_id(lport->host), fc_lport_state(lport));
1421
1422 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1423
1424 /* DNS session should be closed so we can release it here */
1425 if (lport->dns_rp)
1426 lport->tt.rport_logoff(lport->dns_rp);
1427
1428 fp = fc_frame_alloc(lport, sizeof(*logo));
1429 if (!fp) {
1430 fc_lport_error(lport, fp);
1431 return;
1432 }
1433
1434 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp,
1435 lport, lport->e_d_tov))
1436 fc_lport_error(lport, fp);
1437}
1438
1439/**
1440 * fc_lport_flogi_resp - Handle response to FLOGI request
1441 * @sp: current sequence in FLOGI exchange
1442 * @fp: response frame
1443 * @lp_arg: Fibre Channel local port instance that sent the FLOGI request
1444 *
1445 * Locking Note: This function will be called without the lport lock
1446 * held, but it will lock, call an _enter_* function or fc_lport_error
1447 * and then unlock the lport.
1448 */
1449static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1450 void *lp_arg)
1451{
1452 struct fc_lport *lport = lp_arg;
1453 struct fc_frame_header *fh;
1454 struct fc_els_flogi *flp;
1455 u32 did;
1456 u16 csp_flags;
1457 unsigned int r_a_tov;
1458 unsigned int e_d_tov;
1459 u16 mfs;
1460
1461 if (fp == ERR_PTR(-FC_EX_CLOSED))
1462 return;
1463
1464 mutex_lock(&lport->lp_mutex);
1465
1466 FC_DEBUG_LPORT("Received a FLOGI response\n");
1467
1468 if (lport->state != LPORT_ST_FLOGI) {
1469 FC_DBG("Received a FLOGI response, but in state %s\n",
1470 fc_lport_state(lport));
1471 goto out;
1472 }
1473
1474 if (IS_ERR(fp)) {
1475 fc_lport_error(lport, fp);
1476 goto err;
1477 }
1478
1479 fh = fc_frame_header_get(fp);
1480 did = ntoh24(fh->fh_d_id);
1481 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1482
1483 FC_DEBUG_LPORT("Assigned fid %x\n", did);
1484 fc_host_port_id(lport->host) = did;
1485
1486 flp = fc_frame_payload_get(fp, sizeof(*flp));
1487 if (flp) {
1488 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1489 FC_SP_BB_DATA_MASK;
1490 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1491 mfs < lport->mfs)
1492 lport->mfs = mfs;
1493 csp_flags = ntohs(flp->fl_csp.sp_features);
1494 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1495 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1496 if (csp_flags & FC_SP_FT_EDTR)
1497 e_d_tov /= 1000000;
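			/*
			 * FC_SP_FT_FPORT in the common service parameters
			 * indicates the responder is a fabric F_Port; when
			 * it is clear the FLOGI was answered by another
			 * N_Port, so the code below sets up point-to-point
			 * mode instead of proceeding to name-server (dNS)
			 * registration.
			 */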
1498 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1499 if (e_d_tov > lport->e_d_tov)
1500 lport->e_d_tov = e_d_tov;
1501 lport->r_a_tov = 2 * e_d_tov;
1502 FC_DBG("Point-to-Point mode\n");
1503 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1504 get_unaligned_be64(
1505 &flp->fl_wwpn),
1506 get_unaligned_be64(
1507 &flp->fl_wwnn));
1508 } else {
1509 lport->e_d_tov = e_d_tov;
1510 lport->r_a_tov = r_a_tov;
1511 fc_host_fabric_name(lport->host) =
1512 get_unaligned_be64(&flp->fl_wwnn);
1513 fc_lport_enter_dns(lport);
1514 }
1515 }
1516
1517 if (flp) {
1518 csp_flags = ntohs(flp->fl_csp.sp_features);
1519 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1520 lport->tt.disc_start(fc_lport_disc_callback,
1521 lport);
1522 }
1523 }
1524 } else {
1525 FC_DBG("bad FLOGI response\n");
1526 }
1527
1528out:
1529 fc_frame_free(fp);
1530err:
1531 mutex_unlock(&lport->lp_mutex);
1532}
1533
1534/**
1535 * fc_lport_enter_flogi - Send a FLOGI request to the fabric manager
1536 * @lport: Fibre Channel local port to be logged in to the fabric
1537 *
1538 * Locking Note: The lport lock is expected to be held before calling
1539 * this routine.
1540 */
1541void fc_lport_enter_flogi(struct fc_lport *lport)
1542{
1543 struct fc_frame *fp;
1544
1545 FC_DEBUG_LPORT("Processing FLOGI state\n");
1546
1547 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1548
1549 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1550 if (!fp)
1551 return fc_lport_error(lport, fp);
1552
1553 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI,
1554 fc_lport_flogi_resp, lport, lport->e_d_tov))
1555 fc_lport_error(lport, fp);
1556}
1557
1558/* Configure a fc_lport */
1559int fc_lport_config(struct fc_lport *lport)
1560{
1561 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1562 mutex_init(&lport->lp_mutex);
1563
1564 fc_lport_state_enter(lport, LPORT_ST_NONE);
1565
1566 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1567 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1568
1569 return 0;
1570}
1571EXPORT_SYMBOL(fc_lport_config);
1572
1573int fc_lport_init(struct fc_lport *lport)
1574{
1575 if (!lport->tt.lport_recv)
1576 lport->tt.lport_recv = fc_lport_recv_req;
1577
1578 if (!lport->tt.lport_reset)
1579 lport->tt.lport_reset = fc_lport_reset;
1580
1581 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1582 fc_host_node_name(lport->host) = lport->wwnn;
1583 fc_host_port_name(lport->host) = lport->wwpn;
1584 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1585 memset(fc_host_supported_fc4s(lport->host), 0,
1586 sizeof(fc_host_supported_fc4s(lport->host)));
1587 fc_host_supported_fc4s(lport->host)[2] = 1;
1588 fc_host_supported_fc4s(lport->host)[7] = 1;
1589
1590 /* This value is also unchanging */
1591 memset(fc_host_active_fc4s(lport->host), 0,
1592 sizeof(fc_host_active_fc4s(lport->host)));
1593 fc_host_active_fc4s(lport->host)[2] = 1;
1594 fc_host_active_fc4s(lport->host)[7] = 1;
1595 fc_host_maxframe_size(lport->host) = lport->mfs;
1596 fc_host_supported_speeds(lport->host) = 0;
1597 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1598 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1599 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1600 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1601
1602 return 0;
1603}
1604EXPORT_SYMBOL(fc_lport_init);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 000000000000..e780d8caf70e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,1291 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * RPORT GENERAL INFO
22 *
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class with regard to adding and deleting rports.
27 *
28 * fc_rports represent N_Ports within the fabric.
29 */
30
31/*
32 * RPORT LOCKING
33 *
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
38 *
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time. Since rports are not critical to the I/O
44 * path, this potential over-use of the mutex is acceptable.
45 */
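/*
 * A minimal sketch of the entry-point pattern described above; the function
 * name is purely illustrative (see fc_rport_login() below for a real
 * instance):
 *
 *	int fc_rport_example_entry(struct fc_rport *rport)
 *	{
 *		struct fc_rport_libfc_priv *rdata = rport->dd_data;
 *
 *		mutex_lock(&rdata->rp_mutex);
 *		fc_rport_enter_plogi(rport);	(_enter_* expects the lock)
 *		mutex_unlock(&rdata->rp_mutex);
 *		return 0;
 *	}
 */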
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/interrupt.h>
50#include <linux/rcupdate.h>
51#include <linux/timer.h>
52#include <linux/workqueue.h>
53#include <asm/unaligned.h>
54
55#include <scsi/libfc.h>
56#include <scsi/fc_encode.h>
57
58static int fc_rport_debug;
59
60#define FC_DEBUG_RPORT(fmt...) \
61 do { \
62 if (fc_rport_debug) \
63 FC_DBG(fmt); \
64 } while (0)
65
66struct workqueue_struct *rport_event_queue;
67
68static void fc_rport_enter_plogi(struct fc_rport *);
69static void fc_rport_enter_prli(struct fc_rport *);
70static void fc_rport_enter_rtv(struct fc_rport *);
71static void fc_rport_enter_ready(struct fc_rport *);
72static void fc_rport_enter_logo(struct fc_rport *);
73
74static void fc_rport_recv_plogi_req(struct fc_rport *,
75 struct fc_seq *, struct fc_frame *);
76static void fc_rport_recv_prli_req(struct fc_rport *,
77 struct fc_seq *, struct fc_frame *);
78static void fc_rport_recv_prlo_req(struct fc_rport *,
79 struct fc_seq *, struct fc_frame *);
80static void fc_rport_recv_logo_req(struct fc_rport *,
81 struct fc_seq *, struct fc_frame *);
82static void fc_rport_timeout(struct work_struct *);
83static void fc_rport_error(struct fc_rport *, struct fc_frame *);
84static void fc_rport_work(struct work_struct *);
85
86static const char *fc_rport_state_names[] = {
87 [RPORT_ST_NONE] = "None",
88 [RPORT_ST_INIT] = "Init",
89 [RPORT_ST_PLOGI] = "PLOGI",
90 [RPORT_ST_PRLI] = "PRLI",
91 [RPORT_ST_RTV] = "RTV",
92 [RPORT_ST_READY] = "Ready",
93 [RPORT_ST_LOGO] = "LOGO",
94};
95
96static void fc_rport_rogue_destroy(struct device *dev)
97{
98 struct fc_rport *rport = dev_to_rport(dev);
99 FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id);
100 kfree(rport);
101}
102
103struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
104{
105 struct fc_rport *rport;
106 struct fc_rport_libfc_priv *rdata;
107 rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
108
109 if (!rport)
110 return NULL;
111
112 rdata = RPORT_TO_PRIV(rport);
113
114 rport->dd_data = rdata;
115 rport->port_id = dp->ids.port_id;
116 rport->port_name = dp->ids.port_name;
117 rport->node_name = dp->ids.node_name;
118 rport->roles = dp->ids.roles;
119 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
120 /*
121 * Note: all this libfc rogue rport code will be removed for
122 * upstream, so it is fine that this is really ugly and hacky right now.
123 */
124 device_initialize(&rport->dev);
125 rport->dev.release = fc_rport_rogue_destroy;
126
127 mutex_init(&rdata->rp_mutex);
128 rdata->local_port = dp->lp;
129 rdata->trans_state = FC_PORTSTATE_ROGUE;
130 rdata->rp_state = RPORT_ST_INIT;
131 rdata->event = RPORT_EV_NONE;
132 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
133 rdata->ops = NULL;
134 rdata->e_d_tov = dp->lp->e_d_tov;
135 rdata->r_a_tov = dp->lp->r_a_tov;
136 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
137 INIT_WORK(&rdata->event_work, fc_rport_work);
138 /*
139 * For good measure, but not necessary as we should only
140 * add REAL rports to the lport list.
141 */
142 INIT_LIST_HEAD(&rdata->peers);
143
144 return rport;
145}
146
147/**
148 * fc_rport_state - return a string for the state the rport is in
149 * @rport: The rport whose state we want to get a string for
150 */
151static const char *fc_rport_state(struct fc_rport *rport)
152{
153 const char *cp;
154 struct fc_rport_libfc_priv *rdata = rport->dd_data;
155
156 cp = fc_rport_state_names[rdata->rp_state];
157 if (!cp)
158 cp = "Unknown";
159 return cp;
160}
161
162/**
163 * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds.
164 * @rport: Pointer to Fibre Channel remote port structure
165 * @timeout: timeout in seconds
166 */
167void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
168{
169 if (timeout)
170 rport->dev_loss_tmo = timeout + 5;
171 else
172 rport->dev_loss_tmo = 30;
173}
174EXPORT_SYMBOL(fc_set_rport_loss_tmo);
175
176/**
177 * fc_plogi_get_maxframe - Get max payload from the common service parameters
178 * @flp: FLOGI payload structure
179 * @maxval: upper limit, may be less than what is in the service parameters
180 */
181static unsigned int
182fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
183{
184 unsigned int mfs;
185
186 /*
187 * Get max payload from the common service parameters and the
188 * class 3 receive data field size.
189 */
190 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
191 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
192 maxval = mfs;
193 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
194 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
195 maxval = mfs;
196 return maxval;
197}
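/*
 * Worked example for fc_plogi_get_maxframe(), with made-up numbers: if the
 * common service parameters advertise 2048, the class 3 service parameter
 * page advertises 1024 and maxval comes in as 2112, the result is 1024, the
 * smallest of the three that is still at least FC_SP_MIN_MAX_PAYLOAD.
 */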
198
199/**
200 * fc_rport_state_enter - Change the rport's state
201 * @rport: The rport whose state should change
202 * @new: The new state of the rport
203 *
204 * Locking Note: Called with the rport lock held
205 */
206static void fc_rport_state_enter(struct fc_rport *rport,
207 enum fc_rport_state new)
208{
209 struct fc_rport_libfc_priv *rdata = rport->dd_data;
210 if (rdata->rp_state != new)
211 rdata->retries = 0;
212 rdata->rp_state = new;
213}
214
215static void fc_rport_work(struct work_struct *work)
216{
217 struct fc_rport_libfc_priv *rdata =
218 container_of(work, struct fc_rport_libfc_priv, event_work);
219 enum fc_rport_event event;
220 enum fc_rport_trans_state trans_state;
221 struct fc_lport *lport = rdata->local_port;
222 struct fc_rport_operations *rport_ops;
223 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
224
225 mutex_lock(&rdata->rp_mutex);
226 event = rdata->event;
227 rport_ops = rdata->ops;
228
229 if (event == RPORT_EV_CREATED) {
230 struct fc_rport *new_rport;
231 struct fc_rport_libfc_priv *new_rdata;
232 struct fc_rport_identifiers ids;
233
234 ids.port_id = rport->port_id;
235 ids.roles = rport->roles;
236 ids.port_name = rport->port_name;
237 ids.node_name = rport->node_name;
238
239 mutex_unlock(&rdata->rp_mutex);
240
241 new_rport = fc_remote_port_add(lport->host, 0, &ids);
242 if (new_rport) {
243 /*
244 * Switch from the rogue rport to the rport
245 * returned by the FC class.
246 */
247 new_rport->maxframe_size = rport->maxframe_size;
248
249 new_rdata = new_rport->dd_data;
250 new_rdata->e_d_tov = rdata->e_d_tov;
251 new_rdata->r_a_tov = rdata->r_a_tov;
252 new_rdata->ops = rdata->ops;
253 new_rdata->local_port = rdata->local_port;
254 new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
255 new_rdata->trans_state = FC_PORTSTATE_REAL;
256 mutex_init(&new_rdata->rp_mutex);
257 INIT_DELAYED_WORK(&new_rdata->retry_work,
258 fc_rport_timeout);
259 INIT_LIST_HEAD(&new_rdata->peers);
260 INIT_WORK(&new_rdata->event_work, fc_rport_work);
261
262 fc_rport_state_enter(new_rport, RPORT_ST_READY);
263 } else {
264 FC_DBG("Failed to create the rport for port "
265 "(%6x).\n", ids.port_id);
266 event = RPORT_EV_FAILED;
267 }
268 put_device(&rport->dev);
269 rport = new_rport;
270 rdata = new_rport->dd_data;
271 if (rport_ops->event_callback)
272 rport_ops->event_callback(lport, rport, event);
273 } else if ((event == RPORT_EV_FAILED) ||
274 (event == RPORT_EV_LOGO) ||
275 (event == RPORT_EV_STOP)) {
276 trans_state = rdata->trans_state;
277 mutex_unlock(&rdata->rp_mutex);
278 if (rport_ops->event_callback)
279 rport_ops->event_callback(lport, rport, event);
280 if (trans_state == FC_PORTSTATE_ROGUE)
281 put_device(&rport->dev);
282 else
283 fc_remote_port_delete(rport);
284 } else
285 mutex_unlock(&rdata->rp_mutex);
286}
287
288/**
289 * fc_rport_login - Start the remote port login state machine
290 * @rport: Fibre Channel remote port
291 *
292 * Locking Note: Called without the rport lock held. This
293 * function will hold the rport lock, call an _enter_*
294 * function and then unlock the rport.
295 */
296int fc_rport_login(struct fc_rport *rport)
297{
298 struct fc_rport_libfc_priv *rdata = rport->dd_data;
299
300 mutex_lock(&rdata->rp_mutex);
301
302 FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
303
304 fc_rport_enter_plogi(rport);
305
306 mutex_unlock(&rdata->rp_mutex);
307
308 return 0;
309}
310
311/**
312 * fc_rport_logoff - Logoff and remove an rport
313 * @rport: Fibre Channel remote port to be removed
314 *
315 * Locking Note: Called without the rport lock held. This
316 * function will hold the rport lock, call an _enter_*
317 * function and then unlock the rport.
318 */
319int fc_rport_logoff(struct fc_rport *rport)
320{
321 struct fc_rport_libfc_priv *rdata = rport->dd_data;
322
323 mutex_lock(&rdata->rp_mutex);
324
325 FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
326
327 fc_rport_enter_logo(rport);
328
329 /*
330 * Change the state to NONE so that we discard
331 * the response.
332 */
333 fc_rport_state_enter(rport, RPORT_ST_NONE);
334
335 mutex_unlock(&rdata->rp_mutex);
336
337 cancel_delayed_work_sync(&rdata->retry_work);
338
339 mutex_lock(&rdata->rp_mutex);
340
341 rdata->event = RPORT_EV_STOP;
342 queue_work(rport_event_queue, &rdata->event_work);
343
344 mutex_unlock(&rdata->rp_mutex);
345
346 return 0;
347}
348
349/**
350 * fc_rport_enter_ready - The rport is ready
351 * @rport: Fibre Channel remote port that is ready
352 *
353 * Locking Note: The rport lock is expected to be held before calling
354 * this routine.
355 */
356static void fc_rport_enter_ready(struct fc_rport *rport)
357{
358 struct fc_rport_libfc_priv *rdata = rport->dd_data;
359
360 fc_rport_state_enter(rport, RPORT_ST_READY);
361
362 FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
363
364 rdata->event = RPORT_EV_CREATED;
365 queue_work(rport_event_queue, &rdata->event_work);
366}
367
368/**
369 * fc_rport_timeout - Handler for the retry_work timer.
370 * @work: The work struct of the fc_rport_libfc_priv
371 *
372 * Locking Note: Called without the rport lock held. This
373 * function will hold the rport lock, call an _enter_*
374 * function and then unlock the rport.
375 */
376static void fc_rport_timeout(struct work_struct *work)
377{
378 struct fc_rport_libfc_priv *rdata =
379 container_of(work, struct fc_rport_libfc_priv, retry_work.work);
380 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
381
382 mutex_lock(&rdata->rp_mutex);
383
384 switch (rdata->rp_state) {
385 case RPORT_ST_PLOGI:
386 fc_rport_enter_plogi(rport);
387 break;
388 case RPORT_ST_PRLI:
389 fc_rport_enter_prli(rport);
390 break;
391 case RPORT_ST_RTV:
392 fc_rport_enter_rtv(rport);
393 break;
394 case RPORT_ST_LOGO:
395 fc_rport_enter_logo(rport);
396 break;
397 case RPORT_ST_READY:
398 case RPORT_ST_INIT:
399 case RPORT_ST_NONE:
400 break;
401 }
402
403 mutex_unlock(&rdata->rp_mutex);
404 put_device(&rport->dev);
405}
406
407/**
408 * fc_rport_error - Handler for any errors
409 * @rport: The fc_rport object
410 * @fp: The frame pointer
411 *
412 * If the error was caused by a resource allocation failure
413 * then wait for half a second and retry, otherwise retry
414 * immediately.
415 *
416 * Locking Note: The rport lock is expected to be held before
417 * calling this routine
418 */
419static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
420{
421 struct fc_rport_libfc_priv *rdata = rport->dd_data;
422 unsigned long delay = 0;
423
424 FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
425 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
426
427 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
428 /*
429 * Memory allocation failure, or the exchange timed out.
430 * Retry after delay
431 */
432 if (rdata->retries < rdata->local_port->max_retry_count) {
433 rdata->retries++;
434 if (!fp)
435 delay = msecs_to_jiffies(500);
436 get_device(&rport->dev);
437 schedule_delayed_work(&rdata->retry_work, delay);
438 } else {
439 switch (rdata->rp_state) {
440 case RPORT_ST_PLOGI:
441 case RPORT_ST_PRLI:
442 case RPORT_ST_LOGO:
443 rdata->event = RPORT_EV_FAILED;
444 queue_work(rport_event_queue,
445 &rdata->event_work);
446 break;
447 case RPORT_ST_RTV:
448 fc_rport_enter_ready(rport);
449 break;
450 case RPORT_ST_NONE:
451 case RPORT_ST_READY:
452 case RPORT_ST_INIT:
453 break;
454 }
455 }
456 }
457}
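/*
 * Note on the retry path above: get_device() is taken before retry_work is
 * scheduled and the matching put_device() is done at the end of
 * fc_rport_timeout(), so the rport stays pinned while a retry is pending.
 */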
458
459/**
460 * fc_rport_plogi_resp - Handle incoming ELS PLOGI response
461 * @sp: current sequence in the PLOGI exchange
462 * @fp: response frame
463 * @rp_arg: Fibre Channel remote port
464 *
465 * Locking Note: This function will be called without the rport lock
466 * held, but it will lock, call an _enter_* function or fc_rport_error
467 * and then unlock the rport.
468 */
469static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
470 void *rp_arg)
471{
472 struct fc_rport *rport = rp_arg;
473 struct fc_rport_libfc_priv *rdata = rport->dd_data;
474 struct fc_lport *lport = rdata->local_port;
475 struct fc_els_flogi *plp;
476 unsigned int tov;
477 u16 csp_seq;
478 u16 cssp_seq;
479 u8 op;
480
481 mutex_lock(&rdata->rp_mutex);
482
483 FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
484 rport->port_id);
485
486 if (rdata->rp_state != RPORT_ST_PLOGI) {
487 FC_DBG("Received a PLOGI response, but in state %s\n",
488 fc_rport_state(rport));
489 goto out;
490 }
491
492 if (IS_ERR(fp)) {
493 fc_rport_error(rport, fp);
494 goto err;
495 }
496
497 op = fc_frame_payload_op(fp);
498 if (op == ELS_LS_ACC &&
499 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
500 rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
501 rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
502
503 tov = ntohl(plp->fl_csp.sp_e_d_tov);
504 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
505 tov /= 1000;
506 if (tov > rdata->e_d_tov)
507 rdata->e_d_tov = tov;
508 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
509 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
510 if (cssp_seq < csp_seq)
511 csp_seq = cssp_seq;
512 rdata->max_seq = csp_seq;
513 rport->maxframe_size =
514 fc_plogi_get_maxframe(plp, lport->mfs);
515
516 /*
517 * If the rport is one of the well known addresses
518 * we skip PRLI and RTV and go straight to READY.
519 */
520 if (rport->port_id >= FC_FID_DOM_MGR)
521 fc_rport_enter_ready(rport);
522 else
523 fc_rport_enter_prli(rport);
524 } else
525 fc_rport_error(rport, fp);
526
527out:
528 fc_frame_free(fp);
529err:
530 mutex_unlock(&rdata->rp_mutex);
531 put_device(&rport->dev);
532}
533
534/**
535 * fc_rport_enter_plogi - Send Port Login (PLOGI) request to peer
536 * @rport: Fibre Channel remote port to send PLOGI to
537 *
538 * Locking Note: The rport lock is expected to be held before calling
539 * this routine.
540 */
541static void fc_rport_enter_plogi(struct fc_rport *rport)
542{
543 struct fc_rport_libfc_priv *rdata = rport->dd_data;
544 struct fc_lport *lport = rdata->local_port;
545 struct fc_frame *fp;
546
547 FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
548 rport->port_id, fc_rport_state(rport));
549
550 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
551
552 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
553 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
554 if (!fp) {
555 fc_rport_error(rport, fp);
556 return;
557 }
558 rdata->e_d_tov = lport->e_d_tov;
559
560 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
561 fc_rport_plogi_resp, rport, lport->e_d_tov))
562 fc_rport_error(rport, fp);
563 else
564 get_device(&rport->dev);
565}
566
567/**
568 * fc_rport_prli_resp - Process Login (PRLI) response handler
569 * @sp: current sequence in the PRLI exchange
570 * @fp: response frame
571 * @rp_arg: Fibre Channel remote port
572 *
573 * Locking Note: This function will be called without the rport lock
574 * held, but it will lock, call an _enter_* function or fc_rport_error
575 * and then unlock the rport.
576 */
577static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
578 void *rp_arg)
579{
580 struct fc_rport *rport = rp_arg;
581 struct fc_rport_libfc_priv *rdata = rport->dd_data;
582 struct {
583 struct fc_els_prli prli;
584 struct fc_els_spp spp;
585 } *pp;
586 u32 roles = FC_RPORT_ROLE_UNKNOWN;
587 u32 fcp_parm = 0;
588 u8 op;
589
590 mutex_lock(&rdata->rp_mutex);
591
592 FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
593 rport->port_id);
594
595 if (rdata->rp_state != RPORT_ST_PRLI) {
596 FC_DBG("Received a PRLI response, but in state %s\n",
597 fc_rport_state(rport));
598 goto out;
599 }
600
601 if (IS_ERR(fp)) {
602 fc_rport_error(rport, fp);
603 goto err;
604 }
605
606 op = fc_frame_payload_op(fp);
607 if (op == ELS_LS_ACC) {
608 pp = fc_frame_payload_get(fp, sizeof(*pp));
609 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
610 fcp_parm = ntohl(pp->spp.spp_params);
611 if (fcp_parm & FCP_SPPF_RETRY)
612 rdata->flags |= FC_RP_FLAGS_RETRY;
613 }
614
615 rport->supported_classes = FC_COS_CLASS3;
616 if (fcp_parm & FCP_SPPF_INIT_FCN)
617 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
618 if (fcp_parm & FCP_SPPF_TARG_FCN)
619 roles |= FC_RPORT_ROLE_FCP_TARGET;
620
621 rport->roles = roles;
622 fc_rport_enter_rtv(rport);
623
624 } else {
625 FC_DBG("Bad ELS response\n");
626 rdata->event = RPORT_EV_FAILED;
627 queue_work(rport_event_queue, &rdata->event_work);
628 }
629
630out:
631 fc_frame_free(fp);
632err:
633 mutex_unlock(&rdata->rp_mutex);
634 put_device(&rport->dev);
635}
636
637/**
638 * fc_rport_logo_resp - Logout (LOGO) response handler
639 * @sp: current sequence in the LOGO exchange
640 * @fp: response frame
641 * @rp_arg: Fibre Channel remote port
642 *
643 * Locking Note: This function will be called without the rport lock
644 * held, but it will lock, call an _enter_* function or fc_rport_error
645 * and then unlock the rport.
646 */
647static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
648 void *rp_arg)
649{
650 struct fc_rport *rport = rp_arg;
651 struct fc_rport_libfc_priv *rdata = rport->dd_data;
652 u8 op;
653
654 mutex_lock(&rdata->rp_mutex);
655
656 FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
657 rport->port_id);
658
659 if (IS_ERR(fp)) {
660 fc_rport_error(rport, fp);
661 goto err;
662 }
663
664 if (rdata->rp_state != RPORT_ST_LOGO) {
665 FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
666 fc_rport_state(rport));
667 goto out;
668 }
669
670 op = fc_frame_payload_op(fp);
671 if (op == ELS_LS_ACC) {
672 fc_rport_enter_rtv(rport);
673 } else {
674 FC_DBG("Bad ELS response\n");
675 rdata->event = RPORT_EV_LOGO;
676 queue_work(rport_event_queue, &rdata->event_work);
677 }
678
679out:
680 fc_frame_free(fp);
681err:
682 mutex_unlock(&rdata->rp_mutex);
683 put_device(&rport->dev);
684}
685
686/**
687 * fc_rport_enter_prli - Send Process Login (PRLI) request to peer
688 * @rport: Fibre Channel remote port to send PRLI to
689 *
690 * Locking Note: The rport lock is expected to be held before calling
691 * this routine.
692 */
693static void fc_rport_enter_prli(struct fc_rport *rport)
694{
695 struct fc_rport_libfc_priv *rdata = rport->dd_data;
696 struct fc_lport *lport = rdata->local_port;
697 struct {
698 struct fc_els_prli prli;
699 struct fc_els_spp spp;
700 } *pp;
701 struct fc_frame *fp;
702
703 FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
704 rport->port_id, fc_rport_state(rport));
705
706 fc_rport_state_enter(rport, RPORT_ST_PRLI);
707
708 fp = fc_frame_alloc(lport, sizeof(*pp));
709 if (!fp) {
710 fc_rport_error(rport, fp);
711 return;
712 }
713
714 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
715 fc_rport_prli_resp, rport, lport->e_d_tov))
716 fc_rport_error(rport, fp);
717 else
718 get_device(&rport->dev);
719}
720
721/**
722 * fc_rport_rtv_resp - Request Timeout Value (RTV) response handler
723 * @sp: current sequence in the RTV exchange
724 * @fp: response frame
725 * @rp_arg: Fibre Channel remote port
726 *
727 * Many targets don't seem to support this.
728 *
729 * Locking Note: This function will be called without the rport lock
730 * held, but it will lock, call an _enter_* function or fc_rport_error
731 * and then unlock the rport.
732 */
733static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
734 void *rp_arg)
735{
736 struct fc_rport *rport = rp_arg;
737 struct fc_rport_libfc_priv *rdata = rport->dd_data;
738 u8 op;
739
740 mutex_lock(&rdata->rp_mutex);
741
742 FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
743 rport->port_id);
744
745 if (rdata->rp_state != RPORT_ST_RTV) {
746 FC_DBG("Received a RTV response, but in state %s\n",
747 fc_rport_state(rport));
748 goto out;
749 }
750
751 if (IS_ERR(fp)) {
752 fc_rport_error(rport, fp);
753 goto err;
754 }
755
756 op = fc_frame_payload_op(fp);
757 if (op == ELS_LS_ACC) {
758 struct fc_els_rtv_acc *rtv;
759 u32 toq;
760 u32 tov;
761
762 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
763 if (rtv) {
764 toq = ntohl(rtv->rtv_toq);
765 tov = ntohl(rtv->rtv_r_a_tov);
766 if (tov == 0)
767 tov = 1;
768 rdata->r_a_tov = tov;
769 tov = ntohl(rtv->rtv_e_d_tov);
770 if (toq & FC_ELS_RTV_EDRES)
771 tov /= 1000000;
772 if (tov == 0)
773 tov = 1;
774 rdata->e_d_tov = tov;
775 }
776 }
777
778 fc_rport_enter_ready(rport);
779
780out:
781 fc_frame_free(fp);
782err:
783 mutex_unlock(&rdata->rp_mutex);
784 put_device(&rport->dev);
785}
786
787/**
788 * fc_rport_enter_rtv - Send Request Timeout Value (RTV) request to peer
789 * @rport: Fibre Channel remote port to send RTV to
790 *
791 * Locking Note: The rport lock is expected to be held before calling
792 * this routine.
793 */
794static void fc_rport_enter_rtv(struct fc_rport *rport)
795{
796 struct fc_frame *fp;
797 struct fc_rport_libfc_priv *rdata = rport->dd_data;
798 struct fc_lport *lport = rdata->local_port;
799
800 FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
801 rport->port_id, fc_rport_state(rport));
802
803 fc_rport_state_enter(rport, RPORT_ST_RTV);
804
805 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
806 if (!fp) {
807 fc_rport_error(rport, fp);
808 return;
809 }
810
811 if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
812 fc_rport_rtv_resp, rport, lport->e_d_tov))
813 fc_rport_error(rport, fp);
814 else
815 get_device(&rport->dev);
816}
817
818/**
819 * fc_rport_enter_logo - Send Logout (LOGO) request to peer
820 * @rport: Fibre Channel remote port to send LOGO to
821 *
822 * Locking Note: The rport lock is expected to be held before calling
823 * this routine.
824 */
825static void fc_rport_enter_logo(struct fc_rport *rport)
826{
827 struct fc_rport_libfc_priv *rdata = rport->dd_data;
828 struct fc_lport *lport = rdata->local_port;
829 struct fc_frame *fp;
830
831 FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
832 rport->port_id, fc_rport_state(rport));
833
834 fc_rport_state_enter(rport, RPORT_ST_LOGO);
835
836 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
837 if (!fp) {
838 fc_rport_error(rport, fp);
839 return;
840 }
841
842 if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
843 fc_rport_logo_resp, rport, lport->e_d_tov))
844 fc_rport_error(rport, fp);
845 else
846 get_device(&rport->dev);
847}
848
849
850/**
851 * fc_rport_recv_req - Receive a request from a rport
852 * @sp: current sequence in the received exchange
853 * @fp: request frame
854 * @rport: Fibre Channel remote port
855 *
856 * Locking Note: Called without the rport lock held. This
857 * function will hold the rport lock, call an _enter_*
858 * function and then unlock the rport.
859 */
860void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
861 struct fc_rport *rport)
862{
863 struct fc_rport_libfc_priv *rdata = rport->dd_data;
864 struct fc_lport *lport = rdata->local_port;
865
866 struct fc_frame_header *fh;
867 struct fc_seq_els_data els_data;
868 u8 op;
869
870 mutex_lock(&rdata->rp_mutex);
871
872 els_data.fp = NULL;
873 els_data.explan = ELS_EXPL_NONE;
874 els_data.reason = ELS_RJT_NONE;
875
876 fh = fc_frame_header_get(fp);
877
878 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
879 op = fc_frame_payload_op(fp);
880 switch (op) {
881 case ELS_PLOGI:
882 fc_rport_recv_plogi_req(rport, sp, fp);
883 break;
884 case ELS_PRLI:
885 fc_rport_recv_prli_req(rport, sp, fp);
886 break;
887 case ELS_PRLO:
888 fc_rport_recv_prlo_req(rport, sp, fp);
889 break;
890 case ELS_LOGO:
891 fc_rport_recv_logo_req(rport, sp, fp);
892 break;
893 case ELS_RRQ:
894 els_data.fp = fp;
895 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
896 break;
897 case ELS_REC:
898 els_data.fp = fp;
899 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
900 break;
901 default:
902 els_data.reason = ELS_RJT_UNSUP;
903 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
904 break;
905 }
906 }
907
908 mutex_unlock(&rdata->rp_mutex);
909}
910
911/**
912 * fc_rport_recv_plogi_req - Handle incoming Port Login (PLOGI) request
913 * @rport: Fibre Channel remote port that initiated PLOGI
914 * @sp: current sequence in the PLOGI exchange
915 * @rx_fp: PLOGI request frame
916 *
917 * Locking Note: The rport lock is expected to be held before calling
918 * this function.
919 */
920static void fc_rport_recv_plogi_req(struct fc_rport *rport,
921 struct fc_seq *sp, struct fc_frame *rx_fp)
922{
923 struct fc_rport_libfc_priv *rdata = rport->dd_data;
924 struct fc_lport *lport = rdata->local_port;
925 struct fc_frame *fp = rx_fp;
926 struct fc_exch *ep;
927 struct fc_frame_header *fh;
928 struct fc_els_flogi *pl;
929 struct fc_seq_els_data rjt_data;
930 u32 sid;
931 u64 wwpn;
932 u64 wwnn;
933 enum fc_els_rjt_reason reject = 0;
934 u32 f_ctl;
935 rjt_data.fp = NULL;
936
937 fh = fc_frame_header_get(fp);
938
939 FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
940 "while in state %s\n", ntoh24(fh->fh_s_id),
941 fc_rport_state(rport));
942
943 sid = ntoh24(fh->fh_s_id);
944 pl = fc_frame_payload_get(fp, sizeof(*pl));
945 if (!pl) {
946 FC_DBG("incoming PLOGI from %x too short\n", sid);
947 WARN_ON(1);
948 /* XXX TBD: send reject? */
949 fc_frame_free(fp);
950 return;
951 }
952 wwpn = get_unaligned_be64(&pl->fl_wwpn);
953 wwnn = get_unaligned_be64(&pl->fl_wwnn);
954
955 /*
956 * If the session was just created, possibly due to the incoming PLOGI,
957 * set the state appropriately and accept the PLOGI.
958 *
959 * If we had also sent a PLOGI, and if the received PLOGI is from a
960 * higher WWPN, we accept it; otherwise an LS_RJT is sent with reason
961 * "command already in progress".
962 *
963 * XXX TBD: If the session was ready before, the PLOGI should result in
964 * all outstanding exchanges being reset.
965 */
966 switch (rdata->rp_state) {
967 case RPORT_ST_INIT:
968 FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
969 "- reject\n", sid, wwpn);
970 reject = ELS_RJT_UNSUP;
971 break;
972 case RPORT_ST_PLOGI:
973 FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
974 sid, rdata->rp_state);
975 if (wwpn < lport->wwpn)
976 reject = ELS_RJT_INPROG;
977 break;
978 case RPORT_ST_PRLI:
979 case RPORT_ST_READY:
980 FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
981 "- ignored for now\n", sid, rdata->rp_state);
982 /* XXX TBD - should reset */
983 break;
984 case RPORT_ST_NONE:
985 default:
986 FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
987 "state %d\n", sid, rdata->rp_state);
988 break;
989 }
990
991 if (reject) {
992 rjt_data.reason = reject;
993 rjt_data.explan = ELS_EXPL_NONE;
994 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
995 fc_frame_free(fp);
996 } else {
997 fp = fc_frame_alloc(lport, sizeof(*pl));
998 if (fp == NULL) {
999 fp = rx_fp;
1000 rjt_data.reason = ELS_RJT_UNAB;
1001 rjt_data.explan = ELS_EXPL_NONE;
1002 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1003 fc_frame_free(fp);
1004 } else {
1005 sp = lport->tt.seq_start_next(sp);
1006 WARN_ON(!sp);
1007 fc_rport_set_name(rport, wwpn, wwnn);
1008
1009 /*
1010 * Get session payload size from incoming PLOGI.
1011 */
1012 rport->maxframe_size =
1013 fc_plogi_get_maxframe(pl, lport->mfs);
1014 fc_frame_free(rx_fp);
1015 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1016
1017 /*
1018 * Send LS_ACC. If this fails,
1019 * the originator should retry.
1020 */
1021 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1022 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1023 ep = fc_seq_exch(sp);
1024 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1025 FC_TYPE_ELS, f_ctl, 0);
1026 lport->tt.seq_send(lport, sp, fp);
1027 if (rdata->rp_state == RPORT_ST_PLOGI)
1028 fc_rport_enter_prli(rport);
1029 }
1030 }
1031}
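/*
 * Editor's sketch (not part of the driver): the PLOGI-collision rule in
 * the comment inside fc_rport_recv_plogi_req() above boils down to a
 * WWPN comparison.  plogi_collision_accept() is a hypothetical helper,
 * shown only to make the tie-break explicit.
 */
#include <stdbool.h>
#include <stdint.h>

bool plogi_collision_accept(uint64_t local_wwpn, uint64_t remote_wwpn)
{
	/* Accept the incoming PLOGI only when the remote port has the
	 * higher WWPN; otherwise answer LS_RJT "command already in
	 * progress" and let the locally initiated PLOGI win. */
	return remote_wwpn > local_wwpn;
}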
1032
1033/**
1034 * fc_rport_recv_prli_req - Handle incoming Process Login (PRLI) request
1035 * @rport: Fibre Channel remote port that initiated PRLI
1036 * @sp: current sequence in the PRLI exchange
1037 * @fp: PRLI request frame
1038 *
1039 * Locking Note: The rport lock is expected to be held before calling
1040 * this function.
1041 */
1042static void fc_rport_recv_prli_req(struct fc_rport *rport,
1043 struct fc_seq *sp, struct fc_frame *rx_fp)
1044{
1045 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1046 struct fc_lport *lport = rdata->local_port;
1047 struct fc_exch *ep;
1048 struct fc_frame *fp;
1049 struct fc_frame_header *fh;
1050 struct {
1051 struct fc_els_prli prli;
1052 struct fc_els_spp spp;
1053 } *pp;
1054 struct fc_els_spp *rspp; /* request service param page */
1055 struct fc_els_spp *spp; /* response spp */
1056 unsigned int len;
1057 unsigned int plen;
1058 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1059 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1060 enum fc_els_spp_resp resp;
1061 struct fc_seq_els_data rjt_data;
1062 u32 f_ctl;
1063 u32 fcp_parm;
1064 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1065 rjt_data.fp = NULL;
1066
1067 fh = fc_frame_header_get(rx_fp);
1068
1069 FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
1070 "while in state %s\n", ntoh24(fh->fh_s_id),
1071 fc_rport_state(rport));
1072
1073 switch (rdata->rp_state) {
1074 case RPORT_ST_PRLI:
1075 case RPORT_ST_READY:
1076 reason = ELS_RJT_NONE;
1077 break;
1078 default:
1079 break;
1080 }
1081 len = fr_len(rx_fp) - sizeof(*fh);
1082 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1083 if (pp == NULL) {
1084 reason = ELS_RJT_PROT;
1085 explan = ELS_EXPL_INV_LEN;
1086 } else {
1087 plen = ntohs(pp->prli.prli_len);
1088 if ((plen % 4) != 0 || plen > len) {
1089 reason = ELS_RJT_PROT;
1090 explan = ELS_EXPL_INV_LEN;
1091 } else if (plen < len) {
1092 len = plen;
1093 }
1094 plen = pp->prli.prli_spp_len;
1095 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1096 plen > len || len < sizeof(*pp)) {
1097 reason = ELS_RJT_PROT;
1098 explan = ELS_EXPL_INV_LEN;
1099 }
1100 rspp = &pp->spp;
1101 }
1102 if (reason != ELS_RJT_NONE ||
1103 (fp = fc_frame_alloc(lport, len)) == NULL) {
1104 rjt_data.reason = reason;
1105 rjt_data.explan = explan;
1106 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1107 } else {
1108 sp = lport->tt.seq_start_next(sp);
1109 WARN_ON(!sp);
1110 pp = fc_frame_payload_get(fp, len);
1111 WARN_ON(!pp);
1112 memset(pp, 0, len);
1113 pp->prli.prli_cmd = ELS_LS_ACC;
1114 pp->prli.prli_spp_len = plen;
1115 pp->prli.prli_len = htons(len);
1116 len -= sizeof(struct fc_els_prli);
1117
1118 /*
1119 * Go through all the service parameter pages and build
1120 * response. If plen indicates longer SPP than standard,
1121 * use that. The entire response has been pre-cleared above.
1122 */
1123 spp = &pp->spp;
1124 while (len >= plen) {
1125 spp->spp_type = rspp->spp_type;
1126 spp->spp_type_ext = rspp->spp_type_ext;
1127 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1128 resp = FC_SPP_RESP_ACK;
1129 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1130 resp = FC_SPP_RESP_NO_PA;
1131 switch (rspp->spp_type) {
1132 case 0: /* common to all FC-4 types */
1133 break;
1134 case FC_TYPE_FCP:
1135 fcp_parm = ntohl(rspp->spp_params);
1136				if (fcp_parm & FCP_SPPF_RETRY)
1137 rdata->flags |= FC_RP_FLAGS_RETRY;
1138 rport->supported_classes = FC_COS_CLASS3;
1139 if (fcp_parm & FCP_SPPF_INIT_FCN)
1140 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1141 if (fcp_parm & FCP_SPPF_TARG_FCN)
1142 roles |= FC_RPORT_ROLE_FCP_TARGET;
1143 rport->roles = roles;
1144
1145 spp->spp_params =
1146 htonl(lport->service_params);
1147 break;
1148 default:
1149 resp = FC_SPP_RESP_INVL;
1150 break;
1151 }
1152 spp->spp_flags |= resp;
1153 len -= plen;
1154 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1155 spp = (struct fc_els_spp *)((char *)spp + plen);
1156 }
1157
1158 /*
1159 * Send LS_ACC. If this fails, the originator should retry.
1160 */
1161 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1162 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1163 ep = fc_seq_exch(sp);
1164 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1165 FC_TYPE_ELS, f_ctl, 0);
1166 lport->tt.seq_send(lport, sp, fp);
1167
1168 /*
1169 * Get lock and re-check state.
1170 */
1171 switch (rdata->rp_state) {
1172 case RPORT_ST_PRLI:
1173 fc_rport_enter_ready(rport);
1174 break;
1175 case RPORT_ST_READY:
1176 break;
1177 default:
1178 break;
1179 }
1180 }
1181 fc_frame_free(rx_fp);
1182}
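/*
 * Editor's sketch (not driver code): the response loop above steps
 * through service parameter pages using the stride advertised in the
 * PRLI header, which may be longer than the standard 16-byte page.
 * The driver advances its rspp/spp pointers by that same stride; the
 * stand-in below only shows the length/stride arithmetic.  spp_stub is
 * a made-up placeholder, not struct fc_els_spp.
 */
#include <stddef.h>
#include <stdint.h>

struct spp_stub {
	uint8_t type;
	uint8_t type_ext;
	uint8_t flags;
	uint8_t params[13];		/* pads the page out to 16 bytes */
};

size_t count_spp_pages(size_t payload_len, size_t page_len)
{
	size_t pages = 0;

	/* Reject malformed strides, mirroring the checks above. */
	if (page_len < sizeof(struct spp_stub) || page_len % 4)
		return 0;

	while (payload_len >= page_len) {	/* same "while (len >= plen)" walk */
		pages++;
		payload_len -= page_len;
	}
	return pages;
}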
1183
1184/**
1185 * fc_rport_recv_prlo_req - Handle incoming Process Logout (PRLO) request
1186 * @rport: Fibre Channel remote port that initiated PRLO
1187 * @sp: current sequence in the PRLO exchange
1188 * @fp: PRLO request frame
1189 *
1190 * Locking Note: The rport lock is expected to be held before calling
1191 * this function.
1192 */
1193static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1194 struct fc_frame *fp)
1195{
1196 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1197 struct fc_lport *lport = rdata->local_port;
1198
1199 struct fc_frame_header *fh;
1200 struct fc_seq_els_data rjt_data;
1201
1202 fh = fc_frame_header_get(fp);
1203
1204 FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
1205 "while in state %s\n", ntoh24(fh->fh_s_id),
1206 fc_rport_state(rport));
1207
1208 rjt_data.fp = NULL;
1209 rjt_data.reason = ELS_RJT_UNAB;
1210 rjt_data.explan = ELS_EXPL_NONE;
1211 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1212 fc_frame_free(fp);
1213}
1214
1215/**
1216 * fc_rport_recv_logo_req - Handle incoming Logout (LOGO) request
1217 * @rport: Fibre Channel remote port that initiated LOGO
1218 * @sp: current sequence in the LOGO exchange
1219 * @fp: LOGO request frame
1220 *
1221 * Locking Note: The rport lock is expected to be held before calling
1222 * this function.
1223 */
1224static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
1225 struct fc_frame *fp)
1226{
1227 struct fc_frame_header *fh;
1228 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1229 struct fc_lport *lport = rdata->local_port;
1230
1231 fh = fc_frame_header_get(fp);
1232
1233 FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
1234 "while in state %s\n", ntoh24(fh->fh_s_id),
1235 fc_rport_state(rport));
1236
1237 rdata->event = RPORT_EV_LOGO;
1238 queue_work(rport_event_queue, &rdata->event_work);
1239
1240 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1241 fc_frame_free(fp);
1242}
1243
1244static void fc_rport_flush_queue(void)
1245{
1246 flush_workqueue(rport_event_queue);
1247}
1248
1249
1250int fc_rport_init(struct fc_lport *lport)
1251{
1252 if (!lport->tt.rport_login)
1253 lport->tt.rport_login = fc_rport_login;
1254
1255 if (!lport->tt.rport_logoff)
1256 lport->tt.rport_logoff = fc_rport_logoff;
1257
1258 if (!lport->tt.rport_recv_req)
1259 lport->tt.rport_recv_req = fc_rport_recv_req;
1260
1261 if (!lport->tt.rport_flush_queue)
1262 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1263
1264 return 0;
1265}
1266EXPORT_SYMBOL(fc_rport_init);
1267
1268int fc_setup_rport(void)
1269{
1270 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1271 if (!rport_event_queue)
1272 return -ENOMEM;
1273 return 0;
1274}
1275EXPORT_SYMBOL(fc_setup_rport);
1276
1277void fc_destroy_rport(void)
1278{
1279 destroy_workqueue(rport_event_queue);
1280}
1281EXPORT_SYMBOL(fc_destroy_rport);
1282
1283void fc_rport_terminate_io(struct fc_rport *rport)
1284{
1285 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1286 struct fc_lport *lport = rdata->local_port;
1287
1288 lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
1289 lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
1290}
1291EXPORT_SYMBOL(fc_rport_terminate_io);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 3fdee7370ccc..7225b6e2029e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -88,34 +88,47 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
88} 88}
89EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 89EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
90 90
91void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task, 91/**
92 struct iscsi_data *hdr) 92 * iscsi_prep_data_out_pdu - initialize Data-Out
93 * @task: scsi command task
94 * @r2t: R2T info
95 * @hdr: iscsi data in pdu
96 *
97 * Notes:
98 * Initialize Data-Out within this R2T sequence and finds
99 * proper data_offset within this SCSI command.
100 *
101 * This function is called with connection lock taken.
102 **/
103void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
104 struct iscsi_data *hdr)
93{ 105{
94 struct iscsi_conn *conn = task->conn; 106 struct iscsi_conn *conn = task->conn;
107 unsigned int left = r2t->data_length - r2t->sent;
108
109 task->hdr_len = sizeof(struct iscsi_data);
95 110
96 memset(hdr, 0, sizeof(struct iscsi_data)); 111 memset(hdr, 0, sizeof(struct iscsi_data));
97 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); 112 hdr->ttt = r2t->ttt;
98 hdr->datasn = cpu_to_be32(task->unsol_datasn); 113 hdr->datasn = cpu_to_be32(r2t->datasn);
99 task->unsol_datasn++; 114 r2t->datasn++;
100 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 115 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
101 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); 116 memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
102 117 hdr->itt = task->hdr_itt;
103 hdr->itt = task->hdr->itt; 118 hdr->exp_statsn = r2t->exp_statsn;
104 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 119 hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
105 hdr->offset = cpu_to_be32(task->unsol_offset); 120 if (left > conn->max_xmit_dlength) {
106
107 if (task->unsol_count > conn->max_xmit_dlength) {
108 hton24(hdr->dlength, conn->max_xmit_dlength); 121 hton24(hdr->dlength, conn->max_xmit_dlength);
109 task->data_count = conn->max_xmit_dlength; 122 r2t->data_count = conn->max_xmit_dlength;
110 task->unsol_offset += task->data_count;
111 hdr->flags = 0; 123 hdr->flags = 0;
112 } else { 124 } else {
113 hton24(hdr->dlength, task->unsol_count); 125 hton24(hdr->dlength, left);
114 task->data_count = task->unsol_count; 126 r2t->data_count = left;
115 hdr->flags = ISCSI_FLAG_CMD_FINAL; 127 hdr->flags = ISCSI_FLAG_CMD_FINAL;
116 } 128 }
129 conn->dataout_pdus_cnt++;
117} 130}
118EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu); 131EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
119 132
120static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) 133static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
121{ 134{
@@ -206,11 +219,24 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
206{ 219{
207 struct iscsi_conn *conn = task->conn; 220 struct iscsi_conn *conn = task->conn;
208 struct iscsi_session *session = conn->session; 221 struct iscsi_session *session = conn->session;
209 struct iscsi_cmd *hdr = task->hdr;
210 struct scsi_cmnd *sc = task->sc; 222 struct scsi_cmnd *sc = task->sc;
223 struct iscsi_cmd *hdr;
211 unsigned hdrlength, cmd_len; 224 unsigned hdrlength, cmd_len;
225 itt_t itt;
212 int rc; 226 int rc;
213 227
228 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
229 if (rc)
230 return rc;
231 hdr = (struct iscsi_cmd *) task->hdr;
232 itt = hdr->itt;
233 memset(hdr, 0, sizeof(*hdr));
234
235 if (session->tt->parse_pdu_itt)
236 hdr->itt = task->hdr_itt = itt;
237 else
238 hdr->itt = task->hdr_itt = build_itt(task->itt,
239 task->conn->session->age);
214 task->hdr_len = 0; 240 task->hdr_len = 0;
215 rc = iscsi_add_hdr(task, sizeof(*hdr)); 241 rc = iscsi_add_hdr(task, sizeof(*hdr));
216 if (rc) 242 if (rc)
@@ -218,8 +244,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
218 hdr->opcode = ISCSI_OP_SCSI_CMD; 244 hdr->opcode = ISCSI_OP_SCSI_CMD;
219 hdr->flags = ISCSI_ATTR_SIMPLE; 245 hdr->flags = ISCSI_ATTR_SIMPLE;
220 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 246 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
221 hdr->itt = build_itt(task->itt, session->age); 247 memcpy(task->lun, hdr->lun, sizeof(task->lun));
222 hdr->cmdsn = cpu_to_be32(session->cmdsn); 248 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
223 session->cmdsn++; 249 session->cmdsn++;
224 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 250 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
225 cmd_len = sc->cmd_len; 251 cmd_len = sc->cmd_len;
@@ -242,6 +268,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
242 } 268 }
243 if (sc->sc_data_direction == DMA_TO_DEVICE) { 269 if (sc->sc_data_direction == DMA_TO_DEVICE) {
244 unsigned out_len = scsi_out(sc)->length; 270 unsigned out_len = scsi_out(sc)->length;
271 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
272
245 hdr->data_length = cpu_to_be32(out_len); 273 hdr->data_length = cpu_to_be32(out_len);
246 hdr->flags |= ISCSI_FLAG_CMD_WRITE; 274 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
247 /* 275 /*
@@ -254,13 +282,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
254 * without R2T ack right after 282 * without R2T ack right after
255 * immediate data 283 * immediate data
256 * 284 *
257 * r2t_data_count bytes to be sent via R2T ack's 285 * r2t data_length bytes to be sent via R2T ack's
258 * 286 *
259 * pad_count bytes to be sent as zero-padding 287 * pad_count bytes to be sent as zero-padding
260 */ 288 */
261 task->unsol_count = 0; 289 memset(r2t, 0, sizeof(*r2t));
262 task->unsol_offset = 0;
263 task->unsol_datasn = 0;
264 290
265 if (session->imm_data_en) { 291 if (session->imm_data_en) {
266 if (out_len >= session->first_burst) 292 if (out_len >= session->first_burst)
@@ -274,12 +300,14 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
274 zero_data(hdr->dlength); 300 zero_data(hdr->dlength);
275 301
276 if (!session->initial_r2t_en) { 302 if (!session->initial_r2t_en) {
277 task->unsol_count = min(session->first_burst, out_len) 303 r2t->data_length = min(session->first_burst, out_len) -
278 - task->imm_count; 304 task->imm_count;
279 task->unsol_offset = task->imm_count; 305 r2t->data_offset = task->imm_count;
306 r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
307 r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
280 } 308 }
281 309
282 if (!task->unsol_count) 310 if (!task->unsol_r2t.data_length)
283 /* No unsolicit Data-Out's */ 311 /* No unsolicit Data-Out's */
284 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 312 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
285 } else { 313 } else {
@@ -300,8 +328,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
300 WARN_ON(hdrlength >= 256); 328 WARN_ON(hdrlength >= 256);
301 hdr->hlength = hdrlength & 0xFF; 329 hdr->hlength = hdrlength & 0xFF;
302 330
303 if (conn->session->tt->init_task && 331 if (session->tt->init_task && session->tt->init_task(task))
304 conn->session->tt->init_task(task))
305 return -EIO; 332 return -EIO;
306 333
307 task->state = ISCSI_TASK_RUNNING; 334 task->state = ISCSI_TASK_RUNNING;
@@ -332,6 +359,7 @@ static void iscsi_complete_command(struct iscsi_task *task)
332 struct iscsi_session *session = conn->session; 359 struct iscsi_session *session = conn->session;
333 struct scsi_cmnd *sc = task->sc; 360 struct scsi_cmnd *sc = task->sc;
334 361
362 session->tt->cleanup_task(task);
335 list_del_init(&task->running); 363 list_del_init(&task->running);
336 task->state = ISCSI_TASK_COMPLETED; 364 task->state = ISCSI_TASK_COMPLETED;
337 task->sc = NULL; 365 task->sc = NULL;
@@ -402,8 +430,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
402 * the cmd in the sequencing 430 * the cmd in the sequencing
403 */ 431 */
404 conn->session->queued_cmdsn--; 432 conn->session->queued_cmdsn--;
405 else
406 conn->session->tt->cleanup_task(conn, task);
407 433
408 sc->result = err; 434 sc->result = err;
409 if (!scsi_bidi_cmnd(sc)) 435 if (!scsi_bidi_cmnd(sc))
@@ -423,7 +449,7 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
423 struct iscsi_task *task) 449 struct iscsi_task *task)
424{ 450{
425 struct iscsi_session *session = conn->session; 451 struct iscsi_session *session = conn->session;
426 struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr; 452 struct iscsi_hdr *hdr = task->hdr;
427 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; 453 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
428 454
429 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) 455 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
@@ -437,7 +463,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
437 */ 463 */
438 nop->cmdsn = cpu_to_be32(session->cmdsn); 464 nop->cmdsn = cpu_to_be32(session->cmdsn);
439 if (hdr->itt != RESERVED_ITT) { 465 if (hdr->itt != RESERVED_ITT) {
440 hdr->itt = build_itt(task->itt, session->age);
441 /* 466 /*
442 * TODO: We always use immediate, so we never hit this. 467 * TODO: We always use immediate, so we never hit this.
443 * If we start to send tmfs or nops as non-immediate then 468 * If we start to send tmfs or nops as non-immediate then
@@ -450,12 +475,13 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
450 } 475 }
451 } 476 }
452 477
453 if (session->tt->init_task) 478 if (session->tt->init_task && session->tt->init_task(task))
454 session->tt->init_task(task); 479 return -EIO;
455 480
456 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 481 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
457 session->state = ISCSI_STATE_LOGGING_OUT; 482 session->state = ISCSI_STATE_LOGGING_OUT;
458 483
484 task->state = ISCSI_TASK_RUNNING;
459 list_move_tail(&task->running, &conn->mgmt_run_list); 485 list_move_tail(&task->running, &conn->mgmt_run_list);
460 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n", 486 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
461 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, 487 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
@@ -469,6 +495,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
469{ 495{
470 struct iscsi_session *session = conn->session; 496 struct iscsi_session *session = conn->session;
471 struct iscsi_task *task; 497 struct iscsi_task *task;
498 itt_t itt;
472 499
473 if (session->state == ISCSI_STATE_TERMINATE) 500 if (session->state == ISCSI_STATE_TERMINATE)
474 return NULL; 501 return NULL;
@@ -505,23 +532,47 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
505 } else 532 } else
506 task->data_count = 0; 533 task->data_count = 0;
507 534
535 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
536 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
537 "pdu for mgmt task.\n");
538 goto requeue_task;
539 }
540 itt = task->hdr->itt;
541 task->hdr_len = sizeof(struct iscsi_hdr);
508 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); 542 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
543
544 if (hdr->itt != RESERVED_ITT) {
545 if (session->tt->parse_pdu_itt)
546 task->hdr->itt = itt;
547 else
548 task->hdr->itt = build_itt(task->itt,
549 task->conn->session->age);
550 }
551
509 INIT_LIST_HEAD(&task->running); 552 INIT_LIST_HEAD(&task->running);
510 list_add_tail(&task->running, &conn->mgmtqueue); 553 list_add_tail(&task->running, &conn->mgmtqueue);
511 554
512 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 555 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
513 if (iscsi_prep_mgmt_task(conn, task)) { 556 if (iscsi_prep_mgmt_task(conn, task))
514 __iscsi_put_task(task); 557 goto free_task;
515 return NULL;
516 }
517 558
518 if (session->tt->xmit_task(task)) 559 if (session->tt->xmit_task(task))
519 task = NULL; 560 goto free_task;
520 561
521 } else 562 } else
522 scsi_queue_work(conn->session->host, &conn->xmitwork); 563 scsi_queue_work(conn->session->host, &conn->xmitwork);
523 564
524 return task; 565 return task;
566
567free_task:
568 __iscsi_put_task(task);
569 return NULL;
570
571requeue_task:
572 if (task != conn->login_task)
573 __kfifo_put(session->cmdpool.queue, (void*)&task,
574 sizeof(void*));
575 return NULL;
525} 576}
526 577
527int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 578int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -709,7 +760,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
709{ 760{
710 struct iscsi_reject *reject = (struct iscsi_reject *)hdr; 761 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
711 struct iscsi_hdr rejected_pdu; 762 struct iscsi_hdr rejected_pdu;
712 uint32_t itt;
713 763
714 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1; 764 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
715 765
@@ -719,10 +769,9 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
719 769
720 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { 770 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
721 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); 771 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
722 itt = get_itt(rejected_pdu.itt);
723 iscsi_conn_printk(KERN_ERR, conn, 772 iscsi_conn_printk(KERN_ERR, conn,
724 "itt 0x%x had pdu (op 0x%x) rejected " 773 "pdu (op 0x%x) rejected "
725 "due to DataDigest error.\n", itt, 774 "due to DataDigest error.\n",
726 rejected_pdu.opcode); 775 rejected_pdu.opcode);
727 } 776 }
728 } 777 }
@@ -742,12 +791,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
742static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 791static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
743{ 792{
744 struct iscsi_session *session = conn->session; 793 struct iscsi_session *session = conn->session;
745 uint32_t i; 794 int i;
746 795
747 if (itt == RESERVED_ITT) 796 if (itt == RESERVED_ITT)
748 return NULL; 797 return NULL;
749 798
750 i = get_itt(itt); 799 if (session->tt->parse_pdu_itt)
800 session->tt->parse_pdu_itt(conn, itt, &i, NULL);
801 else
802 i = get_itt(itt);
751 if (i >= session->cmds_max) 803 if (i >= session->cmds_max)
752 return NULL; 804 return NULL;
753 805
@@ -922,20 +974,25 @@ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
922int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt) 974int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
923{ 975{
924 struct iscsi_session *session = conn->session; 976 struct iscsi_session *session = conn->session;
925 uint32_t i; 977 int age = 0, i = 0;
926 978
927 if (itt == RESERVED_ITT) 979 if (itt == RESERVED_ITT)
928 return 0; 980 return 0;
929 981
930 if (((__force u32)itt & ISCSI_AGE_MASK) != 982 if (session->tt->parse_pdu_itt)
931 (session->age << ISCSI_AGE_SHIFT)) { 983 session->tt->parse_pdu_itt(conn, itt, &i, &age);
984 else {
985 i = get_itt(itt);
986 age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
987 }
988
989 if (age != session->age) {
932 iscsi_conn_printk(KERN_ERR, conn, 990 iscsi_conn_printk(KERN_ERR, conn,
933 "received itt %x expected session age (%x)\n", 991 "received itt %x expected session age (%x)\n",
934 (__force u32)itt, session->age); 992 (__force u32)itt, session->age);
935 return ISCSI_ERR_BAD_ITT; 993 return ISCSI_ERR_BAD_ITT;
936 } 994 }
937 995
938 i = get_itt(itt);
939 if (i >= session->cmds_max) { 996 if (i >= session->cmds_max) {
940 iscsi_conn_printk(KERN_ERR, conn, 997 iscsi_conn_printk(KERN_ERR, conn,
941 "received invalid itt index %u (max cmds " 998 "received invalid itt index %u (max cmds "
@@ -1136,8 +1193,13 @@ check_mgmt:
1136 fail_command(conn, conn->task, DID_IMM_RETRY << 16); 1193 fail_command(conn, conn->task, DID_IMM_RETRY << 16);
1137 continue; 1194 continue;
1138 } 1195 }
1139 if (iscsi_prep_scsi_cmd_pdu(conn->task)) { 1196 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1140 fail_command(conn, conn->task, DID_ABORT << 16); 1197 if (rc) {
1198 if (rc == -ENOMEM) {
1199 conn->task = NULL;
1200 goto again;
1201 } else
1202 fail_command(conn, conn->task, DID_ABORT << 16);
1141 continue; 1203 continue;
1142 } 1204 }
1143 rc = iscsi_xmit_task(conn); 1205 rc = iscsi_xmit_task(conn);
@@ -1195,6 +1257,26 @@ static void iscsi_xmitworker(struct work_struct *work)
1195 } while (rc >= 0 || rc == -EAGAIN); 1257 } while (rc >= 0 || rc == -EAGAIN);
1196} 1258}
1197 1259
1260static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1261 struct scsi_cmnd *sc)
1262{
1263 struct iscsi_task *task;
1264
1265 if (!__kfifo_get(conn->session->cmdpool.queue,
1266 (void *) &task, sizeof(void *)))
1267 return NULL;
1268
1269 sc->SCp.phase = conn->session->age;
1270 sc->SCp.ptr = (char *) task;
1271
1272 atomic_set(&task->refcount, 1);
1273 task->state = ISCSI_TASK_PENDING;
1274 task->conn = conn;
1275 task->sc = sc;
1276 INIT_LIST_HEAD(&task->running);
1277 return task;
1278}
1279
1198enum { 1280enum {
1199 FAILURE_BAD_HOST = 1, 1281 FAILURE_BAD_HOST = 1,
1200 FAILURE_SESSION_FAILED, 1282 FAILURE_SESSION_FAILED,
@@ -1281,33 +1363,27 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1281 goto reject; 1363 goto reject;
1282 } 1364 }
1283 1365
1284 if (!__kfifo_get(session->cmdpool.queue, (void*)&task, 1366 task = iscsi_alloc_task(conn, sc);
1285 sizeof(void*))) { 1367 if (!task) {
1286 reason = FAILURE_OOM; 1368 reason = FAILURE_OOM;
1287 goto reject; 1369 goto reject;
1288 } 1370 }
1289 sc->SCp.phase = session->age;
1290 sc->SCp.ptr = (char *)task;
1291
1292 atomic_set(&task->refcount, 1);
1293 task->state = ISCSI_TASK_PENDING;
1294 task->conn = conn;
1295 task->sc = sc;
1296 INIT_LIST_HEAD(&task->running);
1297 list_add_tail(&task->running, &conn->xmitqueue); 1371 list_add_tail(&task->running, &conn->xmitqueue);
1298 1372
1299 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 1373 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
1300 if (iscsi_prep_scsi_cmd_pdu(task)) { 1374 reason = iscsi_prep_scsi_cmd_pdu(task);
1301 sc->result = DID_ABORT << 16; 1375 if (reason) {
1302 sc->scsi_done = NULL; 1376 if (reason == -ENOMEM) {
1303 iscsi_complete_command(task); 1377 reason = FAILURE_OOM;
1304 goto fault; 1378 goto prepd_reject;
1379 } else {
1380 sc->result = DID_ABORT << 16;
1381 goto prepd_fault;
1382 }
1305 } 1383 }
1306 if (session->tt->xmit_task(task)) { 1384 if (session->tt->xmit_task(task)) {
1307 sc->scsi_done = NULL;
1308 iscsi_complete_command(task);
1309 reason = FAILURE_SESSION_NOT_READY; 1385 reason = FAILURE_SESSION_NOT_READY;
1310 goto reject; 1386 goto prepd_reject;
1311 } 1387 }
1312 } else 1388 } else
1313 scsi_queue_work(session->host, &conn->xmitwork); 1389 scsi_queue_work(session->host, &conn->xmitwork);
@@ -1317,12 +1393,18 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1317 spin_lock(host->host_lock); 1393 spin_lock(host->host_lock);
1318 return 0; 1394 return 0;
1319 1395
1396prepd_reject:
1397 sc->scsi_done = NULL;
1398 iscsi_complete_command(task);
1320reject: 1399reject:
1321 spin_unlock(&session->lock); 1400 spin_unlock(&session->lock);
1322 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); 1401 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
1323 spin_lock(host->host_lock); 1402 spin_lock(host->host_lock);
1324 return SCSI_MLQUEUE_TARGET_BUSY; 1403 return SCSI_MLQUEUE_TARGET_BUSY;
1325 1404
1405prepd_fault:
1406 sc->scsi_done = NULL;
1407 iscsi_complete_command(task);
1326fault: 1408fault:
1327 spin_unlock(&session->lock); 1409 spin_unlock(&session->lock);
1328 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); 1410 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
@@ -1634,9 +1716,9 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
1634 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 1716 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1635 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; 1717 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
1636 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 1718 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1637 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); 1719 memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
1638 hdr->rtt = task->hdr->itt; 1720 hdr->rtt = task->hdr_itt;
1639 hdr->refcmdsn = task->hdr->cmdsn; 1721 hdr->refcmdsn = task->cmdsn;
1640} 1722}
1641 1723
1642int iscsi_eh_abort(struct scsi_cmnd *sc) 1724int iscsi_eh_abort(struct scsi_cmnd *sc)
@@ -2223,7 +2305,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2223 } 2305 }
2224 spin_unlock_bh(&session->lock); 2306 spin_unlock_bh(&session->lock);
2225 2307
2226 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL); 2308 data = (char *) __get_free_pages(GFP_KERNEL,
2309 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2227 if (!data) 2310 if (!data)
2228 goto login_task_data_alloc_fail; 2311 goto login_task_data_alloc_fail;
2229 conn->login_task->data = conn->data = data; 2312 conn->login_task->data = conn->data = data;
@@ -2294,7 +2377,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2294 iscsi_suspend_tx(conn); 2377 iscsi_suspend_tx(conn);
2295 2378
2296 spin_lock_bh(&session->lock); 2379 spin_lock_bh(&session->lock);
2297 kfree(conn->data); 2380 free_pages((unsigned long) conn->data,
2381 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2298 kfree(conn->persistent_address); 2382 kfree(conn->persistent_address);
2299 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2383 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
2300 sizeof(void*)); 2384 sizeof(void*));
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
new file mode 100644
index 000000000000..a745f91d2928
--- /dev/null
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -0,0 +1,1163 @@
1/*
2 * iSCSI over TCP/IP Data-Path lib
3 *
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 - 2006 Mike Christie
7 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
8 * maintained by open-iscsi@googlegroups.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published
12 * by the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * See the file COPYING included with this distribution for more details.
21 *
22 * Credits:
23 * Christoph Hellwig
24 * FUJITA Tomonori
25 * Arne Redlich
26 * Zhenyu Wang
27 */
28
29#include <linux/types.h>
30#include <linux/list.h>
31#include <linux/inet.h>
32#include <linux/file.h>
33#include <linux/blkdev.h>
34#include <linux/crypto.h>
35#include <linux/delay.h>
36#include <linux/kfifo.h>
37#include <linux/scatterlist.h>
38#include <net/tcp.h>
39#include <scsi/scsi_cmnd.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_host.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_transport_iscsi.h>
44
45#include "iscsi_tcp.h"
46
47MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
48 "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>");
50MODULE_DESCRIPTION("iSCSI/TCP data-path");
51MODULE_LICENSE("GPL");
52#undef DEBUG_TCP
53
54#ifdef DEBUG_TCP
55#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
56#else
57#define debug_tcp(fmt...)
58#endif
59
60static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
61 struct iscsi_segment *segment);
62
63/*
64 * Scatterlist handling: inside the iscsi_segment, we
65 * remember an index into the scatterlist, and set data/size
66 * to the current scatterlist entry. For highmem pages, we
67 * kmap as needed.
68 *
69 * Note that the page is unmapped when we return from
70 * TCP's data_ready handler, so we may end up mapping and
71 * unmapping the same page repeatedly. The whole reason
72 * for this is that we shouldn't keep the page mapped
73 * outside the softirq.
74 */
75
76/**
77 * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
78 * @segment: the buffer object
79 * @sg: scatterlist
80 * @offset: byte offset into that sg entry
81 *
82 * This function sets up the segment so that subsequent
83 * data is copied to the indicated sg entry, at the given
84 * offset.
85 */
86static inline void
87iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
88 struct scatterlist *sg, unsigned int offset)
89{
90 segment->sg = sg;
91 segment->sg_offset = offset;
92 segment->size = min(sg->length - offset,
93 segment->total_size - segment->total_copied);
94 segment->data = NULL;
95}
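/*
 * Editor's sketch: the clamp in iscsi_tcp_segment_init_sg() above picks
 * how much of the current scatterlist entry this segment may use: no
 * more than what remains in the entry past the offset, and no more than
 * what the transfer as a whole still needs.
 */
unsigned int segment_chunk(unsigned int sg_len, unsigned int sg_off,
			   unsigned int total_size, unsigned int total_copied)
{
	unsigned int in_entry = sg_len - sg_off;
	unsigned int remaining = total_size - total_copied;

	return in_entry < remaining ? in_entry : remaining;
}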
96
97/**
98 * iscsi_tcp_segment_map - map the current S/G page
99 * @segment: iscsi_segment
100 * @recv: 1 if called from recv path
101 *
102 * We only need to possibly kmap data if scatter lists are being used,
103 * because the iscsi passthrough and internal IO paths will never use high
104 * mem pages.
105 */
106static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
107{
108 struct scatterlist *sg;
109
110 if (segment->data != NULL || !segment->sg)
111 return;
112
113 sg = segment->sg;
114 BUG_ON(segment->sg_mapped);
115 BUG_ON(sg->length == 0);
116
117 /*
118 * If the page count is greater than one it is ok to send
119 * to the network layer's zero copy send path. If not we
120 * have to go the slow sendmsg path. We always map for the
121 * recv path.
122 */
123 if (page_count(sg_page(sg)) >= 1 && !recv)
124 return;
125
126 debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
127 segment);
128 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
129 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
130}
131
132void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
133{
134 debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
135
136 if (segment->sg_mapped) {
137 debug_tcp("iscsi_tcp_segment_unmap valid\n");
138 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
139 segment->sg_mapped = NULL;
140 segment->data = NULL;
141 }
142}
143EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap);
144
145/*
146 * Splice the digest buffer into the buffer
147 */
148static inline void
149iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
150{
151 segment->data = digest;
152 segment->digest_len = ISCSI_DIGEST_SIZE;
153 segment->total_size += ISCSI_DIGEST_SIZE;
154 segment->size = ISCSI_DIGEST_SIZE;
155 segment->copied = 0;
156 segment->sg = NULL;
157 segment->hash = NULL;
158}
159
160/**
161 * iscsi_tcp_segment_done - check whether the segment is complete
162 * @tcp_conn: iscsi tcp connection
163 * @segment: iscsi segment to check
 164 * @recv: set to one if this is called from the recv path
165 * @copied: number of bytes copied
166 *
167 * Check if we're done receiving this segment. If the receive
168 * buffer is full but we expect more data, move on to the
169 * next entry in the scatterlist.
170 *
171 * If the amount of data we received isn't a multiple of 4,
172 * we will transparently receive the pad bytes, too.
173 *
174 * This function must be re-entrant.
175 */
176int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
177 struct iscsi_segment *segment, int recv,
178 unsigned copied)
179{
180 static unsigned char padbuf[ISCSI_PAD_LEN];
181 struct scatterlist sg;
182 unsigned int pad;
183
184 debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
185 segment->size, recv ? "recv" : "xmit");
186 if (segment->hash && copied) {
187 /*
188 * If a segment is kmapd we must unmap it before sending
189 * to the crypto layer since that will try to kmap it again.
190 */
191 iscsi_tcp_segment_unmap(segment);
192
193 if (!segment->data) {
194 sg_init_table(&sg, 1);
195 sg_set_page(&sg, sg_page(segment->sg), copied,
196 segment->copied + segment->sg_offset +
197 segment->sg->offset);
198 } else
199 sg_init_one(&sg, segment->data + segment->copied,
200 copied);
201 crypto_hash_update(segment->hash, &sg, copied);
202 }
203
204 segment->copied += copied;
205 if (segment->copied < segment->size) {
206 iscsi_tcp_segment_map(segment, recv);
207 return 0;
208 }
209
210 segment->total_copied += segment->copied;
211 segment->copied = 0;
212 segment->size = 0;
213
214 /* Unmap the current scatterlist page, if there is one. */
215 iscsi_tcp_segment_unmap(segment);
216
217 /* Do we have more scatterlist entries? */
218 debug_tcp("total copied %u total size %u\n", segment->total_copied,
219 segment->total_size);
220 if (segment->total_copied < segment->total_size) {
221 /* Proceed to the next entry in the scatterlist. */
222 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
223 0);
224 iscsi_tcp_segment_map(segment, recv);
225 BUG_ON(segment->size == 0);
226 return 0;
227 }
228
229 /* Do we need to handle padding? */
230 if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
231 pad = iscsi_padding(segment->total_copied);
232 if (pad != 0) {
233 debug_tcp("consume %d pad bytes\n", pad);
234 segment->total_size += pad;
235 segment->size = pad;
236 segment->data = padbuf;
237 return 0;
238 }
239 }
240
241 /*
242 * Set us up for transferring the data digest. hdr digest
243 * is completely handled in hdr done function.
244 */
245 if (segment->hash) {
246 crypto_hash_final(segment->hash, segment->digest);
247 iscsi_tcp_segment_splice_digest(segment,
248 recv ? segment->recv_digest : segment->digest);
249 return 0;
250 }
251
252 return 1;
253}
254EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
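/*
 * Editor's sketch: iSCSI segments are padded out to a 4-byte boundary,
 * which is what the pad handling above consumes via iscsi_padding().
 * The arithmetic is simply:
 */
unsigned int pad_to_4(unsigned int len)
{
	return (4 - (len & 3)) & 3;	/* e.g. pad_to_4(5) == 3, pad_to_4(8) == 0 */
}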
255
256/**
257 * iscsi_tcp_segment_recv - copy data to segment
258 * @tcp_conn: the iSCSI TCP connection
259 * @segment: the buffer to copy to
260 * @ptr: data pointer
261 * @len: amount of data available
262 *
263 * This function copies up to @len bytes to the
264 * given buffer, and returns the number of bytes
265 * consumed, which can actually be less than @len.
266 *
267 * If hash digest is enabled, the function will update the
268 * hash while copying.
269 * Combining these two operations doesn't buy us a lot (yet),
270 * but in the future we could implement combined copy+crc,
 271 * just the way we do for network layer checksums.
272 */
273static int
274iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
275 struct iscsi_segment *segment, const void *ptr,
276 unsigned int len)
277{
278 unsigned int copy = 0, copied = 0;
279
280 while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
281 if (copied == len) {
282 debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
283 len);
284 break;
285 }
286
287 copy = min(len - copied, segment->size - segment->copied);
288 debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
289 memcpy(segment->data + segment->copied, ptr + copied, copy);
290 copied += copy;
291 }
292 return copied;
293}
294
295inline void
296iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
297 unsigned char digest[ISCSI_DIGEST_SIZE])
298{
299 struct scatterlist sg;
300
301 sg_init_one(&sg, hdr, hdrlen);
302 crypto_hash_digest(hash, &sg, hdrlen, digest);
303}
304EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
305
306static inline int
307iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
308 struct iscsi_segment *segment)
309{
310 if (!segment->digest_len)
311 return 1;
312
313 if (memcmp(segment->recv_digest, segment->digest,
314 segment->digest_len)) {
315 debug_scsi("digest mismatch\n");
316 return 0;
317 }
318
319 return 1;
320}
321
322/*
323 * Helper function to set up segment buffer
324 */
325static inline void
326__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
327 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
328{
329 memset(segment, 0, sizeof(*segment));
330 segment->total_size = size;
331 segment->done = done;
332
333 if (hash) {
334 segment->hash = hash;
335 crypto_hash_init(hash);
336 }
337}
338
339inline void
340iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
341 size_t size, iscsi_segment_done_fn_t *done,
342 struct hash_desc *hash)
343{
344 __iscsi_segment_init(segment, size, done, hash);
345 segment->data = data;
346 segment->size = size;
347}
348EXPORT_SYMBOL_GPL(iscsi_segment_init_linear);
349
350inline int
351iscsi_segment_seek_sg(struct iscsi_segment *segment,
352 struct scatterlist *sg_list, unsigned int sg_count,
353 unsigned int offset, size_t size,
354 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
355{
356 struct scatterlist *sg;
357 unsigned int i;
358
359 debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
360 offset, size);
361 __iscsi_segment_init(segment, size, done, hash);
362 for_each_sg(sg_list, sg, sg_count, i) {
363 debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
364 sg->offset);
365 if (offset < sg->length) {
366 iscsi_tcp_segment_init_sg(segment, sg, offset);
367 return 0;
368 }
369 offset -= sg->length;
370 }
371
372 return ISCSI_ERR_DATA_OFFSET;
373}
374EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
375
376/**
377 * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
378 * @tcp_conn: iscsi connection to prep for
379 *
380 * This function always passes NULL for the hash argument, because when this
381 * function is called we do not yet know the final size of the header and want
382 * to delay the digest processing until we know that.
383 */
384void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
385{
386 debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
387 tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
388 iscsi_segment_init_linear(&tcp_conn->in.segment,
389 tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
390 iscsi_tcp_hdr_recv_done, NULL);
391}
392EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep);
393
394/*
395 * Handle incoming reply to any other type of command
396 */
397static int
398iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
399 struct iscsi_segment *segment)
400{
401 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
402 int rc = 0;
403
404 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
405 return ISCSI_ERR_DATA_DGST;
406
407 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
408 conn->data, tcp_conn->in.datalen);
409 if (rc)
410 return rc;
411
412 iscsi_tcp_hdr_recv_prep(tcp_conn);
413 return 0;
414}
415
416static void
417iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
418{
419 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
420 struct hash_desc *rx_hash = NULL;
421
 422	if (conn->datadgst_en &&
423 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
424 rx_hash = tcp_conn->rx_hash;
425
426 iscsi_segment_init_linear(&tcp_conn->in.segment,
427 conn->data, tcp_conn->in.datalen,
428 iscsi_tcp_data_recv_done, rx_hash);
429}
430
431/**
432 * iscsi_tcp_cleanup_task - free tcp_task resources
433 * @task: iscsi task
434 *
435 * must be called with session lock
436 */
437void iscsi_tcp_cleanup_task(struct iscsi_task *task)
438{
439 struct iscsi_tcp_task *tcp_task = task->dd_data;
440 struct iscsi_r2t_info *r2t;
441
442 /* nothing to do for mgmt or pending tasks */
443 if (!task->sc || task->state == ISCSI_TASK_PENDING)
444 return;
445
446 /* flush task's r2t queues */
447 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
448 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
449 sizeof(void*));
450 debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
451 }
452
453 r2t = tcp_task->r2t;
454 if (r2t != NULL) {
455 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
456 sizeof(void*));
457 tcp_task->r2t = NULL;
458 }
459}
460EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
461
462/**
463 * iscsi_tcp_data_in - SCSI Data-In Response processing
464 * @conn: iscsi connection
465 * @task: scsi command task
466 */
467static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
468{
469 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
470 struct iscsi_tcp_task *tcp_task = task->dd_data;
471 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
472 int datasn = be32_to_cpu(rhdr->datasn);
473 unsigned total_in_length = scsi_in(task->sc)->length;
474
475 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
476 if (tcp_conn->in.datalen == 0)
477 return 0;
478
479 if (tcp_task->exp_datasn != datasn) {
480 debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
481 __func__, tcp_task->exp_datasn, datasn);
482 return ISCSI_ERR_DATASN;
483 }
484
485 tcp_task->exp_datasn++;
486
487 tcp_task->data_offset = be32_to_cpu(rhdr->offset);
488 if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
489 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
490 __func__, tcp_task->data_offset,
491 tcp_conn->in.datalen, total_in_length);
492 return ISCSI_ERR_DATA_OFFSET;
493 }
494
495 conn->datain_pdus_cnt++;
496 return 0;
497}
498
499/**
500 * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
501 * @conn: iscsi connection
502 * @task: scsi command task
503 */
504static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
505{
506 struct iscsi_session *session = conn->session;
507 struct iscsi_tcp_task *tcp_task = task->dd_data;
508 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
509 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
510 struct iscsi_r2t_info *r2t;
511 int r2tsn = be32_to_cpu(rhdr->r2tsn);
512 int rc;
513
514 if (tcp_conn->in.datalen) {
515 iscsi_conn_printk(KERN_ERR, conn,
 516				  "invalid R2T with datalen %d\n",
517 tcp_conn->in.datalen);
518 return ISCSI_ERR_DATALEN;
519 }
520
 521	if (tcp_task->exp_datasn != r2tsn) {
522 debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
523 __func__, tcp_task->exp_datasn, r2tsn);
524 return ISCSI_ERR_R2TSN;
525 }
526
527 /* fill-in new R2T associated with the task */
528 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
529
530 if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
531 iscsi_conn_printk(KERN_INFO, conn,
532 "dropping R2T itt %d in recovery.\n",
533 task->itt);
534 return 0;
535 }
536
537 rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
538 if (!rc) {
539 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
540 "Target has sent more R2Ts than it "
 541				  "negotiated for or driver has leaked.\n");
542 return ISCSI_ERR_PROTO;
543 }
544
545 r2t->exp_statsn = rhdr->statsn;
546 r2t->data_length = be32_to_cpu(rhdr->data_length);
547 if (r2t->data_length == 0) {
548 iscsi_conn_printk(KERN_ERR, conn,
549 "invalid R2T with zero data len\n");
550 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
551 sizeof(void*));
552 return ISCSI_ERR_DATALEN;
553 }
554
555 if (r2t->data_length > session->max_burst)
 556		debug_scsi("invalid R2T with data len %u and max burst %u. "
557 "Attempting to execute request.\n",
558 r2t->data_length, session->max_burst);
559
560 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
561 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
562 iscsi_conn_printk(KERN_ERR, conn,
563 "invalid R2T with data len %u at offset %u "
564 "and total length %d\n", r2t->data_length,
565 r2t->data_offset, scsi_out(task->sc)->length);
566 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
567 sizeof(void*));
568 return ISCSI_ERR_DATALEN;
569 }
570
571 r2t->ttt = rhdr->ttt; /* no flip */
572 r2t->datasn = 0;
573 r2t->sent = 0;
574
575 tcp_task->exp_datasn = r2tsn + 1;
576 __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
577 conn->r2t_pdus_cnt++;
578
579 iscsi_requeue_task(task);
580 return 0;
581}
582
583/*
584 * Handle incoming reply to DataIn command
585 */
586static int
587iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
588 struct iscsi_segment *segment)
589{
590 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
591 struct iscsi_hdr *hdr = tcp_conn->in.hdr;
592 int rc;
593
594 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
595 return ISCSI_ERR_DATA_DGST;
596
597 /* check for non-exceptional status */
598 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
599 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
600 if (rc)
601 return rc;
602 }
603
604 iscsi_tcp_hdr_recv_prep(tcp_conn);
605 return 0;
606}
607
608/**
609 * iscsi_tcp_hdr_dissect - process PDU header
610 * @conn: iSCSI connection
611 * @hdr: PDU header
612 *
613 * This function analyzes the header of the PDU received,
614 * and performs several sanity checks. If the PDU is accompanied
615 * by data, the receive buffer is set up to copy the incoming data
616 * to the correct location.
617 */
618static int
619iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
620{
621 int rc = 0, opcode, ahslen;
622 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
623 struct iscsi_task *task;
624
625 /* verify PDU length */
626 tcp_conn->in.datalen = ntoh24(hdr->dlength);
627 if (tcp_conn->in.datalen > conn->max_recv_dlength) {
628 iscsi_conn_printk(KERN_ERR, conn,
629 "iscsi_tcp: datalen %d > %d\n",
630 tcp_conn->in.datalen, conn->max_recv_dlength);
631 return ISCSI_ERR_DATALEN;
632 }
633
634 /* Additional header segments. So far, we don't
635 * process additional headers.
636 */
637 ahslen = hdr->hlength << 2;
638
639 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
640 /* verify itt (itt encoding: age+cid+itt) */
641 rc = iscsi_verify_itt(conn, hdr->itt);
642 if (rc)
643 return rc;
644
645 debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
646 opcode, ahslen, tcp_conn->in.datalen);
647
 648	switch (opcode) {
649 case ISCSI_OP_SCSI_DATA_IN:
650 spin_lock(&conn->session->lock);
651 task = iscsi_itt_to_ctask(conn, hdr->itt);
652 if (!task)
653 rc = ISCSI_ERR_BAD_ITT;
654 else
655 rc = iscsi_tcp_data_in(conn, task);
656 if (rc) {
657 spin_unlock(&conn->session->lock);
658 break;
659 }
660
661 if (tcp_conn->in.datalen) {
662 struct iscsi_tcp_task *tcp_task = task->dd_data;
663 struct hash_desc *rx_hash = NULL;
664 struct scsi_data_buffer *sdb = scsi_in(task->sc);
665
666 /*
667 * Setup copy of Data-In into the Scsi_Cmnd
668 * Scatterlist case:
669 * We set up the iscsi_segment to point to the next
670 * scatterlist entry to copy to. As we go along,
671 * we move on to the next scatterlist entry and
672 * update the digest per-entry.
673 */
674 if (conn->datadgst_en &&
675 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
676 rx_hash = tcp_conn->rx_hash;
677
678 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
679 "datalen=%d)\n", tcp_conn,
680 tcp_task->data_offset,
681 tcp_conn->in.datalen);
682 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
683 sdb->table.sgl,
684 sdb->table.nents,
685 tcp_task->data_offset,
686 tcp_conn->in.datalen,
687 iscsi_tcp_process_data_in,
688 rx_hash);
689 spin_unlock(&conn->session->lock);
690 return rc;
691 }
692 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
693 spin_unlock(&conn->session->lock);
694 break;
695 case ISCSI_OP_SCSI_CMD_RSP:
696 if (tcp_conn->in.datalen) {
697 iscsi_tcp_data_recv_prep(tcp_conn);
698 return 0;
699 }
700 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
701 break;
702 case ISCSI_OP_R2T:
703 spin_lock(&conn->session->lock);
704 task = iscsi_itt_to_ctask(conn, hdr->itt);
705 if (!task)
706 rc = ISCSI_ERR_BAD_ITT;
707 else if (ahslen)
708 rc = ISCSI_ERR_AHSLEN;
709 else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
710 rc = iscsi_tcp_r2t_rsp(conn, task);
711 else
712 rc = ISCSI_ERR_PROTO;
713 spin_unlock(&conn->session->lock);
714 break;
715 case ISCSI_OP_LOGIN_RSP:
716 case ISCSI_OP_TEXT_RSP:
717 case ISCSI_OP_REJECT:
718 case ISCSI_OP_ASYNC_EVENT:
719 /*
720 * It is possible that we could get a PDU with a buffer larger
721 * than 8K, but there are no targets that currently do this.
722 * For now we fail until we find a vendor that needs it
723 */
724 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
725 iscsi_conn_printk(KERN_ERR, conn,
726 "iscsi_tcp: received buffer of "
727 "len %u but conn buffer is only %u "
728 "(opcode %0x)\n",
729 tcp_conn->in.datalen,
730 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
731 rc = ISCSI_ERR_PROTO;
732 break;
733 }
734
735 /* If there's data coming in with the response,
736 * receive it to the connection's buffer.
737 */
738 if (tcp_conn->in.datalen) {
739 iscsi_tcp_data_recv_prep(tcp_conn);
740 return 0;
741 }
742 /* fall through */
743 case ISCSI_OP_LOGOUT_RSP:
744 case ISCSI_OP_NOOP_IN:
745 case ISCSI_OP_SCSI_TMFUNC_RSP:
746 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
747 break;
748 default:
749 rc = ISCSI_ERR_BAD_OPCODE;
750 break;
751 }
752
753 if (rc == 0) {
754 /* Anything that comes with data should have
755 * been handled above. */
756 if (tcp_conn->in.datalen)
757 return ISCSI_ERR_PROTO;
758 iscsi_tcp_hdr_recv_prep(tcp_conn);
759 }
760
761 return rc;
762}
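/*
 * Editor's sketch: the "age+itt" encoding mentioned in the itt check
 * above packs a small session age into the high bits of the 32-bit
 * initiator task tag so stale responses can be spotted.  The shift and
 * mask here are illustrative assumptions, not the ISCSI_AGE_*
 * definitions.
 */
#include <stdint.h>

#define SKETCH_AGE_SHIFT 28
#define SKETCH_AGE_MASK  0xfu

uint32_t sketch_build_itt(uint32_t index, uint32_t age)
{
	return index | ((age & SKETCH_AGE_MASK) << SKETCH_AGE_SHIFT);
}

uint32_t sketch_itt_index(uint32_t itt)
{
	return itt & ~(SKETCH_AGE_MASK << SKETCH_AGE_SHIFT);
}

uint32_t sketch_itt_age(uint32_t itt)
{
	return (itt >> SKETCH_AGE_SHIFT) & SKETCH_AGE_MASK;
}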
763
764/**
765 * iscsi_tcp_hdr_recv_done - process PDU header
766 *
767 * This is the callback invoked when the PDU header has
768 * been received. If the header is followed by additional
769 * header segments, we go back for more data.
770 */
771static int
772iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
773 struct iscsi_segment *segment)
774{
775 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
776 struct iscsi_hdr *hdr;
777
778 /* Check if there are additional header segments
779 * *prior* to computing the digest, because we
780 * may need to go back to the caller for more.
781 */
782 hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
783 if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
784 /* Bump the header length - the caller will
785 * just loop around and get the AHS for us, and
786 * call again. */
787 unsigned int ahslen = hdr->hlength << 2;
788
789 /* Make sure we don't overflow */
790 if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
791 return ISCSI_ERR_AHSLEN;
792
793 segment->total_size += ahslen;
794 segment->size += ahslen;
795 return 0;
796 }
797
798 /* We're done processing the header. See if we're doing
799 * header digests; if so, set up the recv_digest buffer
800 * and go back for more. */
801 if (conn->hdrdgst_en &&
802 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
803 if (segment->digest_len == 0) {
804 /*
805 * Even if we offload the digest processing we
806 * splice it in so we can increment the skb/segment
807 * counters in preparation for the data segment.
808 */
809 iscsi_tcp_segment_splice_digest(segment,
810 segment->recv_digest);
811 return 0;
812 }
813
814 iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
815 segment->total_copied - ISCSI_DIGEST_SIZE,
816 segment->digest);
817
818 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
819 return ISCSI_ERR_HDR_DGST;
820 }
821
822 tcp_conn->in.hdr = hdr;
823 return iscsi_tcp_hdr_dissect(conn, hdr);
824}
825
826/**
827 * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header
828 * @tcp_conn: iscsi tcp conn
829 *
830 * returns non zero if we are currently processing or setup to process
831 * a header.
832 */
833inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
834{
835 return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
836}
837EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
838
839/**
840 * iscsi_tcp_recv_skb - Process skb
841 * @conn: iscsi connection
842 * @skb: network buffer with header and/or data segment
843 * @offset: offset in skb
 845 * @offloaded: bool indicating if transfer was offloaded
 846 * @status: out parameter for the receive status
 847 *
 848 * Returns the number of bytes copied; status is reported through @status.
848 */
849int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
850 unsigned int offset, bool offloaded, int *status)
851{
852 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
853 struct iscsi_segment *segment = &tcp_conn->in.segment;
854 struct skb_seq_state seq;
855 unsigned int consumed = 0;
856 int rc = 0;
857
858 debug_tcp("in %d bytes\n", skb->len - offset);
859
860 if (unlikely(conn->suspend_rx)) {
861 debug_tcp("conn %d Rx suspended!\n", conn->id);
862 *status = ISCSI_TCP_SUSPENDED;
863 return 0;
864 }
865
866 if (offloaded) {
867 segment->total_copied = segment->total_size;
868 goto segment_done;
869 }
870
871 skb_prepare_seq_read(skb, offset, skb->len, &seq);
872 while (1) {
873 unsigned int avail;
874 const u8 *ptr;
875
876 avail = skb_seq_read(consumed, &ptr, &seq);
877 if (avail == 0) {
878 debug_tcp("no more data avail. Consumed %d\n",
879 consumed);
880 *status = ISCSI_TCP_SKB_DONE;
881 skb_abort_seq_read(&seq);
882 goto skb_done;
883 }
884 BUG_ON(segment->copied >= segment->size);
885
886 debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
887 rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
888 BUG_ON(rc == 0);
889 consumed += rc;
890
891 if (segment->total_copied >= segment->total_size) {
892 skb_abort_seq_read(&seq);
893 goto segment_done;
894 }
895 }
896
897segment_done:
898 *status = ISCSI_TCP_SEGMENT_DONE;
899 debug_tcp("segment done\n");
900 rc = segment->done(tcp_conn, segment);
901 if (rc != 0) {
902 *status = ISCSI_TCP_CONN_ERR;
903 debug_tcp("Error receiving PDU, errno=%d\n", rc);
904 iscsi_conn_failure(conn, rc);
905 return 0;
906 }
907 /* The done() functions sets up the next segment. */
908
909skb_done:
910 conn->rxdata_octets += consumed;
911 return consumed;
912}
913EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
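A minimal sketch of a receive path built on iscsi_tcp_recv_skb(), loosely modeled on how a software iSCSI/TCP transport might drive it from a tcp_read_sock() actor; it assumes the read_descriptor_t carries the iscsi_conn in desc->arg.data, and my_recv_actor() is a hypothetical name.

static int my_recv_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
			 unsigned int offset, size_t len)
{
	struct iscsi_conn *conn = rd_desc->arg.data;
	unsigned int consumed, total_consumed = 0;
	int status;

	do {
		/* Feed the skb to the segment state machine piecewise. */
		consumed = iscsi_tcp_recv_skb(conn, skb, offset, false, &status);
		offset += consumed;
		total_consumed += consumed;
	} while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);

	if (status == ISCSI_TCP_CONN_ERR)
		return -EIO;
	return total_consumed;
}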
914
915/**
916 * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
917 * @task: iscsi task (SCSI command or mgmt task)
918 *
919 * Prepares the initial PDU for @task; for SCSI commands the R2T state is reset first.
920 */
921int iscsi_tcp_task_init(struct iscsi_task *task)
922{
923 struct iscsi_tcp_task *tcp_task = task->dd_data;
924 struct iscsi_conn *conn = task->conn;
925 struct scsi_cmnd *sc = task->sc;
926 int err;
927
928 if (!sc) {
929 /*
930 * mgmt tasks do not have a scatterlist since they come
931 * in from the iscsi interface.
932 */
933 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
934 task->itt);
935
936 return conn->session->tt->init_pdu(task, 0, task->data_count);
937 }
938
939 BUG_ON(__kfifo_len(tcp_task->r2tqueue));
940 tcp_task->exp_datasn = 0;
941
942 /* Prepare PDU, optionally w/ immediate data */
943 debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
944 conn->id, task->itt, task->imm_count,
945 task->unsol_r2t.data_length);
946
947 err = conn->session->tt->init_pdu(task, 0, task->imm_count);
948 if (err)
949 return err;
950 task->imm_count = 0;
951 return 0;
952}
953EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
954
955static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
956{
957 struct iscsi_session *session = task->conn->session;
958 struct iscsi_tcp_task *tcp_task = task->dd_data;
959 struct iscsi_r2t_info *r2t = NULL;
960
961 if (iscsi_task_has_unsol_data(task))
962 r2t = &task->unsol_r2t;
963 else {
964 spin_lock_bh(&session->lock);
965 if (tcp_task->r2t) {
966 r2t = tcp_task->r2t;
967 /* Continue with this R2T? */
968 if (r2t->data_length <= r2t->sent) {
969 debug_scsi(" done with r2t %p\n", r2t);
970 __kfifo_put(tcp_task->r2tpool.queue,
971 (void *)&tcp_task->r2t,
972 sizeof(void *));
973 tcp_task->r2t = r2t = NULL;
974 }
975 }
976
977 if (r2t == NULL) {
978 __kfifo_get(tcp_task->r2tqueue,
979 (void *)&tcp_task->r2t, sizeof(void *));
980 r2t = tcp_task->r2t;
981 }
982 spin_unlock_bh(&session->lock);
983 }
984
985 return r2t;
986}
987
988/**
989 * iscsi_tcp_task_xmit - xmit normal PDU task
990 * @task: iscsi command task
991 *
992 * We're expected to return 0 when everything was transmitted succesfully,
993 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
994 * of error.
995 */
996int iscsi_tcp_task_xmit(struct iscsi_task *task)
997{
998 struct iscsi_conn *conn = task->conn;
999 struct iscsi_session *session = conn->session;
1000 struct iscsi_r2t_info *r2t;
1001 int rc = 0;
1002
1003flush:
1004 /* Flush any pending data first. */
1005 rc = session->tt->xmit_pdu(task);
1006 if (rc < 0)
1007 return rc;
1008
1009 /* mgmt command */
1010 if (!task->sc) {
1011 if (task->hdr->itt == RESERVED_ITT)
1012 iscsi_put_task(task);
1013 return 0;
1014 }
1015
1016 /* Are we done already? */
1017 if (task->sc->sc_data_direction != DMA_TO_DEVICE)
1018 return 0;
1019
1020 r2t = iscsi_tcp_get_curr_r2t(task);
1021 if (r2t == NULL) {
1022 /* Waiting for more R2Ts to arrive. */
1023 debug_tcp("no R2Ts yet\n");
1024 return 0;
1025 }
1026
1027 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT);
1028 if (rc)
1029 return rc;
1030 iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);
1031
1032 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1033 r2t, r2t->datasn - 1, task->hdr->itt,
1034 r2t->data_offset + r2t->sent, r2t->data_count);
1035
1036 rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
1037 r2t->data_count);
1038 if (rc)
1039 return rc;
1040 r2t->sent += r2t->data_count;
1041 goto flush;
1042}
1043EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit);
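A hedged sketch of how a caller might act on the return convention described above; my_xmit_task() is hypothetical, and it assumes the generic ISCSI_ERR_CONN_FAILED error code for the catch-all case.

static int my_xmit_task(struct iscsi_task *task)
{
	int rc = iscsi_tcp_task_xmit(task);

	switch (rc) {
	case 0:
		return 0;		/* PDU and any due R2T data fully queued */
	case -EAGAIN:
		return -EAGAIN;		/* partial send, reschedule the xmit work */
	default:
		/* any other error is treated as a connection failure */
		iscsi_conn_failure(task->conn, ISCSI_ERR_CONN_FAILED);
		return rc;
	}
}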
1044
1045struct iscsi_cls_conn *
1046iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
1047 uint32_t conn_idx)
1048
1049{
1050 struct iscsi_conn *conn;
1051 struct iscsi_cls_conn *cls_conn;
1052 struct iscsi_tcp_conn *tcp_conn;
1053
1054 cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
1055 if (!cls_conn)
1056 return NULL;
1057 conn = cls_conn->dd_data;
1058 /*
1059 * Due to strange issues with iSER these are not set
1060 * in iscsi_conn_setup().
1061 */
1062 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1063
1064 tcp_conn = conn->dd_data;
1065 tcp_conn->iscsi_conn = conn;
1066
1067 tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL);
1068 if (!tcp_conn->dd_data) {
1069 iscsi_conn_teardown(cls_conn);
1070 return NULL;
1071 }
1072 return cls_conn;
1073}
1074EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
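A hedged sketch of the intended allocation pattern: a hypothetical LLD (struct my_tcp_conn and my_conn_create() are assumptions, not part of this file) passes the size of its private structure as dd_data_size and then finds that buffer hanging off tcp_conn->dd_data. Teardown would mirror this via iscsi_tcp_conn_teardown(), which frees the same buffer.

struct my_tcp_conn {
	struct socket *sock;	/* hypothetical per-connection state */
};

static struct iscsi_cls_conn *
my_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct my_tcp_conn *my_conn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*my_conn), cid);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	my_conn = tcp_conn->dd_data;	/* the kzalloc'ed buffer sized above */
	my_conn->sock = NULL;
	return cls_conn;
}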
1075
1076void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
1077{
1078 struct iscsi_conn *conn = cls_conn->dd_data;
1079 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1080
1081 kfree(tcp_conn->dd_data);
1082 iscsi_conn_teardown(cls_conn);
1083}
1084EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
1085
1086int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
1087{
1088 int i;
1089 int cmd_i;
1090
1091 /*
1092 * initialize per-task: R2T pool and xmit queue
1093 */
1094 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1095 struct iscsi_task *task = session->cmds[cmd_i];
1096 struct iscsi_tcp_task *tcp_task = task->dd_data;
1097
1098 /*
1099 * Pre-allocate twice as many R2Ts to handle the race where the
1100 * target acks DataOut faster than our data_xmit() queue can
1101 * replenish the r2tqueue.
1102 */
1103
1104 /* R2T pool */
1105 if (iscsi_pool_init(&tcp_task->r2tpool,
1106 session->max_r2t * 2, NULL,
1107 sizeof(struct iscsi_r2t_info))) {
1108 goto r2t_alloc_fail;
1109 }
1110
1111 /* R2T xmit queue */
1112 tcp_task->r2tqueue = kfifo_alloc(
1113 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
1114 if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
1115 iscsi_pool_free(&tcp_task->r2tpool);
1116 goto r2t_alloc_fail;
1117 }
1118 }
1119
1120 return 0;
1121
1122r2t_alloc_fail:
1123 for (i = 0; i < cmd_i; i++) {
1124 struct iscsi_task *task = session->cmds[i];
1125 struct iscsi_tcp_task *tcp_task = task->dd_data;
1126
1127 kfifo_free(tcp_task->r2tqueue);
1128 iscsi_pool_free(&tcp_task->r2tpool);
1129 }
1130 return -ENOMEM;
1131}
1132EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc);
1133
1134void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
1135{
1136 int i;
1137
1138 for (i = 0; i < session->cmds_max; i++) {
1139 struct iscsi_task *task = session->cmds[i];
1140 struct iscsi_tcp_task *tcp_task = task->dd_data;
1141
1142 kfifo_free(tcp_task->r2tqueue);
1143 iscsi_pool_free(&tcp_task->r2tpool);
1144 }
1145}
1146EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
1147
1148void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1149 struct iscsi_stats *stats)
1150{
1151 struct iscsi_conn *conn = cls_conn->dd_data;
1152
1153 stats->txdata_octets = conn->txdata_octets;
1154 stats->rxdata_octets = conn->rxdata_octets;
1155 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1156 stats->dataout_pdus = conn->dataout_pdus_cnt;
1157 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1158 stats->datain_pdus = conn->datain_pdus_cnt;
1159 stats->r2t_pdus = conn->r2t_pdus_cnt;
1160 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1161 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1162}
1163EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 60a9e6e9384b..dcba267db711 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -29,8 +29,10 @@ struct lpfc_sli2_slim;
29#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact 29#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
30 the NameServer before giving up. */ 30 the NameServer before giving up. */
31#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ 31#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
32#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ 32#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
33#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */ 33#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
34#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
35#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 36#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 37#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
36#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ 38#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
@@ -354,8 +356,6 @@ struct lpfc_vport {
354 uint8_t load_flag; 356 uint8_t load_flag;
355#define FC_LOADING 0x1 /* HBA in process of loading drvr */ 357#define FC_LOADING 0x1 /* HBA in process of loading drvr */
356#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ 358#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
357 char *vname; /* Application assigned name */
358
359 /* Vport Config Parameters */ 359 /* Vport Config Parameters */
360 uint32_t cfg_scan_down; 360 uint32_t cfg_scan_down;
361 uint32_t cfg_lun_queue_depth; 361 uint32_t cfg_lun_queue_depth;
@@ -376,7 +376,7 @@ struct lpfc_vport {
376 376
377 struct fc_vport *fc_vport; 377 struct fc_vport *fc_vport;
378 378
379#ifdef CONFIG_LPFC_DEBUG_FS 379#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
380 struct dentry *debug_disc_trc; 380 struct dentry *debug_disc_trc;
381 struct dentry *debug_nodelist; 381 struct dentry *debug_nodelist;
382 struct dentry *vport_debugfs_root; 382 struct dentry *vport_debugfs_root;
@@ -428,6 +428,7 @@ struct lpfc_hba {
428#define LPFC_SLI3_VPORT_TEARDOWN 0x04 428#define LPFC_SLI3_VPORT_TEARDOWN 0x04
429#define LPFC_SLI3_CRP_ENABLED 0x08 429#define LPFC_SLI3_CRP_ENABLED 0x08
430#define LPFC_SLI3_INB_ENABLED 0x10 430#define LPFC_SLI3_INB_ENABLED 0x10
431#define LPFC_SLI3_BG_ENABLED 0x20
431 uint32_t iocb_cmd_size; 432 uint32_t iocb_cmd_size;
432 uint32_t iocb_rsp_size; 433 uint32_t iocb_rsp_size;
433 434
@@ -501,12 +502,14 @@ struct lpfc_hba {
501 uint32_t cfg_poll_tmo; 502 uint32_t cfg_poll_tmo;
502 uint32_t cfg_use_msi; 503 uint32_t cfg_use_msi;
503 uint32_t cfg_sg_seg_cnt; 504 uint32_t cfg_sg_seg_cnt;
505 uint32_t cfg_prot_sg_seg_cnt;
504 uint32_t cfg_sg_dma_buf_size; 506 uint32_t cfg_sg_dma_buf_size;
505 uint64_t cfg_soft_wwnn; 507 uint64_t cfg_soft_wwnn;
506 uint64_t cfg_soft_wwpn; 508 uint64_t cfg_soft_wwpn;
507 uint32_t cfg_hba_queue_depth; 509 uint32_t cfg_hba_queue_depth;
508 uint32_t cfg_enable_hba_reset; 510 uint32_t cfg_enable_hba_reset;
509 uint32_t cfg_enable_hba_heartbeat; 511 uint32_t cfg_enable_hba_heartbeat;
512 uint32_t cfg_enable_bg;
510 513
511 lpfc_vpd_t vpd; /* vital product data */ 514 lpfc_vpd_t vpd; /* vital product data */
512 515
@@ -572,6 +575,9 @@ struct lpfc_hba {
572 uint64_t fc4InputRequests; 575 uint64_t fc4InputRequests;
573 uint64_t fc4OutputRequests; 576 uint64_t fc4OutputRequests;
574 uint64_t fc4ControlRequests; 577 uint64_t fc4ControlRequests;
578 uint64_t bg_guard_err_cnt;
579 uint64_t bg_apptag_err_cnt;
580 uint64_t bg_reftag_err_cnt;
575 581
576 struct lpfc_sysfs_mbox sysfs_mbox; 582 struct lpfc_sysfs_mbox sysfs_mbox;
577 583
@@ -594,6 +600,8 @@ struct lpfc_hba {
594 600
595 struct fc_host_statistics link_stats; 601 struct fc_host_statistics link_stats;
596 enum intr_type_t intr_type; 602 enum intr_type_t intr_type;
603 uint32_t intr_mode;
604#define LPFC_INTR_ERROR 0xFFFFFFFF
597 struct msix_entry msix_entries[LPFC_MSIX_VECTORS]; 605 struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
598 606
599 struct list_head port_list; 607 struct list_head port_list;
@@ -613,12 +621,14 @@ struct lpfc_hba {
613 unsigned long last_rsrc_error_time; 621 unsigned long last_rsrc_error_time;
614 unsigned long last_ramp_down_time; 622 unsigned long last_ramp_down_time;
615 unsigned long last_ramp_up_time; 623 unsigned long last_ramp_up_time;
616#ifdef CONFIG_LPFC_DEBUG_FS 624#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
617 struct dentry *hba_debugfs_root; 625 struct dentry *hba_debugfs_root;
618 atomic_t debugfs_vport_count; 626 atomic_t debugfs_vport_count;
619 struct dentry *debug_hbqinfo; 627 struct dentry *debug_hbqinfo;
620 struct dentry *debug_dumpHostSlim; 628 struct dentry *debug_dumpHostSlim;
621 struct dentry *debug_dumpHBASlim; 629 struct dentry *debug_dumpHBASlim;
630 struct dentry *debug_dumpData; /* BlockGuard BPL*/
631 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
622 struct dentry *debug_slow_ring_trc; 632 struct dentry *debug_slow_ring_trc;
623 struct lpfc_debugfs_trc *slow_ring_trc; 633 struct lpfc_debugfs_trc *slow_ring_trc;
624 atomic_t slow_ring_trc_cnt; 634 atomic_t slow_ring_trc_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index aa3d6277581d..40cf0f4f327f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -96,6 +96,61 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
96 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); 96 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
97} 97}
98 98
99static ssize_t
100lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
101 char *buf)
102{
103 struct Scsi_Host *shost = class_to_shost(dev);
104 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
105 struct lpfc_hba *phba = vport->phba;
106
107 if (phba->cfg_enable_bg)
108 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
109 return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
110 else
111 return snprintf(buf, PAGE_SIZE,
112 "BlockGuard Not Supported\n");
113 else
114 return snprintf(buf, PAGE_SIZE,
115 "BlockGuard Disabled\n");
116}
117
118static ssize_t
119lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
120 char *buf)
121{
122 struct Scsi_Host *shost = class_to_shost(dev);
123 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
124 struct lpfc_hba *phba = vport->phba;
125
126 return snprintf(buf, PAGE_SIZE, "%llu\n",
127 (unsigned long long)phba->bg_guard_err_cnt);
128}
129
130static ssize_t
131lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
132 char *buf)
133{
134 struct Scsi_Host *shost = class_to_shost(dev);
135 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
136 struct lpfc_hba *phba = vport->phba;
137
138 return snprintf(buf, PAGE_SIZE, "%llu\n",
139 (unsigned long long)phba->bg_apptag_err_cnt);
140}
141
142static ssize_t
143lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
144 char *buf)
145{
146 struct Scsi_Host *shost = class_to_shost(dev);
147 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
148 struct lpfc_hba *phba = vport->phba;
149
150 return snprintf(buf, PAGE_SIZE, "%llu\n",
151 (unsigned long long)phba->bg_reftag_err_cnt);
152}
153
99/** 154/**
100 * lpfc_info_show: Return some pci info about the host in ascii. 155 * lpfc_info_show: Return some pci info about the host in ascii.
101 * @dev: class converted to a Scsi_host structure. 156 * @dev: class converted to a Scsi_host structure.
@@ -1485,6 +1540,10 @@ lpfc_vport_param_store(name)\
1485static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ 1540static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
1486 lpfc_##name##_show, lpfc_##name##_store) 1541 lpfc_##name##_show, lpfc_##name##_store)
1487 1542
1543static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
1544static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
1545static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
1546static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
1488static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); 1547static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
1489static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); 1548static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
1490static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); 1549static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
@@ -1970,6 +2029,7 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
1970# LOG_LINK_EVENT 0x10 Link events 2029# LOG_LINK_EVENT 0x10 Link events
1971# LOG_FCP 0x40 FCP traffic history 2030# LOG_FCP 0x40 FCP traffic history
1972# LOG_NODE 0x80 Node table events 2031# LOG_NODE 0x80 Node table events
2032# LOG_BG 0x200 BlockGuard events
1973# LOG_MISC 0x400 Miscellaneous events 2033# LOG_MISC 0x400 Miscellaneous events
1974# LOG_SLI 0x800 SLI events 2034# LOG_SLI 0x800 SLI events
1975# LOG_FCP_ERROR 0x1000 Only log FCP errors 2035# LOG_FCP_ERROR 0x1000 Only log FCP errors
@@ -2769,6 +2829,42 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
2769LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); 2829LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2770 2830
2771/* 2831/*
2832# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
2833# 0 = BlockGuard disabled (default)
2834# 1 = BlockGuard enabled
2835# Value range is [0,1]. Default value is 0.
2836*/
2837LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2838
2839
2840/*
2841# lpfc_prot_mask: i
2842# - Bit mask of host protection capabilities used to register with the
2843# SCSI mid-layer
2844# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
2845# - Allows you to ultimately specify which profiles to use
2846# - Default will result in registering capabilities for all profiles.
2847#
2848*/
2849unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION;
2850
2851module_param(lpfc_prot_mask, uint, 0);
2852MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
2853
2854/*
2855# lpfc_prot_guard: i
2856# - Bit mask of protection guard types to register with the SCSI mid-layer
2857# - Guard types are currently either 1) IP checksum 2) T10-DIF CRC
2858# - Allows you to ultimately specify which profiles to use
2859# - Default will result in registering capabilities for all guard types
2860#
2861*/
2862unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
2863module_param(lpfc_prot_guard, byte, 0);
2864MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
2865
2866
2867/*
2772 * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count 2868 * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count
2773 * This value can be set to values between 64 and 256. The default value is 2869 * This value can be set to values between 64 and 256. The default value is
2774 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer 2870 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
@@ -2777,7 +2873,15 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2777LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, 2873LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
2778 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); 2874 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
2779 2875
2876LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
2877 LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
2878 "Max Protection Scatter Gather Segment Count");
2879
2780struct device_attribute *lpfc_hba_attrs[] = { 2880struct device_attribute *lpfc_hba_attrs[] = {
2881 &dev_attr_bg_info,
2882 &dev_attr_bg_guard_err,
2883 &dev_attr_bg_apptag_err,
2884 &dev_attr_bg_reftag_err,
2781 &dev_attr_info, 2885 &dev_attr_info,
2782 &dev_attr_serialnum, 2886 &dev_attr_serialnum,
2783 &dev_attr_modeldesc, 2887 &dev_attr_modeldesc,
@@ -2825,6 +2929,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2825 &dev_attr_lpfc_poll, 2929 &dev_attr_lpfc_poll,
2826 &dev_attr_lpfc_poll_tmo, 2930 &dev_attr_lpfc_poll_tmo,
2827 &dev_attr_lpfc_use_msi, 2931 &dev_attr_lpfc_use_msi,
2932 &dev_attr_lpfc_enable_bg,
2828 &dev_attr_lpfc_soft_wwnn, 2933 &dev_attr_lpfc_soft_wwnn,
2829 &dev_attr_lpfc_soft_wwpn, 2934 &dev_attr_lpfc_soft_wwpn,
2830 &dev_attr_lpfc_soft_wwn_enable, 2935 &dev_attr_lpfc_soft_wwn_enable,
@@ -2833,6 +2938,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2833 &dev_attr_lpfc_sg_seg_cnt, 2938 &dev_attr_lpfc_sg_seg_cnt,
2834 &dev_attr_lpfc_max_scsicmpl_time, 2939 &dev_attr_lpfc_max_scsicmpl_time,
2835 &dev_attr_lpfc_stat_data_ctrl, 2940 &dev_attr_lpfc_stat_data_ctrl,
2941 &dev_attr_lpfc_prot_sg_seg_cnt,
2836 NULL, 2942 NULL,
2837}; 2943};
2838 2944
@@ -3282,25 +3388,28 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
3282 int error; 3388 int error;
3283 3389
3284 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 3390 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
3285 &sysfs_ctlreg_attr); 3391 &sysfs_drvr_stat_data_attr);
3286 if (error) 3392
3393 /* Virtual ports do not need ctrl_reg and mbox */
3394 if (error || vport->port_type == LPFC_NPIV_PORT)
3287 goto out; 3395 goto out;
3288 3396
3289 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 3397 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
3290 &sysfs_mbox_attr); 3398 &sysfs_ctlreg_attr);
3291 if (error) 3399 if (error)
3292 goto out_remove_ctlreg_attr; 3400 goto out_remove_stat_attr;
3293 3401
3294 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 3402 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
3295 &sysfs_drvr_stat_data_attr); 3403 &sysfs_mbox_attr);
3296 if (error) 3404 if (error)
3297 goto out_remove_mbox_attr; 3405 goto out_remove_ctlreg_attr;
3298 3406
3299 return 0; 3407 return 0;
3300out_remove_mbox_attr:
3301 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
3302out_remove_ctlreg_attr: 3408out_remove_ctlreg_attr:
3303 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 3409 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
3410out_remove_stat_attr:
3411 sysfs_remove_bin_file(&shost->shost_dev.kobj,
3412 &sysfs_drvr_stat_data_attr);
3304out: 3413out:
3305 return error; 3414 return error;
3306} 3415}
@@ -3315,6 +3424,9 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
3315 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3424 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3316 sysfs_remove_bin_file(&shost->shost_dev.kobj, 3425 sysfs_remove_bin_file(&shost->shost_dev.kobj,
3317 &sysfs_drvr_stat_data_attr); 3426 &sysfs_drvr_stat_data_attr);
3427 /* Virtual ports do not need ctrl_reg and mbox */
3428 if (vport->port_type == LPFC_NPIV_PORT)
3429 return;
3318 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 3430 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
3319 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 3431 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
3320} 3432}
@@ -3792,6 +3904,23 @@ lpfc_show_rport_##field (struct device *dev, \
3792 lpfc_rport_show_function(field, format_string, sz, ) \ 3904 lpfc_rport_show_function(field, format_string, sz, ) \
3793static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) 3905static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
3794 3906
3907/**
3908 * lpfc_set_vport_symbolic_name: Set the vport's symbolic name.
3909 * @fc_vport: The fc_vport whose symbolic name has been changed.
3910 *
3911 * Description:
3912 * This function is called by the transport after the @fc_vport's symbolic name
3913 * has been changed. It re-registers the symbolic name with the
3914 * switch to propagate the change into the fabric if the vport is active.
3915 **/
3916static void
3917lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
3918{
3919 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
3920
3921 if (vport->port_state == LPFC_VPORT_READY)
3922 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3923}
3795 3924
3796struct fc_function_template lpfc_transport_functions = { 3925struct fc_function_template lpfc_transport_functions = {
3797 /* fixed attributes the driver supports */ 3926 /* fixed attributes the driver supports */
@@ -3801,6 +3930,7 @@ struct fc_function_template lpfc_transport_functions = {
3801 .show_host_supported_fc4s = 1, 3930 .show_host_supported_fc4s = 1,
3802 .show_host_supported_speeds = 1, 3931 .show_host_supported_speeds = 1,
3803 .show_host_maxframe_size = 1, 3932 .show_host_maxframe_size = 1,
3933 .show_host_symbolic_name = 1,
3804 3934
3805 /* dynamic attributes the driver supports */ 3935 /* dynamic attributes the driver supports */
3806 .get_host_port_id = lpfc_get_host_port_id, 3936 .get_host_port_id = lpfc_get_host_port_id,
@@ -3850,6 +3980,10 @@ struct fc_function_template lpfc_transport_functions = {
3850 .terminate_rport_io = lpfc_terminate_rport_io, 3980 .terminate_rport_io = lpfc_terminate_rport_io,
3851 3981
3852 .dd_fcvport_size = sizeof(struct lpfc_vport *), 3982 .dd_fcvport_size = sizeof(struct lpfc_vport *),
3983
3984 .vport_disable = lpfc_vport_disable,
3985
3986 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
3853}; 3987};
3854 3988
3855struct fc_function_template lpfc_vport_transport_functions = { 3989struct fc_function_template lpfc_vport_transport_functions = {
@@ -3860,6 +3994,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
3860 .show_host_supported_fc4s = 1, 3994 .show_host_supported_fc4s = 1,
3861 .show_host_supported_speeds = 1, 3995 .show_host_supported_speeds = 1,
3862 .show_host_maxframe_size = 1, 3996 .show_host_maxframe_size = 1,
3997 .show_host_symbolic_name = 1,
3863 3998
3864 /* dynamic attributes the driver supports */ 3999 /* dynamic attributes the driver supports */
3865 .get_host_port_id = lpfc_get_host_port_id, 4000 .get_host_port_id = lpfc_get_host_port_id,
@@ -3908,6 +4043,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
3908 .terminate_rport_io = lpfc_terminate_rport_io, 4043 .terminate_rport_io = lpfc_terminate_rport_io,
3909 4044
3910 .vport_disable = lpfc_vport_disable, 4045 .vport_disable = lpfc_vport_disable,
4046
4047 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
3911}; 4048};
3912 4049
3913/** 4050/**
@@ -3930,13 +4067,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
3930 lpfc_use_msi_init(phba, lpfc_use_msi); 4067 lpfc_use_msi_init(phba, lpfc_use_msi);
3931 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4068 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
3932 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4069 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4070 lpfc_enable_bg_init(phba, lpfc_enable_bg);
3933 phba->cfg_poll = lpfc_poll; 4071 phba->cfg_poll = lpfc_poll;
3934 phba->cfg_soft_wwnn = 0L; 4072 phba->cfg_soft_wwnn = 0L;
3935 phba->cfg_soft_wwpn = 0L; 4073 phba->cfg_soft_wwpn = 0L;
3936 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4074 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
3937 /* Also reinitialize the host templates with new values. */ 4075 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
3938 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3939 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3940 /* 4076 /*
3941 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4077 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3942 * used to create the sg_dma_buf_pool must be dynamically calculated. 4078 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -3945,6 +4081,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
3945 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 4081 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3946 sizeof(struct fcp_rsp) + 4082 sizeof(struct fcp_rsp) +
3947 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 4083 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4084
4085 if (phba->cfg_enable_bg) {
4086 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4087 phba->cfg_sg_dma_buf_size +=
4088 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4089 }
4090
4091 /* Also reinitialize the host templates with new values. */
4092 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4093 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4094
3948 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4095 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
3949 return; 4096 return;
3950} 4097}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 044ef4057d28..07f4976319a5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -22,6 +22,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
22 22
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 26void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 27void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
27 28
@@ -284,12 +285,24 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
284 uint32_t, uint32_t); 285 uint32_t, uint32_t);
285extern struct lpfc_hbq_init *lpfc_hbq_defs[]; 286extern struct lpfc_hbq_init *lpfc_hbq_defs[];
286 287
288/* externs BlockGuard */
289extern char *_dump_buf_data;
290extern unsigned long _dump_buf_data_order;
291extern char *_dump_buf_dif;
292extern unsigned long _dump_buf_dif_order;
293extern spinlock_t _dump_buf_lock;
294extern int _dump_buf_done;
295extern spinlock_t pgcnt_lock;
296extern unsigned int pgcnt;
297extern unsigned int lpfc_prot_mask;
298extern unsigned char lpfc_prot_guard;
299
287/* Interface exported by fabric iocb scheduler */ 300/* Interface exported by fabric iocb scheduler */
288void lpfc_fabric_abort_nport(struct lpfc_nodelist *); 301void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
289void lpfc_fabric_abort_hba(struct lpfc_hba *); 302void lpfc_fabric_abort_hba(struct lpfc_hba *);
290void lpfc_fabric_block_timeout(unsigned long); 303void lpfc_fabric_block_timeout(unsigned long);
291void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); 304void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
292void lpfc_adjust_queue_depth(struct lpfc_hba *); 305void lpfc_rampdown_queue_depth(struct lpfc_hba *);
293void lpfc_ramp_down_queue_handler(struct lpfc_hba *); 306void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
294void lpfc_ramp_up_queue_handler(struct lpfc_hba *); 307void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
295void lpfc_scsi_dev_block(struct lpfc_hba *); 308void lpfc_scsi_dev_block(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 26dae8bae2d1..896c7b0351e5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -560,18 +560,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
560 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry); 560 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
561 561
562 /* Don't bother processing response if vport is being torn down. */ 562 /* Don't bother processing response if vport is being torn down. */
563 if (vport->load_flag & FC_UNLOADING) 563 if (vport->load_flag & FC_UNLOADING) {
564 if (vport->fc_flag & FC_RSCN_MODE)
565 lpfc_els_flush_rscn(vport);
564 goto out; 566 goto out;
567 }
565 568
566 if (lpfc_els_chk_latt(vport)) { 569 if (lpfc_els_chk_latt(vport)) {
567 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 570 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
568 "0216 Link event during NS query\n"); 571 "0216 Link event during NS query\n");
572 if (vport->fc_flag & FC_RSCN_MODE)
573 lpfc_els_flush_rscn(vport);
569 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 574 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
570 goto out; 575 goto out;
571 } 576 }
572 if (lpfc_error_lost_link(irsp)) { 577 if (lpfc_error_lost_link(irsp)) {
573 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 578 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
574 "0226 NS query failed due to link event\n"); 579 "0226 NS query failed due to link event\n");
580 if (vport->fc_flag & FC_RSCN_MODE)
581 lpfc_els_flush_rscn(vport);
575 goto out; 582 goto out;
576 } 583 }
577 if (irsp->ulpStatus) { 584 if (irsp->ulpStatus) {
@@ -587,6 +594,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
587 if (rc == 0) 594 if (rc == 0)
588 goto out; 595 goto out;
589 } 596 }
597 if (vport->fc_flag & FC_RSCN_MODE)
598 lpfc_els_flush_rscn(vport);
590 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 599 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
591 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 600 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
592 "0257 GID_FT Query error: 0x%x 0x%x\n", 601 "0257 GID_FT Query error: 0x%x 0x%x\n",
@@ -1008,8 +1017,10 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
1008 if (n < size) 1017 if (n < size)
1009 n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi); 1018 n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
1010 1019
1011 if (n < size && vport->vname) 1020 if (n < size &&
1012 n += snprintf(symbol + n, size - n, " VName-%s", vport->vname); 1021 strlen(vport->fc_vport->symbolic_name))
1022 n += snprintf(symbol + n, size - n, " VName-%s",
1023 vport->fc_vport->symbolic_name);
1013 return n; 1024 return n;
1014} 1025}
1015 1026
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 771920bdde44..b615eda361d5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -46,7 +46,7 @@
46#include "lpfc_compat.h" 46#include "lpfc_compat.h"
47#include "lpfc_debugfs.h" 47#include "lpfc_debugfs.h"
48 48
49#ifdef CONFIG_LPFC_DEBUG_FS 49#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
50/** 50/**
51 * debugfs interface 51 * debugfs interface
52 * 52 *
@@ -618,7 +618,7 @@ inline void
618lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, 618lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
619 uint32_t data1, uint32_t data2, uint32_t data3) 619 uint32_t data1, uint32_t data2, uint32_t data3)
620{ 620{
621#ifdef CONFIG_LPFC_DEBUG_FS 621#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
622 struct lpfc_debugfs_trc *dtp; 622 struct lpfc_debugfs_trc *dtp;
623 int index; 623 int index;
624 624
@@ -659,7 +659,7 @@ inline void
659lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, 659lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
660 uint32_t data1, uint32_t data2, uint32_t data3) 660 uint32_t data1, uint32_t data2, uint32_t data3)
661{ 661{
662#ifdef CONFIG_LPFC_DEBUG_FS 662#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
663 struct lpfc_debugfs_trc *dtp; 663 struct lpfc_debugfs_trc *dtp;
664 int index; 664 int index;
665 665
@@ -680,7 +680,7 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
680 return; 680 return;
681} 681}
682 682
683#ifdef CONFIG_LPFC_DEBUG_FS 683#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
684/** 684/**
685 * lpfc_debugfs_disc_trc_open - Open the discovery trace log. 685 * lpfc_debugfs_disc_trc_open - Open the discovery trace log.
686 * @inode: The inode pointer that contains a vport pointer. 686 * @inode: The inode pointer that contains a vport pointer.
@@ -907,6 +907,91 @@ out:
907 return rc; 907 return rc;
908} 908}
909 909
910static int
911lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
912{
913 struct lpfc_debug *debug;
914 int rc = -ENOMEM;
915
916 if (!_dump_buf_data)
917 return -EBUSY;
918
919 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
920 if (!debug)
921 goto out;
922
923	/* Round to page boundary */
924 printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n",
925 __func__, _dump_buf_data);
926 debug->buffer = _dump_buf_data;
927 if (!debug->buffer) {
928 kfree(debug);
929 goto out;
930 }
931
932 debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
933 file->private_data = debug;
934
935 rc = 0;
936out:
937 return rc;
938}
939
940static int
941lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
942{
943 struct lpfc_debug *debug;
944 int rc = -ENOMEM;
945
946 if (!_dump_buf_dif)
947 return -EBUSY;
948
949 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
950 if (!debug)
951 goto out;
952
953	/* Round to page boundary */
954 printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__,
955 _dump_buf_dif, file->f_dentry->d_name.name);
956 debug->buffer = _dump_buf_dif;
957 if (!debug->buffer) {
958 kfree(debug);
959 goto out;
960 }
961
962 debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
963 file->private_data = debug;
964
965 rc = 0;
966out:
967 return rc;
968}
969
970static ssize_t
971lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
972 size_t nbytes, loff_t *ppos)
973{
974 /*
975	 * The Data/DIF buffers only save one failing IO.
976	 * The write op is used as a reset mechanism: once an IO has
977	 * been saved, a write clears the buffers so the next one can be saved.
978 */
979 spin_lock(&_dump_buf_lock);
980
981 memset((void *)_dump_buf_data, 0,
982 ((1 << PAGE_SHIFT) << _dump_buf_data_order));
983 memset((void *)_dump_buf_dif, 0,
984 ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
985
986 _dump_buf_done = 0;
987
988 spin_unlock(&_dump_buf_lock);
989
990 return nbytes;
991}
992
993
994
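A hedged userspace sketch of the reset mechanism described in the comment above: any write to the dumpData (or dumpDif) debugfs file clears both capture buffers so the next failing IO can be saved. The debugfs path layout and rearm_blockguard_dump() are assumptions for illustration only.

#include <fcntl.h>
#include <unistd.h>

static int rearm_blockguard_dump(const char *path)
{
	/* e.g. path = "/sys/kernel/debug/lpfc/fn0/dumpData" (assumed layout) */
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) < 0) {	/* any byte re-arms the buffers */
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}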
910/** 995/**
911 * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file. 996 * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
912 * @inode: The inode pointer that contains a vport pointer. 997 * @inode: The inode pointer that contains a vport pointer.
@@ -1035,6 +1120,17 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
1035 return 0; 1120 return 0;
1036} 1121}
1037 1122
1123static int
1124lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
1125{
1126 struct lpfc_debug *debug = file->private_data;
1127
1128 debug->buffer = NULL;
1129 kfree(debug);
1130
1131 return 0;
1132}
1133
1038#undef lpfc_debugfs_op_disc_trc 1134#undef lpfc_debugfs_op_disc_trc
1039static struct file_operations lpfc_debugfs_op_disc_trc = { 1135static struct file_operations lpfc_debugfs_op_disc_trc = {
1040 .owner = THIS_MODULE, 1136 .owner = THIS_MODULE,
@@ -1080,6 +1176,26 @@ static struct file_operations lpfc_debugfs_op_dumpHostSlim = {
1080 .release = lpfc_debugfs_release, 1176 .release = lpfc_debugfs_release,
1081}; 1177};
1082 1178
1179#undef lpfc_debugfs_op_dumpData
1180static struct file_operations lpfc_debugfs_op_dumpData = {
1181 .owner = THIS_MODULE,
1182 .open = lpfc_debugfs_dumpData_open,
1183 .llseek = lpfc_debugfs_lseek,
1184 .read = lpfc_debugfs_read,
1185 .write = lpfc_debugfs_dumpDataDif_write,
1186 .release = lpfc_debugfs_dumpDataDif_release,
1187};
1188
1189#undef lpfc_debugfs_op_dumpDif
1190static struct file_operations lpfc_debugfs_op_dumpDif = {
1191 .owner = THIS_MODULE,
1192 .open = lpfc_debugfs_dumpDif_open,
1193 .llseek = lpfc_debugfs_lseek,
1194 .read = lpfc_debugfs_read,
1195 .write = lpfc_debugfs_dumpDataDif_write,
1196 .release = lpfc_debugfs_dumpDataDif_release,
1197};
1198
1083#undef lpfc_debugfs_op_slow_ring_trc 1199#undef lpfc_debugfs_op_slow_ring_trc
1084static struct file_operations lpfc_debugfs_op_slow_ring_trc = { 1200static struct file_operations lpfc_debugfs_op_slow_ring_trc = {
1085 .owner = THIS_MODULE, 1201 .owner = THIS_MODULE,
@@ -1106,7 +1222,7 @@ static atomic_t lpfc_debugfs_hba_count;
1106inline void 1222inline void
1107lpfc_debugfs_initialize(struct lpfc_vport *vport) 1223lpfc_debugfs_initialize(struct lpfc_vport *vport)
1108{ 1224{
1109#ifdef CONFIG_LPFC_DEBUG_FS 1225#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1110 struct lpfc_hba *phba = vport->phba; 1226 struct lpfc_hba *phba = vport->phba;
1111 char name[64]; 1227 char name[64];
1112 uint32_t num, i; 1228 uint32_t num, i;
@@ -1176,6 +1292,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1176 goto debug_failed; 1292 goto debug_failed;
1177 } 1293 }
1178 1294
1295 /* Setup dumpData */
1296 snprintf(name, sizeof(name), "dumpData");
1297 phba->debug_dumpData =
1298 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
1299 phba->hba_debugfs_root,
1300 phba, &lpfc_debugfs_op_dumpData);
1301 if (!phba->debug_dumpData) {
1302 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1303 "0800 Cannot create debugfs dumpData\n");
1304 goto debug_failed;
1305 }
1306
1307 /* Setup dumpDif */
1308 snprintf(name, sizeof(name), "dumpDif");
1309 phba->debug_dumpDif =
1310 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
1311 phba->hba_debugfs_root,
1312 phba, &lpfc_debugfs_op_dumpDif);
1313 if (!phba->debug_dumpDif) {
1314 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1315 "0801 Cannot create debugfs dumpDif\n");
1316 goto debug_failed;
1317 }
1318
1319
1320
1179 /* Setup slow ring trace */ 1321 /* Setup slow ring trace */
1180 if (lpfc_debugfs_max_slow_ring_trc) { 1322 if (lpfc_debugfs_max_slow_ring_trc) {
1181 num = lpfc_debugfs_max_slow_ring_trc - 1; 1323 num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -1305,7 +1447,7 @@ debug_failed:
1305inline void 1447inline void
1306lpfc_debugfs_terminate(struct lpfc_vport *vport) 1448lpfc_debugfs_terminate(struct lpfc_vport *vport)
1307{ 1449{
1308#ifdef CONFIG_LPFC_DEBUG_FS 1450#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1309 struct lpfc_hba *phba = vport->phba; 1451 struct lpfc_hba *phba = vport->phba;
1310 1452
1311 if (vport->disc_trc) { 1453 if (vport->disc_trc) {
@@ -1340,6 +1482,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
1340 debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ 1482 debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
1341 phba->debug_dumpHostSlim = NULL; 1483 phba->debug_dumpHostSlim = NULL;
1342 } 1484 }
1485 if (phba->debug_dumpData) {
1486 debugfs_remove(phba->debug_dumpData); /* dumpData */
1487 phba->debug_dumpData = NULL;
1488 }
1489
1490 if (phba->debug_dumpDif) {
1491 debugfs_remove(phba->debug_dumpDif); /* dumpDif */
1492 phba->debug_dumpDif = NULL;
1493 }
1494
1343 if (phba->slow_ring_trc) { 1495 if (phba->slow_ring_trc) {
1344 kfree(phba->slow_ring_trc); 1496 kfree(phba->slow_ring_trc);
1345 phba->slow_ring_trc = NULL; 1497 phba->slow_ring_trc = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 31e86a55391d..03c7313a1012 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -21,7 +21,7 @@
21#ifndef _H_LPFC_DEBUG_FS 21#ifndef _H_LPFC_DEBUG_FS
22#define _H_LPFC_DEBUG_FS 22#define _H_LPFC_DEBUG_FS
23 23
24#ifdef CONFIG_LPFC_DEBUG_FS 24#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
25struct lpfc_debugfs_trc { 25struct lpfc_debugfs_trc {
26 char *fmt; 26 char *fmt;
27 uint32_t data1; 27 uint32_t data1;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 630bd28fb997..a8f30bdaff69 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -221,7 +221,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
221 /* For ELS_REQUEST64_CR, use the VPI by default */ 221 /* For ELS_REQUEST64_CR, use the VPI by default */
222 icmd->ulpContext = vport->vpi; 222 icmd->ulpContext = vport->vpi;
223 icmd->ulpCt_h = 0; 223 icmd->ulpCt_h = 0;
224 icmd->ulpCt_l = 1; 224 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
225 if (elscmd == ELS_CMD_ECHO)
226 icmd->ulpCt_l = 0; /* context = invalid RPI */
227 else
228 icmd->ulpCt_l = 1; /* context = VPI */
225 } 229 }
226 230
227 bpl = (struct ulp_bde64 *) pbuflist->virt; 231 bpl = (struct ulp_bde64 *) pbuflist->virt;
@@ -271,7 +275,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
271 return elsiocb; 275 return elsiocb;
272 276
273els_iocb_free_pbuf_exit: 277els_iocb_free_pbuf_exit:
274 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 278 if (expectRsp)
279 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
275 kfree(pbuflist); 280 kfree(pbuflist);
276 281
277els_iocb_free_prsp_exit: 282els_iocb_free_prsp_exit:
@@ -2468,6 +2473,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2468 case IOSTAT_LOCAL_REJECT: 2473 case IOSTAT_LOCAL_REJECT:
2469 switch ((irsp->un.ulpWord[4] & 0xff)) { 2474 switch ((irsp->un.ulpWord[4] & 0xff)) {
2470 case IOERR_LOOP_OPEN_FAILURE: 2475 case IOERR_LOOP_OPEN_FAILURE:
2476 if (cmd == ELS_CMD_FLOGI) {
2477 if (PCI_DEVICE_ID_HORNET ==
2478 phba->pcidev->device) {
2479 phba->fc_topology = TOPOLOGY_LOOP;
2480 phba->pport->fc_myDID = 0;
2481 phba->alpa_map[0] = 0;
2482 phba->alpa_map[1] = 0;
2483 }
2484 }
2471 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 2485 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
2472 delay = 1000; 2486 delay = 1000;
2473 retry = 1; 2487 retry = 1;
@@ -3823,27 +3837,21 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3823 while (payload_len) { 3837 while (payload_len) {
3824 rscn_did.un.word = be32_to_cpu(*lp++); 3838 rscn_did.un.word = be32_to_cpu(*lp++);
3825 payload_len -= sizeof(uint32_t); 3839 payload_len -= sizeof(uint32_t);
3826 switch (rscn_did.un.b.resv) { 3840 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3827 case 0: /* Single N_Port ID effected */ 3841 case RSCN_ADDRESS_FORMAT_PORT:
3828 if (ns_did.un.word == rscn_did.un.word) 3842 if (ns_did.un.word == rscn_did.un.word)
3829 goto return_did_out; 3843 goto return_did_out;
3830 break; 3844 break;
3831 case 1: /* Whole N_Port Area effected */ 3845 case RSCN_ADDRESS_FORMAT_AREA:
3832 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 3846 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3833 && (ns_did.un.b.area == rscn_did.un.b.area)) 3847 && (ns_did.un.b.area == rscn_did.un.b.area))
3834 goto return_did_out; 3848 goto return_did_out;
3835 break; 3849 break;
3836 case 2: /* Whole N_Port Domain effected */ 3850 case RSCN_ADDRESS_FORMAT_DOMAIN:
3837 if (ns_did.un.b.domain == rscn_did.un.b.domain) 3851 if (ns_did.un.b.domain == rscn_did.un.b.domain)
3838 goto return_did_out; 3852 goto return_did_out;
3839 break; 3853 break;
3840 default: 3854 case RSCN_ADDRESS_FORMAT_FABRIC:
3841 /* Unknown Identifier in RSCN node */
3842 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3843 "0217 Unknown Identifier in "
3844 "RSCN payload Data: x%x\n",
3845 rscn_did.un.word);
3846 case 3: /* Whole Fabric effected */
3847 goto return_did_out; 3855 goto return_did_out;
3848 } 3856 }
3849 } 3857 }
@@ -3887,6 +3895,49 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3887} 3895}
3888 3896
3889/** 3897/**
3898 * lpfc_send_rscn_event: Send an RSCN event to management application.
3899 * @vport: pointer to a host virtual N_Port data structure.
3900 * @cmdiocb: pointer to lpfc command iocb data structure.
3901 *
3902 * lpfc_send_rscn_event sends an RSCN netlink event to management
3903 * applications.
3904 */
3905static void
3906lpfc_send_rscn_event(struct lpfc_vport *vport,
3907 struct lpfc_iocbq *cmdiocb)
3908{
3909 struct lpfc_dmabuf *pcmd;
3910 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3911 uint32_t *payload_ptr;
3912 uint32_t payload_len;
3913 struct lpfc_rscn_event_header *rscn_event_data;
3914
3915 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3916 payload_ptr = (uint32_t *) pcmd->virt;
3917 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
3918
3919 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
3920 payload_len, GFP_KERNEL);
3921 if (!rscn_event_data) {
3922 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3923 "0147 Failed to allocate memory for RSCN event\n");
3924 return;
3925 }
3926 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
3927 rscn_event_data->payload_length = payload_len;
3928 memcpy(rscn_event_data->rscn_payload, payload_ptr,
3929 payload_len);
3930
3931 fc_host_post_vendor_event(shost,
3932 fc_get_event_number(),
3933 sizeof(struct lpfc_els_event_header) + payload_len,
3934 (char *)rscn_event_data,
3935 LPFC_NL_VENDOR_ID);
3936
3937 kfree(rscn_event_data);
3938}
3939
3940/**
3890 * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb. 3941 * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb.
3891 * @vport: pointer to a host virtual N_Port data structure. 3942 * @vport: pointer to a host virtual N_Port data structure.
3892 * @cmdiocb: pointer to lpfc command iocb data structure. 3943 * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -3933,6 +3984,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3933 "0214 RSCN received Data: x%x x%x x%x x%x\n", 3984 "0214 RSCN received Data: x%x x%x x%x x%x\n",
3934 vport->fc_flag, payload_len, *lp, 3985 vport->fc_flag, payload_len, *lp,
3935 vport->fc_rscn_id_cnt); 3986 vport->fc_rscn_id_cnt);
3987
3988 /* Send an RSCN event to the management application */
3989 lpfc_send_rscn_event(vport, cmdiocb);
3990
3936 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 3991 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
3937 fc_host_post_event(shost, fc_get_event_number(), 3992 fc_host_post_event(shost, fc_get_event_number(),
3938 FCH_EVT_RSCN, lp[i]); 3993 FCH_EVT_RSCN, lp[i]);
@@ -4884,10 +4939,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
4884 uint32_t timeout; 4939 uint32_t timeout;
4885 uint32_t remote_ID = 0xffffffff; 4940 uint32_t remote_ID = 0xffffffff;
4886 4941
4887 /* If the timer is already canceled do nothing */
4888 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
4889 return;
4890 }
4891 spin_lock_irq(&phba->hbalock); 4942 spin_lock_irq(&phba->hbalock);
4892 timeout = (uint32_t)(phba->fc_ratov << 1); 4943 timeout = (uint32_t)(phba->fc_ratov << 1);
4893 4944
@@ -5128,7 +5179,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
5128 fc_get_event_number(), 5179 fc_get_event_number(),
5129 sizeof(lsrjt_event), 5180 sizeof(lsrjt_event),
5130 (char *)&lsrjt_event, 5181 (char *)&lsrjt_event,
5131 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5182 LPFC_NL_VENDOR_ID);
5132 return; 5183 return;
5133 } 5184 }
5134 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 5185 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
@@ -5146,7 +5197,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
5146 fc_get_event_number(), 5197 fc_get_event_number(),
5147 sizeof(fabric_event), 5198 sizeof(fabric_event),
5148 (char *)&fabric_event, 5199 (char *)&fabric_event,
5149 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5200 LPFC_NL_VENDOR_ID);
5150 return; 5201 return;
5151 } 5202 }
5152 5203
@@ -5164,32 +5215,68 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
5164static void 5215static void
5165lpfc_send_els_event(struct lpfc_vport *vport, 5216lpfc_send_els_event(struct lpfc_vport *vport,
5166 struct lpfc_nodelist *ndlp, 5217 struct lpfc_nodelist *ndlp,
5167 uint32_t cmd) 5218 uint32_t *payload)
5168{ 5219{
5169 struct lpfc_els_event_header els_data; 5220 struct lpfc_els_event_header *els_data = NULL;
5221 struct lpfc_logo_event *logo_data = NULL;
5170 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5222 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5171 5223
5172 els_data.event_type = FC_REG_ELS_EVENT; 5224 if (*payload == ELS_CMD_LOGO) {
5173 switch (cmd) { 5225 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
5226 if (!logo_data) {
5227 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5228 "0148 Failed to allocate memory "
5229 "for LOGO event\n");
5230 return;
5231 }
5232 els_data = &logo_data->header;
5233 } else {
5234 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
5235 GFP_KERNEL);
5236 if (!els_data) {
5237 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5238 "0149 Failed to allocate memory "
5239 "for ELS event\n");
5240 return;
5241 }
5242 }
5243 els_data->event_type = FC_REG_ELS_EVENT;
5244 switch (*payload) {
5174 case ELS_CMD_PLOGI: 5245 case ELS_CMD_PLOGI:
5175 els_data.subcategory = LPFC_EVENT_PLOGI_RCV; 5246 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
5176 break; 5247 break;
5177 case ELS_CMD_PRLO: 5248 case ELS_CMD_PRLO:
5178 els_data.subcategory = LPFC_EVENT_PRLO_RCV; 5249 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
5179 break; 5250 break;
5180 case ELS_CMD_ADISC: 5251 case ELS_CMD_ADISC:
5181 els_data.subcategory = LPFC_EVENT_ADISC_RCV; 5252 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
5253 break;
5254 case ELS_CMD_LOGO:
5255 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
5256 /* Copy the WWPN in the LOGO payload */
5257 memcpy(logo_data->logo_wwpn, &payload[2],
5258 sizeof(struct lpfc_name));
5182 break; 5259 break;
5183 default: 5260 default:
5184 return; 5261 return;
5185 } 5262 }
5186 memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 5263 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
5187 memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 5264 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
5188 fc_host_post_vendor_event(shost, 5265 if (*payload == ELS_CMD_LOGO) {
5189 fc_get_event_number(), 5266 fc_host_post_vendor_event(shost,
5190 sizeof(els_data), 5267 fc_get_event_number(),
5191 (char *)&els_data, 5268 sizeof(struct lpfc_logo_event),
5192 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5269 (char *)logo_data,
5270 LPFC_NL_VENDOR_ID);
5271 kfree(logo_data);
5272 } else {
5273 fc_host_post_vendor_event(shost,
5274 fc_get_event_number(),
5275 sizeof(struct lpfc_els_event_header),
5276 (char *)els_data,
5277 LPFC_NL_VENDOR_ID);
5278 kfree(els_data);
5279 }
5193 5280
5194 return; 5281 return;
5195} 5282}
@@ -5296,7 +5383,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5296 phba->fc_stat.elsRcvPLOGI++; 5383 phba->fc_stat.elsRcvPLOGI++;
5297 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 5384 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
5298 5385
5299 lpfc_send_els_event(vport, ndlp, cmd); 5386 lpfc_send_els_event(vport, ndlp, payload);
5300 if (vport->port_state < LPFC_DISC_AUTH) { 5387 if (vport->port_state < LPFC_DISC_AUTH) {
5301 if (!(phba->pport->fc_flag & FC_PT2PT) || 5388 if (!(phba->pport->fc_flag & FC_PT2PT) ||
5302 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 5389 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -5334,6 +5421,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5334 did, vport->port_state, ndlp->nlp_flag); 5421 did, vport->port_state, ndlp->nlp_flag);
5335 5422
5336 phba->fc_stat.elsRcvLOGO++; 5423 phba->fc_stat.elsRcvLOGO++;
5424 lpfc_send_els_event(vport, ndlp, payload);
5337 if (vport->port_state < LPFC_DISC_AUTH) { 5425 if (vport->port_state < LPFC_DISC_AUTH) {
5338 rjt_err = LSRJT_UNABLE_TPC; 5426 rjt_err = LSRJT_UNABLE_TPC;
5339 break; 5427 break;
@@ -5346,7 +5434,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5346 did, vport->port_state, ndlp->nlp_flag); 5434 did, vport->port_state, ndlp->nlp_flag);
5347 5435
5348 phba->fc_stat.elsRcvPRLO++; 5436 phba->fc_stat.elsRcvPRLO++;
5349 lpfc_send_els_event(vport, ndlp, cmd); 5437 lpfc_send_els_event(vport, ndlp, payload);
5350 if (vport->port_state < LPFC_DISC_AUTH) { 5438 if (vport->port_state < LPFC_DISC_AUTH) {
5351 rjt_err = LSRJT_UNABLE_TPC; 5439 rjt_err = LSRJT_UNABLE_TPC;
5352 break; 5440 break;
@@ -5364,7 +5452,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5364 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 5452 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
5365 did, vport->port_state, ndlp->nlp_flag); 5453 did, vport->port_state, ndlp->nlp_flag);
5366 5454
5367 lpfc_send_els_event(vport, ndlp, cmd); 5455 lpfc_send_els_event(vport, ndlp, payload);
5368 phba->fc_stat.elsRcvADISC++; 5456 phba->fc_stat.elsRcvADISC++;
5369 if (vport->port_state < LPFC_DISC_AUTH) { 5457 if (vport->port_state < LPFC_DISC_AUTH) {
5370 rjt_err = LSRJT_UNABLE_TPC; 5458 rjt_err = LSRJT_UNABLE_TPC;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a1a70d9ffc2a..8c64494444bf 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -350,7 +350,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
350 evt_data_size = sizeof(fast_evt_data->un. 350 evt_data_size = sizeof(fast_evt_data->un.
351 read_check_error); 351 read_check_error);
352 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || 352 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
353 (evt_sub_category == IOSTAT_NPORT_BSY)) { 353 (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
354 evt_data = (char *) &fast_evt_data->un.fabric_evt; 354 evt_data = (char *) &fast_evt_data->un.fabric_evt;
355 evt_data_size = sizeof(fast_evt_data->un.fabric_evt); 355 evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
356 } else { 356 } else {
@@ -387,7 +387,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
387 fc_get_event_number(), 387 fc_get_event_number(),
388 evt_data_size, 388 evt_data_size,
389 evt_data, 389 evt_data,
390 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 390 LPFC_NL_VENDOR_ID);
391 391
392 lpfc_free_fast_evt(phba, fast_evt_data); 392 lpfc_free_fast_evt(phba, fast_evt_data);
393 return; 393 return;
@@ -585,20 +585,25 @@ lpfc_do_work(void *p)
585 set_user_nice(current, -20); 585 set_user_nice(current, -20);
586 phba->data_flags = 0; 586 phba->data_flags = 0;
587 587
588 while (1) { 588 while (!kthread_should_stop()) {
589 /* wait and check worker queue activities */ 589 /* wait and check worker queue activities */
590 rc = wait_event_interruptible(phba->work_waitq, 590 rc = wait_event_interruptible(phba->work_waitq,
591 (test_and_clear_bit(LPFC_DATA_READY, 591 (test_and_clear_bit(LPFC_DATA_READY,
592 &phba->data_flags) 592 &phba->data_flags)
593 || kthread_should_stop())); 593 || kthread_should_stop()));
594 BUG_ON(rc); 594 /* Signal wakeup shall terminate the worker thread */
595 595 if (rc) {
596 if (kthread_should_stop()) 596 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
597 "0433 Wakeup on signal: rc=x%x\n", rc);
597 break; 598 break;
599 }
598 600
599 /* Attend pending lpfc data processing */ 601 /* Attend pending lpfc data processing */
600 lpfc_work_done(phba); 602 lpfc_work_done(phba);
601 } 603 }
604 phba->worker_thread = NULL;
605 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
606 "0432 Worker thread stopped.\n");
602 return 0; 607 return 0;
603} 608}
604 609
@@ -1852,6 +1857,32 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1852 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 1857 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1853 NLP_STE_UNUSED_NODE); 1858 NLP_STE_UNUSED_NODE);
1854} 1859}
1860/**
1861 * lpfc_initialize_node: Initialize all fields of node object.
1862 * @vport: Pointer to Virtual Port object.
1863 * @ndlp: Pointer to FC node object.
1864 * @did: FC_ID of the node.
 1865 * This function is always called when a node object needs to
 1866 * be initialized. It initializes all the fields of the node
 1867 * object.
1868 **/
1869static inline void
1870lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1871 uint32_t did)
1872{
1873 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
1874 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
1875 init_timer(&ndlp->nlp_delayfunc);
1876 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
1877 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
1878 ndlp->nlp_DID = did;
1879 ndlp->vport = vport;
1880 ndlp->nlp_sid = NLP_NO_SID;
1881 kref_init(&ndlp->kref);
1882 NLP_INT_NODE_ACT(ndlp);
1883 atomic_set(&ndlp->cmd_pending, 0);
1884 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
1885}
1855 1886
1856struct lpfc_nodelist * 1887struct lpfc_nodelist *
1857lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1888lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
@@ -1892,17 +1923,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1892 /* re-initialize ndlp except of ndlp linked list pointer */ 1923 /* re-initialize ndlp except of ndlp linked list pointer */
1893 memset((((char *)ndlp) + sizeof (struct list_head)), 0, 1924 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
1894 sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); 1925 sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
1895 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 1926 lpfc_initialize_node(vport, ndlp, did);
1896 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
1897 init_timer(&ndlp->nlp_delayfunc);
1898 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
1899 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
1900 ndlp->nlp_DID = did;
1901 ndlp->vport = vport;
1902 ndlp->nlp_sid = NLP_NO_SID;
1903 /* ndlp management re-initialize */
1904 kref_init(&ndlp->kref);
1905 NLP_INT_NODE_ACT(ndlp);
1906 1927
1907 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 1928 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
1908 1929
@@ -3116,19 +3137,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3116 uint32_t did) 3137 uint32_t did)
3117{ 3138{
3118 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 3139 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
3119 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 3140
3120 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); 3141 lpfc_initialize_node(vport, ndlp, did);
3121 init_timer(&ndlp->nlp_delayfunc);
3122 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
3123 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
3124 ndlp->nlp_DID = did;
3125 ndlp->vport = vport;
3126 ndlp->nlp_sid = NLP_NO_SID;
3127 INIT_LIST_HEAD(&ndlp->nlp_listp); 3142 INIT_LIST_HEAD(&ndlp->nlp_listp);
3128 kref_init(&ndlp->kref);
3129 NLP_INT_NODE_ACT(ndlp);
3130 atomic_set(&ndlp->cmd_pending, 0);
3131 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
3132 3143
3133 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 3144 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
3134 "node init: did:x%x", 3145 "node init: did:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5de5dabbbee6..4168c7b498b8 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -65,6 +65,9 @@
65#define SLI3_IOCB_RSP_SIZE 64 65#define SLI3_IOCB_RSP_SIZE 64
66 66
67 67
68/* vendor ID used in SCSI netlink calls */
69#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
70
68/* Common Transport structures and definitions */ 71/* Common Transport structures and definitions */
69 72
70union CtRevisionId { 73union CtRevisionId {
@@ -866,6 +869,12 @@ typedef struct _D_ID { /* Structure is in Big Endian format */
866 } un; 869 } un;
867} D_ID; 870} D_ID;
868 871
872#define RSCN_ADDRESS_FORMAT_PORT 0x0
873#define RSCN_ADDRESS_FORMAT_AREA 0x1
874#define RSCN_ADDRESS_FORMAT_DOMAIN 0x2
875#define RSCN_ADDRESS_FORMAT_FABRIC 0x3
876#define RSCN_ADDRESS_FORMAT_MASK 0x3
877
869/* 878/*
870 * Structure to define all ELS Payload types 879 * Structure to define all ELS Payload types
871 */ 880 */
@@ -1535,6 +1544,108 @@ typedef struct ULP_BDL { /* SLI-2 */
1535 uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */ 1544 uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
1536} ULP_BDL; 1545} ULP_BDL;
1537 1546
1547/*
1548 * BlockGuard Definitions
1549 */
1550
1551enum lpfc_protgrp_type {
1552 LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors */
1553 LPFC_PG_TYPE_NO_DIF, /* no DIF data pointed to by prot grp */
1554 LPFC_PG_TYPE_EMBD_DIF, /* DIF is embedded (inline) with data */
1555 LPFC_PG_TYPE_DIF_BUF /* DIF has its own scatter/gather list */
1556};
1557
1558/* PDE Descriptors */
1559#define LPFC_PDE1_DESCRIPTOR 0x81
1560#define LPFC_PDE2_DESCRIPTOR 0x82
1561#define LPFC_PDE3_DESCRIPTOR 0x83
1562
1563/* BlockGuard Profiles */
1564enum lpfc_bg_prof_codes {
1565 LPFC_PROF_INVALID,
1566 LPFC_PROF_A1 = 128, /* Full Protection */
1567 LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */
1568 LPFC_PROF_A3,
1569 LPFC_PROF_A4,
1570 LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */
1571 LPFC_PROF_B2,
1572 LPFC_PROF_B3,
1573 LPFC_PROF_C1, /* Separate DIFs: C1~C3 */
1574 LPFC_PROF_C2,
1575 LPFC_PROF_C3,
1576 LPFC_PROF_D1, /* Full Protection */
1577 LPFC_PROF_D2, /* Partial Protection & Check Disabling */
1578 LPFC_PROF_D3,
1579 LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */
1580 LPFC_PROF_E2,
1581 LPFC_PROF_E3,
1582 LPFC_PROF_E4,
1583 LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */
1584 /* F1 Translation BDE */
1585 LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */
1586 LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */
1587 LPFC_PROF_ANT2,
1588 LPFC_PROF_AST2
1589};
1590
1591/* BlockGuard error-control defines */
1592#define BG_EC_STOP_ERR 0x00
1593#define BG_EC_CONT_ERR 0x01
1594#define BG_EC_IGN_UNINIT_STOP_ERR 0x10
1595#define BG_EC_IGN_UNINIT_CONT_ERR 0x11
1596
1597/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */
1598#define PDE_DESC_TYPE_MASK 0xff000000
1599#define PDE_DESC_TYPE_SHIFT 24
1600#define PDE_BG_PROFILE_MASK 0x00ff0000
1601#define PDE_BG_PROFILE_SHIFT 16
1602#define PDE_BLOCK_LEN_MASK 0x0000fffc
1603#define PDE_BLOCK_LEN_SHIFT 2
1604#define PDE_ERR_CTRL_MASK 0x00000003
1605#define PDE_ERR_CTRL_SHIFT 0
1606/* PDE word 1 bit masks and shifts */
1607#define PDE_APPTAG_MASK_MASK 0xffff0000
1608#define PDE_APPTAG_MASK_SHIFT 16
1609#define PDE_APPTAG_VAL_MASK 0x0000ffff
1610#define PDE_APPTAG_VAL_SHIFT 0
1611struct lpfc_pde {
1612 uint32_t parms; /* bitfields of descriptor, prof, len, and ec */
 1613 uint32_t apptag; /* bitfields of app tag mask and app tag value */
1614 uint32_t reftag; /* reference tag occupying all 32 bits */
1615};
1616
1617/* inline function to set fields in parms of PDE */
1618static inline void
1619lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
1620{
1621 uint32_t *wp = &p->parms;
1622
1623 /* spec indicates that adapter appends two 0's to length field */
1624 len = len >> 2;
1625
1626 *wp &= 0;
1627 *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
1628 *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
1629 *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
1630 *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
1631 *wp = le32_to_cpu(*wp);
1632}
1633
1634/* inline function to set apptag and reftag fields of PDE */
1635static inline void
1636lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
1637 u32 reftag)
1638{
1639 uint32_t *wp = &p->apptag;
1640 *wp &= 0;
1641 *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
1642 *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
1643 *wp = le32_to_cpu(*wp);
1644 wp = &p->reftag;
1645 *wp = le32_to_cpu(reftag);
1646}
1647
1648
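The two inline helpers above only pack bit-fields, so a brief illustrative sketch may help; it is not part of the patch, the wrapper name example_fill_pde is invented, and the descriptor, profile, block length and tag values are arbitrary examples.

static void example_fill_pde(struct lpfc_pde *pde)
{
	/* Word 0: descriptor type, BlockGuard profile, block length
	 * (the helper shifts the length right by two bits, per the
	 * spec comment above) and error control. */
	lpfc_pde_set_bg_parms(pde, LPFC_PDE1_DESCRIPTOR, LPFC_PROF_A1,
			      512, BG_EC_STOP_ERR);

	/* Words 1-2: application tag mask/value and the reference tag. */
	lpfc_pde_set_dif_parms(pde, 0xffff, 0x0001, 0x12345678);
}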
1538/* Structure for MB Command LOAD_SM and DOWN_LOAD */ 1649/* Structure for MB Command LOAD_SM and DOWN_LOAD */
1539 1650
1540typedef struct { 1651typedef struct {
@@ -2359,6 +2470,30 @@ typedef struct {
2359#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2470#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2360#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2471#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2361 2472
2473#define WAKE_UP_PARMS_REGION_ID 4
2474#define WAKE_UP_PARMS_WORD_SIZE 15
2475
2476/* Option rom version structure */
2477struct prog_id {
2478#ifdef __BIG_ENDIAN_BITFIELD
2479 uint8_t type;
2480 uint8_t id;
2481 uint32_t ver:4; /* Major Version */
2482 uint32_t rev:4; /* Revision */
2483 uint32_t lev:2; /* Level */
2484 uint32_t dist:2; /* Dist Type */
2485 uint32_t num:4; /* number after dist type */
2486#else /* __LITTLE_ENDIAN_BITFIELD */
2487 uint32_t num:4; /* number after dist type */
2488 uint32_t dist:2; /* Dist Type */
2489 uint32_t lev:2; /* Level */
2490 uint32_t rev:4; /* Revision */
2491 uint32_t ver:4; /* Major Version */
2492 uint8_t id;
2493 uint8_t type;
2494#endif
2495};
2496
2362/* Structure for MB Command UPDATE_CFG (0x1B) */ 2497/* Structure for MB Command UPDATE_CFG (0x1B) */
2363 2498
2364struct update_cfg_var { 2499struct update_cfg_var {
@@ -2552,11 +2687,19 @@ typedef struct {
2552 2687
2553 uint32_t pcbLow; /* bit 31:0 of memory based port config block */ 2688 uint32_t pcbLow; /* bit 31:0 of memory based port config block */
2554 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */ 2689 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
2555 uint32_t hbainit[6]; 2690 uint32_t hbainit[5];
2691#ifdef __BIG_ENDIAN_BITFIELD
2692 uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
2693 uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
2694#else /* __LITTLE_ENDIAN */
2695 uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
2696 uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
2697#endif
2556 2698
2557#ifdef __BIG_ENDIAN_BITFIELD 2699#ifdef __BIG_ENDIAN_BITFIELD
2558 uint32_t rsvd : 24; /* Reserved */ 2700 uint32_t rsvd1 : 23; /* Reserved */
2559 uint32_t cmv : 1; /* Configure Max VPIs */ 2701 uint32_t cbg : 1; /* Configure BlockGuard */
2702 uint32_t cmv : 1; /* Configure Max VPIs */
2560 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2703 uint32_t ccrp : 1; /* Config Command Ring Polling */
2561 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 2704 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
2562 uint32_t chbs : 1; /* Cofigure Host Backing store */ 2705 uint32_t chbs : 1; /* Cofigure Host Backing store */
@@ -2573,10 +2716,12 @@ typedef struct {
2573 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 2716 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
2574 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2717 uint32_t ccrp : 1; /* Config Command Ring Polling */
2575 uint32_t cmv : 1; /* Configure Max VPIs */ 2718 uint32_t cmv : 1; /* Configure Max VPIs */
2576 uint32_t rsvd : 24; /* Reserved */ 2719 uint32_t cbg : 1; /* Configure BlockGuard */
2720 uint32_t rsvd1 : 23; /* Reserved */
2577#endif 2721#endif
2578#ifdef __BIG_ENDIAN_BITFIELD 2722#ifdef __BIG_ENDIAN_BITFIELD
2579 uint32_t rsvd2 : 24; /* Reserved */ 2723 uint32_t rsvd2 : 23; /* Reserved */
2724 uint32_t gbg : 1; /* Grant BlockGuard */
2580 uint32_t gmv : 1; /* Grant Max VPIs */ 2725 uint32_t gmv : 1; /* Grant Max VPIs */
2581 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2726 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2582 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ 2727 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
@@ -2594,7 +2739,8 @@ typedef struct {
2594 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ 2739 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
2595 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2740 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2596 uint32_t gmv : 1; /* Grant Max VPIs */ 2741 uint32_t gmv : 1; /* Grant Max VPIs */
2597 uint32_t rsvd2 : 24; /* Reserved */ 2742 uint32_t gbg : 1; /* Grant BlockGuard */
2743 uint32_t rsvd2 : 23; /* Reserved */
2598#endif 2744#endif
2599 2745
2600#ifdef __BIG_ENDIAN_BITFIELD 2746#ifdef __BIG_ENDIAN_BITFIELD
@@ -3214,6 +3360,94 @@ struct que_xri64cx_ext_fields {
3214 struct lpfc_hbq_entry buff[5]; 3360 struct lpfc_hbq_entry buff[5];
3215}; 3361};
3216 3362
3363struct sli3_bg_fields {
3364 uint32_t filler[6]; /* word 8-13 in IOCB */
3365 uint32_t bghm; /* word 14 - BlockGuard High Water Mark */
3366/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */
3367#define BGS_BIDIR_BG_PROF_MASK 0xff000000
3368#define BGS_BIDIR_BG_PROF_SHIFT 24
3369#define BGS_BIDIR_ERR_COND_FLAGS_MASK 0x003f0000
3370#define BGS_BIDIR_ERR_COND_SHIFT 16
3371#define BGS_BG_PROFILE_MASK 0x0000ff00
3372#define BGS_BG_PROFILE_SHIFT 8
3373#define BGS_INVALID_PROF_MASK 0x00000020
3374#define BGS_INVALID_PROF_SHIFT 5
3375#define BGS_UNINIT_DIF_BLOCK_MASK 0x00000010
3376#define BGS_UNINIT_DIF_BLOCK_SHIFT 4
3377#define BGS_HI_WATER_MARK_PRESENT_MASK 0x00000008
3378#define BGS_HI_WATER_MARK_PRESENT_SHIFT 3
3379#define BGS_REFTAG_ERR_MASK 0x00000004
3380#define BGS_REFTAG_ERR_SHIFT 2
3381#define BGS_APPTAG_ERR_MASK 0x00000002
3382#define BGS_APPTAG_ERR_SHIFT 1
3383#define BGS_GUARD_ERR_MASK 0x00000001
3384#define BGS_GUARD_ERR_SHIFT 0
3385 uint32_t bgstat; /* word 15 - BlockGuard Status */
3386};
3387
3388static inline uint32_t
3389lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
3390{
3391 return (le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >>
3392 BGS_BIDIR_BG_PROF_SHIFT;
3393}
3394
3395static inline uint32_t
3396lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
3397{
3398 return (le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
3399 BGS_BIDIR_ERR_COND_SHIFT;
3400}
3401
3402static inline uint32_t
3403lpfc_bgs_get_bg_prof(uint32_t bgstat)
3404{
3405 return (le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >>
3406 BGS_BG_PROFILE_SHIFT;
3407}
3408
3409static inline uint32_t
3410lpfc_bgs_get_invalid_prof(uint32_t bgstat)
3411{
3412 return (le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >>
3413 BGS_INVALID_PROF_SHIFT;
3414}
3415
3416static inline uint32_t
3417lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
3418{
3419 return (le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >>
3420 BGS_UNINIT_DIF_BLOCK_SHIFT;
3421}
3422
3423static inline uint32_t
3424lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
3425{
3426 return (le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >>
3427 BGS_HI_WATER_MARK_PRESENT_SHIFT;
3428}
3429
3430static inline uint32_t
3431lpfc_bgs_get_reftag_err(uint32_t bgstat)
3432{
3433 return (le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >>
3434 BGS_REFTAG_ERR_SHIFT;
3435}
3436
3437static inline uint32_t
3438lpfc_bgs_get_apptag_err(uint32_t bgstat)
3439{
3440 return (le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >>
3441 BGS_APPTAG_ERR_SHIFT;
3442}
3443
3444static inline uint32_t
3445lpfc_bgs_get_guard_err(uint32_t bgstat)
3446{
3447 return (le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >>
3448 BGS_GUARD_ERR_SHIFT;
3449}
3450
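As a rough sketch of how these accessors might be consumed (not part of the patch; the function name and message text are invented), a completion path could classify a BlockGuard status word like this:

static void example_report_bg_errors(uint32_t bgstat)
{
	if (lpfc_bgs_get_guard_err(bgstat))
		printk(KERN_ERR "BLKGRD: guard tag error\n");
	if (lpfc_bgs_get_reftag_err(bgstat))
		printk(KERN_ERR "BLKGRD: reference tag error\n");
	if (lpfc_bgs_get_apptag_err(bgstat))
		printk(KERN_ERR "BLKGRD: application tag error\n");
	if (lpfc_bgs_get_invalid_prof(bgstat))
		printk(KERN_ERR "BLKGRD: invalid profile 0x%x\n",
		       lpfc_bgs_get_bg_prof(bgstat));
}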
3217#define LPFC_EXT_DATA_BDE_COUNT 3 3451#define LPFC_EXT_DATA_BDE_COUNT 3
3218struct fcp_irw_ext { 3452struct fcp_irw_ext {
3219 uint32_t io_tag64_low; 3453 uint32_t io_tag64_low;
@@ -3322,6 +3556,9 @@ typedef struct _IOCB { /* IOCB structure */
3322 struct que_xri64cx_ext_fields que_xri64cx_ext_words; 3556 struct que_xri64cx_ext_fields que_xri64cx_ext_words;
3323 struct fcp_irw_ext fcp_ext; 3557 struct fcp_irw_ext fcp_ext;
3324 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ 3558 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
3559
3560 /* words 8-15 for BlockGuard */
3561 struct sli3_bg_fields sli3_bg;
3325 } unsli3; 3562 } unsli3;
3326 3563
3327#define ulpCt_h ulpXS 3564#define ulpCt_h ulpXS
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 909be3301bba..4c77038c8f1c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -45,6 +45,12 @@
45#include "lpfc_vport.h" 45#include "lpfc_vport.h"
46#include "lpfc_version.h" 46#include "lpfc_version.h"
47 47
48char *_dump_buf_data;
49unsigned long _dump_buf_data_order;
50char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock;
53
48static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
49static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
50static int lpfc_post_rcv_buf(struct lpfc_hba *); 56static int lpfc_post_rcv_buf(struct lpfc_hba *);
@@ -236,6 +242,51 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
236} 242}
237 243
238/** 244/**
245 * lpfc_dump_wakeup_param_cmpl: Completion handler for dump memory mailbox
246 * command used for getting wake up parameters.
247 * @phba: pointer to lpfc hba data structure.
248 * @pmboxq: pointer to the driver internal queue element for mailbox command.
249 *
250 * This is the completion handler for dump mailbox command for getting
251 * wake up parameters. When this command complete, the response contain
252 * Option rom version of the HBA. This function translate the version number
253 * into a human readable string and store it in OptionROMVersion.
254 **/
255static void
256lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
257{
258 struct prog_id *prg;
259 uint32_t prog_id_word;
260 char dist = ' ';
261 /* character array used for decoding dist type. */
262 char dist_char[] = "nabx";
263
264 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
265 mempool_free(pmboxq, phba->mbox_mem_pool);
266 return;
267 }
268
269 prg = (struct prog_id *) &prog_id_word;
270
 271 /* word 7 contains the option rom version */
272 prog_id_word = pmboxq->mb.un.varWords[7];
273
274 /* Decode the Option rom version word to a readable string */
275 if (prg->dist < 4)
276 dist = dist_char[prg->dist];
277
278 if ((prg->dist == 3) && (prg->num == 0))
279 sprintf(phba->OptionROMVersion, "%d.%d%d",
280 prg->ver, prg->rev, prg->lev);
281 else
282 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
283 prg->ver, prg->rev, prg->lev,
284 dist, prg->num);
285 mempool_free(pmboxq, phba->mbox_mem_pool);
286 return;
287}
288
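A hypothetical worked example of the decoding above (the field values are invented purely for illustration):

	struct prog_id prg = { .ver = 5, .rev = 0, .lev = 2,
			       .dist = 1, .num = 3 };

	/* dist_char[prg.dist] == 'a' and (dist, num) != (3, 0), so the
	 * second sprintf() above would produce OptionROMVersion "5.02a3".
	 * With dist == 3 and num == 0 the short "%d.%d%d" form applies
	 * and the string would be just "5.02". */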
289/**
239 * lpfc_config_port_post: Perform lpfc initialization after config port. 290 * lpfc_config_port_post: Perform lpfc initialization after config port.
240 * @phba: pointer to lpfc hba data structure. 291 * @phba: pointer to lpfc hba data structure.
241 * 292 *
@@ -482,6 +533,20 @@ lpfc_config_port_post(struct lpfc_hba *phba)
482 rc); 533 rc);
483 mempool_free(pmb, phba->mbox_mem_pool); 534 mempool_free(pmb, phba->mbox_mem_pool);
484 } 535 }
536
537 /* Get Option rom version */
538 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
539 lpfc_dump_wakeup_param(phba, pmb);
540 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
541 pmb->vport = phba->pport;
542 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
543
544 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 545 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
 546 "to get Option ROM version status x%x.\n", rc);
547 mempool_free(pmb, phba->mbox_mem_pool);
548 }
549
485 return 0; 550 return 0;
486} 551}
487 552
@@ -686,11 +751,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
686 return; 751 return;
687 752
688 spin_lock_irq(&phba->pport->work_port_lock); 753 spin_lock_irq(&phba->pport->work_port_lock);
689 /* If the timer is already canceled do nothing */
690 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
691 spin_unlock_irq(&phba->pport->work_port_lock);
692 return;
693 }
694 754
695 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 755 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
696 jiffies)) { 756 jiffies)) {
@@ -833,8 +893,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
833 fc_host_post_vendor_event(shost, fc_get_event_number(), 893 fc_host_post_vendor_event(shost, fc_get_event_number(),
834 sizeof(board_event), 894 sizeof(board_event),
835 (char *) &board_event, 895 (char *) &board_event,
836 SCSI_NL_VID_TYPE_PCI 896 LPFC_NL_VENDOR_ID);
837 | PCI_VENDOR_ID_EMULEX);
838 897
839 if (phba->work_hs & HS_FFER6) { 898 if (phba->work_hs & HS_FFER6) {
840 /* Re-establishing Link */ 899 /* Re-establishing Link */
@@ -1984,6 +2043,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
1984 shost->max_lun = vport->cfg_max_luns; 2043 shost->max_lun = vport->cfg_max_luns;
1985 shost->this_id = -1; 2044 shost->this_id = -1;
1986 shost->max_cmd_len = 16; 2045 shost->max_cmd_len = 16;
2046
1987 /* 2047 /*
1988 * Set initial can_queue value since 0 is no longer supported and 2048 * Set initial can_queue value since 0 is no longer supported and
1989 * scsi_add_host will fail. This will be adjusted later based on the 2049 * scsi_add_host will fail. This will be adjusted later based on the
@@ -2042,8 +2102,6 @@ destroy_port(struct lpfc_vport *vport)
2042 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2102 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2043 struct lpfc_hba *phba = vport->phba; 2103 struct lpfc_hba *phba = vport->phba;
2044 2104
2045 kfree(vport->vname);
2046
2047 lpfc_debugfs_terminate(vport); 2105 lpfc_debugfs_terminate(vport);
2048 fc_remove_host(shost); 2106 fc_remove_host(shost);
2049 scsi_remove_host(shost); 2107 scsi_remove_host(shost);
@@ -2226,8 +2284,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2226 ARRAY_SIZE(phba->msix_entries)); 2284 ARRAY_SIZE(phba->msix_entries));
2227 if (rc) { 2285 if (rc) {
2228 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2286 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2229 "0420 Enable MSI-X failed (%d), continuing " 2287 "0420 PCI enable MSI-X failed (%d)\n", rc);
2230 "with MSI\n", rc);
2231 goto msi_fail_out; 2288 goto msi_fail_out;
2232 } else 2289 } else
2233 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 2290 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
@@ -2244,9 +2301,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2244 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, 2301 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
2245 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); 2302 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
2246 if (rc) { 2303 if (rc) {
2247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2304 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2248 "0421 MSI-X slow-path request_irq failed " 2305 "0421 MSI-X slow-path request_irq failed "
2249 "(%d), continuing with MSI\n", rc); 2306 "(%d)\n", rc);
2250 goto msi_fail_out; 2307 goto msi_fail_out;
2251 } 2308 }
2252 2309
@@ -2255,9 +2312,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2255 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); 2312 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
2256 2313
2257 if (rc) { 2314 if (rc) {
2258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2315 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2259 "0429 MSI-X fast-path request_irq failed " 2316 "0429 MSI-X fast-path request_irq failed "
2260 "(%d), continuing with MSI\n", rc); 2317 "(%d)\n", rc);
2261 goto irq_fail_out; 2318 goto irq_fail_out;
2262 } 2319 }
2263 2320
@@ -2278,7 +2335,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2278 goto mbx_fail_out; 2335 goto mbx_fail_out;
2279 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2336 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2280 if (rc != MBX_SUCCESS) { 2337 if (rc != MBX_SUCCESS) {
2281 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2338 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2282 "0351 Config MSI mailbox command failed, " 2339 "0351 Config MSI mailbox command failed, "
2283 "mbxCmd x%x, mbxStatus x%x\n", 2340 "mbxCmd x%x, mbxStatus x%x\n",
2284 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 2341 pmb->mb.mbxCommand, pmb->mb.mbxStatus);
@@ -2327,6 +2384,195 @@ lpfc_disable_msix(struct lpfc_hba *phba)
2327} 2384}
2328 2385
2329/** 2386/**
2387 * lpfc_enable_msi: Enable MSI interrupt mode.
2388 * @phba: pointer to lpfc hba data structure.
2389 *
2390 * This routine is invoked to enable the MSI interrupt mode. The kernel
2391 * function pci_enable_msi() is called to enable the MSI vector. The
 2392 * device driver is responsible for calling request_irq() to register the
 2393 * MSI vector with an interrupt handler, which is done in this function.
2394 *
2395 * Return codes
 2396 * 0 - successful
2397 * other values - error
2398 */
2399static int
2400lpfc_enable_msi(struct lpfc_hba *phba)
2401{
2402 int rc;
2403
2404 rc = pci_enable_msi(phba->pcidev);
2405 if (!rc)
2406 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2407 "0462 PCI enable MSI mode success.\n");
2408 else {
2409 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2410 "0471 PCI enable MSI mode failed (%d)\n", rc);
2411 return rc;
2412 }
2413
2414 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2415 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2416 if (rc) {
2417 pci_disable_msi(phba->pcidev);
2418 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2419 "0478 MSI request_irq failed (%d)\n", rc);
2420 }
2421 return rc;
2422}
2423
2424/**
2425 * lpfc_disable_msi: Disable MSI interrupt mode.
2426 * @phba: pointer to lpfc hba data structure.
2427 *
2428 * This routine is invoked to disable the MSI interrupt mode. The driver
 2429 * calls free_irq() on the MSI vector for which it did request_irq() before
 2430 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
 2431 * leaves the device with MSI enabled, leaking its vector.
2432 */
2433
2434static void
2435lpfc_disable_msi(struct lpfc_hba *phba)
2436{
2437 free_irq(phba->pcidev->irq, phba);
2438 pci_disable_msi(phba->pcidev);
2439 return;
2440}
2441
2442/**
2443 * lpfc_log_intr_mode: Log the active interrupt mode
2444 * @phba: pointer to lpfc hba data structure.
2445 * @intr_mode: active interrupt mode adopted.
2446 *
 2447 * This routine is invoked to log the interrupt mode currently in use
 2448 * by the device.
2449 */
2450static void
2451lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
2452{
2453 switch (intr_mode) {
2454 case 0:
2455 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2456 "0470 Enable INTx interrupt mode.\n");
2457 break;
2458 case 1:
2459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2460 "0481 Enabled MSI interrupt mode.\n");
2461 break;
2462 case 2:
2463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2464 "0480 Enabled MSI-X interrupt mode.\n");
2465 break;
2466 default:
2467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2468 "0482 Illegal interrupt mode.\n");
2469 break;
2470 }
2471 return;
2472}
2473
2474static void
2475lpfc_stop_port(struct lpfc_hba *phba)
2476{
2477 /* Clear all interrupt enable conditions */
2478 writel(0, phba->HCregaddr);
2479 readl(phba->HCregaddr); /* flush */
2480 /* Clear all pending interrupts */
2481 writel(0xffffffff, phba->HAregaddr);
2482 readl(phba->HAregaddr); /* flush */
2483
2484 /* Reset some HBA SLI setup states */
2485 lpfc_stop_phba_timers(phba);
2486 phba->pport->work_port_events = 0;
2487
2488 return;
2489}
2490
2491/**
2492 * lpfc_enable_intr: Enable device interrupt.
2493 * @phba: pointer to lpfc hba data structure.
2494 *
 2495 * This routine is invoked to enable the device interrupt and associate the
 2496 * driver's interrupt handler(s) with interrupt vector(s). Depending on the
 2497 * interrupt mode configured for the driver, the driver will fall back from
 2498 * the configured interrupt mode to an interrupt mode which is supported by
 2499 * the platform, kernel, and device, in the order: MSI-X -> MSI -> IRQ.
2500 *
2501 * Return codes
 2502 * 0 - successful
2503 * other values - error
2504 **/
2505static uint32_t
2506lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2507{
2508 uint32_t intr_mode = LPFC_INTR_ERROR;
2509 int retval;
2510
2511 if (cfg_mode == 2) {
2512 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
2513 retval = lpfc_sli_config_port(phba, 3);
2514 if (!retval) {
2515 /* Now, try to enable MSI-X interrupt mode */
2516 retval = lpfc_enable_msix(phba);
2517 if (!retval) {
2518 /* Indicate initialization to MSI-X mode */
2519 phba->intr_type = MSIX;
2520 intr_mode = 2;
2521 }
2522 }
2523 }
2524
2525 /* Fallback to MSI if MSI-X initialization failed */
2526 if (cfg_mode >= 1 && phba->intr_type == NONE) {
2527 retval = lpfc_enable_msi(phba);
2528 if (!retval) {
2529 /* Indicate initialization to MSI mode */
2530 phba->intr_type = MSI;
2531 intr_mode = 1;
2532 }
2533 }
2534
 2535 /* Fall back to INTx if both MSI-X and MSI initialization failed */
2536 if (phba->intr_type == NONE) {
2537 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2538 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2539 if (!retval) {
2540 /* Indicate initialization to INTx mode */
2541 phba->intr_type = INTx;
2542 intr_mode = 0;
2543 }
2544 }
2545 return intr_mode;
2546}
2547
2548/**
2549 * lpfc_disable_intr: Disable device interrupt.
2550 * @phba: pointer to lpfc hba data structure.
2551 *
2552 * This routine is invoked to disable device interrupt and disassociate the
2553 * driver's interrupt handler(s) from interrupt vector(s). Depending on the
2554 * interrupt mode, the driver will release the interrupt vector(s) for the
2555 * message signaled interrupt.
2556 **/
2557static void
2558lpfc_disable_intr(struct lpfc_hba *phba)
2559{
2560 /* Disable the currently initialized interrupt mode */
2561 if (phba->intr_type == MSIX)
2562 lpfc_disable_msix(phba);
2563 else if (phba->intr_type == MSI)
2564 lpfc_disable_msi(phba);
2565 else if (phba->intr_type == INTx)
2566 free_irq(phba->pcidev->irq, phba);
2567
2568 /* Reset interrupt management states */
2569 phba->intr_type = NONE;
2570 phba->sli.slistat.sli_intr = 0;
2571
2572 return;
2573}
2574
2575/**
2330 * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem. 2576 * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
2331 * @pdev: pointer to PCI device 2577 * @pdev: pointer to PCI device
2332 * @pid: pointer to PCI device identifier 2578 * @pid: pointer to PCI device identifier
@@ -2356,6 +2602,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2356 int error = -ENODEV, retval; 2602 int error = -ENODEV, retval;
2357 int i, hbq_count; 2603 int i, hbq_count;
2358 uint16_t iotag; 2604 uint16_t iotag;
2605 uint32_t cfg_mode, intr_mode;
2359 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 2606 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2360 struct lpfc_adapter_event_header adapter_event; 2607 struct lpfc_adapter_event_header adapter_event;
2361 2608
@@ -2409,6 +2656,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2409 phba->eratt_poll.data = (unsigned long) phba; 2656 phba->eratt_poll.data = (unsigned long) phba;
2410 2657
2411 pci_set_master(pdev); 2658 pci_set_master(pdev);
2659 pci_save_state(pdev);
2412 pci_try_set_mwi(pdev); 2660 pci_try_set_mwi(pdev);
2413 2661
2414 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 2662 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
@@ -2557,7 +2805,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2557 lpfc_debugfs_initialize(vport); 2805 lpfc_debugfs_initialize(vport);
2558 2806
2559 pci_set_drvdata(pdev, shost); 2807 pci_set_drvdata(pdev, shost);
2560 phba->intr_type = NONE;
2561 2808
2562 phba->MBslimaddr = phba->slim_memmap_p; 2809 phba->MBslimaddr = phba->slim_memmap_p;
2563 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 2810 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
@@ -2565,63 +2812,58 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2565 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 2812 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
2566 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 2813 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
2567 2814
2568 /* Configure and enable interrupt */ 2815 /* Configure sysfs attributes */
2569 if (phba->cfg_use_msi == 2) {
2570 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
2571 error = lpfc_sli_config_port(phba, 3);
2572 if (error)
2573 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2574 "0427 Firmware not capable of SLI 3 mode.\n");
2575 else {
2576 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2577 "0426 Firmware capable of SLI 3 mode.\n");
2578 /* Now, try to enable MSI-X interrupt mode */
2579 error = lpfc_enable_msix(phba);
2580 if (!error) {
2581 phba->intr_type = MSIX;
2582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2583 "0430 enable MSI-X mode.\n");
2584 }
2585 }
2586 }
2587
2588 /* Fallback to MSI if MSI-X initialization failed */
2589 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
2590 retval = pci_enable_msi(phba->pcidev);
2591 if (!retval) {
2592 phba->intr_type = MSI;
2593 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2594 "0473 enable MSI mode.\n");
2595 } else
2596 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2597 "0452 enable IRQ mode.\n");
2598 }
2599
2600 /* MSI-X is the only case the doesn't need to call request_irq */
2601 if (phba->intr_type != MSIX) {
2602 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2603 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2604 if (retval) {
2605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable "
2606 "interrupt handler failed\n");
2607 error = retval;
2608 goto out_disable_msi;
2609 } else if (phba->intr_type != MSI)
2610 phba->intr_type = INTx;
2611 }
2612
2613 if (lpfc_alloc_sysfs_attr(vport)) { 2816 if (lpfc_alloc_sysfs_attr(vport)) {
2614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2615 "1476 Failed to allocate sysfs attr\n"); 2818 "1476 Failed to allocate sysfs attr\n");
2616 error = -ENOMEM; 2819 error = -ENOMEM;
2617 goto out_free_irq; 2820 goto out_destroy_port;
2618 } 2821 }
2619 2822
2620 if (lpfc_sli_hba_setup(phba)) { 2823 cfg_mode = phba->cfg_use_msi;
2621 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2824 while (true) {
2622 "1477 Failed to set up hba\n"); 2825 /* Configure and enable interrupt */
2623 error = -ENODEV; 2826 intr_mode = lpfc_enable_intr(phba, cfg_mode);
2624 goto out_remove_device; 2827 if (intr_mode == LPFC_INTR_ERROR) {
2828 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2829 "0426 Failed to enable interrupt.\n");
2830 goto out_free_sysfs_attr;
2831 }
2832 /* HBA SLI setup */
2833 if (lpfc_sli_hba_setup(phba)) {
2834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2835 "1477 Failed to set up hba\n");
2836 error = -ENODEV;
2837 goto out_remove_device;
2838 }
2839
2840 /* Wait 50ms for the interrupts of previous mailbox commands */
2841 msleep(50);
2842 /* Check active interrupts received */
2843 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2844 /* Log the current active interrupt mode */
2845 phba->intr_mode = intr_mode;
2846 lpfc_log_intr_mode(phba, intr_mode);
2847 break;
2848 } else {
2849 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2850 "0451 Configure interrupt mode (%d) "
2851 "failed active interrupt test.\n",
2852 intr_mode);
2853 if (intr_mode == 0) {
2854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2855 "0479 Failed to enable "
2856 "interrupt.\n");
2857 error = -ENODEV;
2858 goto out_remove_device;
2859 }
2860 /* Stop HBA SLI setups */
2861 lpfc_stop_port(phba);
2862 /* Disable the current interrupt mode */
2863 lpfc_disable_intr(phba);
2864 /* Try next level of interrupt mode */
2865 cfg_mode = --intr_mode;
2866 }
2625 } 2867 }
2626 2868
2627 /* 2869 /*
@@ -2629,6 +2871,75 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2629 * the value of can_queue. 2871 * the value of can_queue.
2630 */ 2872 */
2631 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2873 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2874 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2875
2876 if (lpfc_prot_mask && lpfc_prot_guard) {
2877 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2878 "1478 Registering BlockGuard with the "
2879 "SCSI layer\n");
2880
2881 scsi_host_set_prot(shost, lpfc_prot_mask);
2882 scsi_host_set_guard(shost, lpfc_prot_guard);
2883 }
2884 }
2885
2886 if (!_dump_buf_data) {
2887 int pagecnt = 10;
2888 while (pagecnt) {
2889 spin_lock_init(&_dump_buf_lock);
2890 _dump_buf_data =
2891 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2892 if (_dump_buf_data) {
2893 printk(KERN_ERR "BLKGRD allocated %d pages for "
2894 "_dump_buf_data at 0x%p\n",
2895 (1 << pagecnt), _dump_buf_data);
2896 _dump_buf_data_order = pagecnt;
2897 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2898 << pagecnt));
2899 break;
2900 } else {
2901 --pagecnt;
2902 }
2903
2904 }
2905
2906 if (!_dump_buf_data_order)
2907 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2908 "memory for hexdump\n");
2909
2910 } else {
2911 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2912 "\n", _dump_buf_data);
2913 }
2914
2915
2916 if (!_dump_buf_dif) {
2917 int pagecnt = 10;
2918 while (pagecnt) {
2919 _dump_buf_dif =
2920 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2921 if (_dump_buf_dif) {
2922 printk(KERN_ERR "BLKGRD allocated %d pages for "
2923 "_dump_buf_dif at 0x%p\n",
2924 (1 << pagecnt), _dump_buf_dif);
2925 _dump_buf_dif_order = pagecnt;
2926 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2927 << pagecnt));
2928 break;
2929 } else {
2930 --pagecnt;
2931 }
2932
2933 }
2934
2935 if (!_dump_buf_dif_order)
2936 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2937 "memory for hexdump\n");
2938
2939 } else {
2940 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
2941 _dump_buf_dif);
2942 }
2632 2943
2633 lpfc_host_attrib_init(shost); 2944 lpfc_host_attrib_init(shost);
2634 2945
@@ -2646,29 +2957,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2646 fc_host_post_vendor_event(shost, fc_get_event_number(), 2957 fc_host_post_vendor_event(shost, fc_get_event_number(),
2647 sizeof(adapter_event), 2958 sizeof(adapter_event),
2648 (char *) &adapter_event, 2959 (char *) &adapter_event,
2649 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2960 LPFC_NL_VENDOR_ID);
2650
2651 scsi_scan_host(shost);
2652 2961
2653 return 0; 2962 return 0;
2654 2963
2655out_remove_device: 2964out_remove_device:
2656 lpfc_free_sysfs_attr(vport);
2657 spin_lock_irq(shost->host_lock); 2965 spin_lock_irq(shost->host_lock);
2658 vport->load_flag |= FC_UNLOADING; 2966 vport->load_flag |= FC_UNLOADING;
2659 spin_unlock_irq(shost->host_lock); 2967 spin_unlock_irq(shost->host_lock);
2660out_free_irq:
2661 lpfc_stop_phba_timers(phba); 2968 lpfc_stop_phba_timers(phba);
2662 phba->pport->work_port_events = 0; 2969 phba->pport->work_port_events = 0;
2663 2970 lpfc_disable_intr(phba);
2664 if (phba->intr_type == MSIX) 2971 lpfc_sli_hba_down(phba);
2665 lpfc_disable_msix(phba); 2972 lpfc_sli_brdrestart(phba);
2666 else 2973out_free_sysfs_attr:
2667 free_irq(phba->pcidev->irq, phba); 2974 lpfc_free_sysfs_attr(vport);
2668 2975out_destroy_port:
2669out_disable_msi:
2670 if (phba->intr_type == MSI)
2671 pci_disable_msi(phba->pcidev);
2672 destroy_port(vport); 2976 destroy_port(vport);
2673out_kthread_stop: 2977out_kthread_stop:
2674 kthread_stop(phba->worker_thread); 2978 kthread_stop(phba->worker_thread);
@@ -2709,7 +3013,7 @@ out:
2709 * @pdev: pointer to PCI device 3013 * @pdev: pointer to PCI device
2710 * 3014 *
2711 * This routine is to be registered to the kernel's PCI subsystem. When an 3015 * This routine is to be registered to the kernel's PCI subsystem. When an
2712 * Emulex HBA is removed from PCI bus. It perform all the necessary cleanup 3016 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
2713 * for the HBA device to be removed from the PCI subsystem properly. 3017 * for the HBA device to be removed from the PCI subsystem properly.
2714 **/ 3018 **/
2715static void __devexit 3019static void __devexit
@@ -2717,18 +3021,27 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2717{ 3021{
2718 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3022 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2719 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3023 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3024 struct lpfc_vport **vports;
2720 struct lpfc_hba *phba = vport->phba; 3025 struct lpfc_hba *phba = vport->phba;
3026 int i;
2721 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 3027 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2722 3028
2723 spin_lock_irq(&phba->hbalock); 3029 spin_lock_irq(&phba->hbalock);
2724 vport->load_flag |= FC_UNLOADING; 3030 vport->load_flag |= FC_UNLOADING;
2725 spin_unlock_irq(&phba->hbalock); 3031 spin_unlock_irq(&phba->hbalock);
2726 3032
2727 kfree(vport->vname);
2728 lpfc_free_sysfs_attr(vport); 3033 lpfc_free_sysfs_attr(vport);
2729 3034
2730 kthread_stop(phba->worker_thread); 3035 kthread_stop(phba->worker_thread);
2731 3036
3037 /* Release all the vports against this physical port */
3038 vports = lpfc_create_vport_work_array(phba);
3039 if (vports != NULL)
3040 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
3041 fc_vport_terminate(vports[i]->fc_vport);
3042 lpfc_destroy_vport_work_array(phba, vports);
3043
3044 /* Remove FC host and then SCSI host with the physical port */
2732 fc_remove_host(shost); 3045 fc_remove_host(shost);
2733 scsi_remove_host(shost); 3046 scsi_remove_host(shost);
2734 lpfc_cleanup(vport); 3047 lpfc_cleanup(vport);
@@ -2748,13 +3061,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2748 3061
2749 lpfc_debugfs_terminate(vport); 3062 lpfc_debugfs_terminate(vport);
2750 3063
2751 if (phba->intr_type == MSIX) 3064 /* Disable interrupt */
2752 lpfc_disable_msix(phba); 3065 lpfc_disable_intr(phba);
2753 else {
2754 free_irq(phba->pcidev->irq, phba);
2755 if (phba->intr_type == MSI)
2756 pci_disable_msi(phba->pcidev);
2757 }
2758 3066
2759 pci_set_drvdata(pdev, NULL); 3067 pci_set_drvdata(pdev, NULL);
2760 scsi_host_put(shost); 3068 scsi_host_put(shost);
@@ -2786,6 +3094,115 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2786} 3094}
2787 3095
2788/** 3096/**
3097 * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management.
3098 * @pdev: pointer to PCI device
3099 * @msg: power management message
3100 *
3101 * This routine is to be registered to the kernel's PCI subsystem to support
3102 * system Power Management (PM). When PM invokes this method, it quiesces the
 3103 * device by stopping the driver's worker thread for the device, turning off
 3104 * the device's interrupt and DMA, and bringing the device offline. Note that
 3105 * the driver implements only the minimum PM requirements of a power-aware
 3106 * driver for suspend/resume: every PM message (SUSPEND, HIBERNATE, FREEZE)
 3107 * passed to the suspend() method is treated as SUSPEND, and the driver
 3108 * fully reinitializes the device in its resume() method. The driver
 3109 * therefore sets the device to the PCI_D3hot state in PCI config space
 3110 * instead of choosing a state according to the @msg provided by the PM core.
3111 *
3112 * Return code
3113 * 0 - driver suspended the device
3114 * Error otherwise
3115 **/
3116static int
3117lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3118{
3119 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3120 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3121
3122 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3123 "0473 PCI device Power Management suspend.\n");
3124
3125 /* Bring down the device */
3126 lpfc_offline_prep(phba);
3127 lpfc_offline(phba);
3128 kthread_stop(phba->worker_thread);
3129
3130 /* Disable interrupt from device */
3131 lpfc_disable_intr(phba);
3132
3133 /* Save device state to PCI config space */
3134 pci_save_state(pdev);
3135 pci_set_power_state(pdev, PCI_D3hot);
3136
3137 return 0;
3138}
3139
3140/**
3141 * lpfc_pci_resume_one: lpfc PCI func to resume device for power management.
3142 * @pdev: pointer to PCI device
3143 *
3144 * This routine is to be registered to the kernel's PCI subsystem to support
3145 * system Power Management (PM). When PM invokes this method, it restores
3146 * the device's PCI config space state and fully reinitializes the device
 3147 * and brings it online. Note that the driver implements only the minimum PM
 3148 * requirements of a power-aware driver for suspend/resume: every PM message
 3149 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method is treated as
 3150 * SUSPEND, and the driver fully reinitializes the device in this resume()
 3151 * method. The device is therefore set directly to PCI_D0 in PCI config
 3152 * space before its state is restored.
3153 *
3154 * Return code
 3155 * 0 - driver resumed the device
3156 * Error otherwise
3157 **/
3158static int
3159lpfc_pci_resume_one(struct pci_dev *pdev)
3160{
3161 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3162 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3163 uint32_t intr_mode;
3164 int error;
3165
3166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3167 "0452 PCI device Power Management resume.\n");
3168
3169 /* Restore device state from PCI config space */
3170 pci_set_power_state(pdev, PCI_D0);
3171 pci_restore_state(pdev);
3172 if (pdev->is_busmaster)
3173 pci_set_master(pdev);
3174
3175 /* Startup the kernel thread for this host adapter. */
3176 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3177 "lpfc_worker_%d", phba->brd_no);
3178 if (IS_ERR(phba->worker_thread)) {
3179 error = PTR_ERR(phba->worker_thread);
3180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3181 "0434 PM resume failed to start worker "
3182 "thread: error=x%x.\n", error);
3183 return error;
3184 }
3185
3186 /* Configure and enable interrupt */
3187 intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
3188 if (intr_mode == LPFC_INTR_ERROR) {
3189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3190 "0430 PM resume Failed to enable interrupt\n");
3191 return -EIO;
3192 } else
3193 phba->intr_mode = intr_mode;
3194
3195 /* Restart HBA and bring it online */
3196 lpfc_sli_brdrestart(phba);
3197 lpfc_online(phba);
3198
3199 /* Log the current active interrupt mode */
3200 lpfc_log_intr_mode(phba, phba->intr_mode);
3201
3202 return 0;
3203}
3204
3205/**
2789 * lpfc_io_error_detected: Driver method for handling PCI I/O error detected. 3206 * lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
2790 * @pdev: pointer to PCI device. 3207 * @pdev: pointer to PCI device.
2791 * @state: the current PCI connection state. 3208 * @state: the current PCI connection state.
@@ -2828,13 +3245,8 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
2828 pring = &psli->ring[psli->fcp_ring]; 3245 pring = &psli->ring[psli->fcp_ring];
2829 lpfc_sli_abort_iocb_ring(phba, pring); 3246 lpfc_sli_abort_iocb_ring(phba, pring);
2830 3247
2831 if (phba->intr_type == MSIX) 3248 /* Disable interrupt */
2832 lpfc_disable_msix(phba); 3249 lpfc_disable_intr(phba);
2833 else {
2834 free_irq(phba->pcidev->irq, phba);
2835 if (phba->intr_type == MSI)
2836 pci_disable_msi(phba->pcidev);
2837 }
2838 3250
2839 /* Request a slot reset. */ 3251 /* Request a slot reset. */
2840 return PCI_ERS_RESULT_NEED_RESET; 3252 return PCI_ERS_RESULT_NEED_RESET;
@@ -2862,7 +3274,7 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
2862 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3274 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2863 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3275 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2864 struct lpfc_sli *psli = &phba->sli; 3276 struct lpfc_sli *psli = &phba->sli;
2865 int error, retval; 3277 uint32_t intr_mode;
2866 3278
2867 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 3279 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
2868 if (pci_enable_device_mem(pdev)) { 3280 if (pci_enable_device_mem(pdev)) {
@@ -2871,61 +3283,31 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
2871 return PCI_ERS_RESULT_DISCONNECT; 3283 return PCI_ERS_RESULT_DISCONNECT;
2872 } 3284 }
2873 3285
2874 pci_set_master(pdev); 3286 pci_restore_state(pdev);
3287 if (pdev->is_busmaster)
3288 pci_set_master(pdev);
2875 3289
2876 spin_lock_irq(&phba->hbalock); 3290 spin_lock_irq(&phba->hbalock);
2877 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3291 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2878 spin_unlock_irq(&phba->hbalock); 3292 spin_unlock_irq(&phba->hbalock);
2879 3293
2880 /* Enable configured interrupt method */ 3294 /* Configure and enable interrupt */
2881 phba->intr_type = NONE; 3295 intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
2882 if (phba->cfg_use_msi == 2) { 3296 if (intr_mode == LPFC_INTR_ERROR) {
2883 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 3297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2884 error = lpfc_sli_config_port(phba, 3); 3298 "0427 Cannot re-enable interrupt after "
2885 if (error) 3299 "slot reset.\n");
2886 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3300 return PCI_ERS_RESULT_DISCONNECT;
2887 "0478 Firmware not capable of SLI 3 mode.\n"); 3301 } else
2888 else { 3302 phba->intr_mode = intr_mode;
2889 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2890 "0479 Firmware capable of SLI 3 mode.\n");
2891 /* Now, try to enable MSI-X interrupt mode */
2892 error = lpfc_enable_msix(phba);
2893 if (!error) {
2894 phba->intr_type = MSIX;
2895 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2896 "0480 enable MSI-X mode.\n");
2897 }
2898 }
2899 }
2900
2901 /* Fallback to MSI if MSI-X initialization failed */
2902 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
2903 retval = pci_enable_msi(phba->pcidev);
2904 if (!retval) {
2905 phba->intr_type = MSI;
2906 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2907 "0481 enable MSI mode.\n");
2908 } else
2909 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2910 "0470 enable IRQ mode.\n");
2911 }
2912
2913 /* MSI-X is the only case the doesn't need to call request_irq */
2914 if (phba->intr_type != MSIX) {
2915 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2916 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2917 if (retval) {
2918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2919 "0471 Enable interrupt handler "
2920 "failed\n");
2921 } else if (phba->intr_type != MSI)
2922 phba->intr_type = INTx;
2923 }
2924 3303
2925 /* Take device offline; this will perform cleanup */ 3304 /* Take device offline; this will perform cleanup */
2926 lpfc_offline(phba); 3305 lpfc_offline(phba);
2927 lpfc_sli_brdrestart(phba); 3306 lpfc_sli_brdrestart(phba);
2928 3307
3308 /* Log the current active interrupt mode */
3309 lpfc_log_intr_mode(phba, phba->intr_mode);
3310
2929 return PCI_ERS_RESULT_RECOVERED; 3311 return PCI_ERS_RESULT_RECOVERED;
2930} 3312}
2931 3313
@@ -3037,6 +3419,8 @@ static struct pci_driver lpfc_driver = {
3037 .id_table = lpfc_id_table, 3419 .id_table = lpfc_id_table,
3038 .probe = lpfc_pci_probe_one, 3420 .probe = lpfc_pci_probe_one,
3039 .remove = __devexit_p(lpfc_pci_remove_one), 3421 .remove = __devexit_p(lpfc_pci_remove_one),
3422 .suspend = lpfc_pci_suspend_one,
3423 .resume = lpfc_pci_resume_one,
3040 .err_handler = &lpfc_err_handler, 3424 .err_handler = &lpfc_err_handler,
3041}; 3425};
3042 3426
@@ -3100,6 +3484,19 @@ lpfc_exit(void)
3100 fc_release_transport(lpfc_transport_template); 3484 fc_release_transport(lpfc_transport_template);
3101 if (lpfc_enable_npiv) 3485 if (lpfc_enable_npiv)
3102 fc_release_transport(lpfc_vport_transport_template); 3486 fc_release_transport(lpfc_vport_transport_template);
3487 if (_dump_buf_data) {
3488 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
3489 "at 0x%p\n",
3490 (1L << _dump_buf_data_order), _dump_buf_data);
3491 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
3492 }
3493
3494 if (_dump_buf_dif) {
3495 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
3496 "at 0x%p\n",
3497 (1L << _dump_buf_dif_order), _dump_buf_dif);
3498 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
3499 }
3103} 3500}
3104 3501
3105module_init(lpfc_init); 3502module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 39fd2b843bec..a85b7c196bbc 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -27,6 +27,7 @@
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x40 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x80 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x100 /* Temperature sensor events */
 30#define LOG_BG 0x200 /* BlockGuard events */
30#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x400 /* Miscellaneous events */
31#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x800 /* SLI events */
32#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 7465fe746fe9..34eeb086a667 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -77,6 +77,38 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
77} 77}
78 78
79/** 79/**
 80 * lpfc_dump_wakeup_param: Prepare a mailbox command for retrieving wakeup params.
81 * @phba: pointer to lpfc hba data structure.
82 * @pmb: pointer to the driver internal queue element for mailbox command.
 83 * This function creates a dump memory mailbox command to dump wake up
84 * parameters.
85 */
86void
87lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
88{
89 MAILBOX_t *mb;
90 void *ctx;
91
92 mb = &pmb->mb;
93 /* Save context so that we can restore after memset */
94 ctx = pmb->context2;
95
 96 /* Setup to dump wakeup parameter region */
97 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
98 mb->mbxCommand = MBX_DUMP_MEMORY;
99 mb->mbxOwner = OWN_HOST;
100 mb->un.varDmp.cv = 1;
101 mb->un.varDmp.type = DMP_NV_PARAMS;
102 mb->un.varDmp.entry_index = 0;
103 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
104 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
105 mb->un.varDmp.co = 0;
106 mb->un.varDmp.resp_offset = 0;
107 pmb->context2 = ctx;
108 return;
109}
110
111/**
80 * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param. 112 * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param.
81 * @phba: pointer to lpfc hba data structure. 113 * @phba: pointer to lpfc hba data structure.
82 * @pmb: pointer to the driver internal queue element for mailbox command. 114 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -1061,9 +1093,14 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1061 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); 1093 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
1062 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); 1094 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
1063 1095
 1096 /* The Host Group Pointer is always in SLIM */
1097 mb->un.varCfgPort.hps = 1;
1098
1064 /* If HBA supports SLI=3 ask for it */ 1099 /* If HBA supports SLI=3 ask for it */
1065 1100
1066 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1101 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
1102 if (phba->cfg_enable_bg)
1103 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1067 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1104 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1068 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1105 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1069 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1106 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
@@ -1163,16 +1200,11 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1163 sizeof(*phba->host_gp)); 1200 sizeof(*phba->host_gp));
1164 } 1201 }
1165 1202
1166 /* Setup Port Group ring pointer */ 1203 /* Setup Port Group offset */
1167 if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) { 1204 if (phba->sli_rev == 3)
1168 pgp_offset = offsetof(struct lpfc_sli2_slim,
1169 mbx.us.s3_inb_pgp.port);
1170 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
1171 } else if (phba->sli_rev == 3) {
1172 pgp_offset = offsetof(struct lpfc_sli2_slim, 1205 pgp_offset = offsetof(struct lpfc_sli2_slim,
1173 mbx.us.s3_pgp.port); 1206 mbx.us.s3_pgp.port);
1174 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 1207 else
1175 } else
1176 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port); 1208 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
1177 pdma_addr = phba->slim2p.phys + pgp_offset; 1209 pdma_addr = phba->slim2p.phys + pgp_offset;
1178 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr); 1210 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
@@ -1285,10 +1317,12 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1285void 1317void
1286lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1318lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1287{ 1319{
1320 unsigned long iflag;
1321
1288 /* This function expects to be called from interrupt context */ 1322 /* This function expects to be called from interrupt context */
1289 spin_lock(&phba->hbalock); 1323 spin_lock_irqsave(&phba->hbalock, iflag);
1290 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1324 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1291 spin_unlock(&phba->hbalock); 1325 spin_unlock_irqrestore(&phba->hbalock, iflag);
1292 return; 1326 return;
1293} 1327}
1294 1328
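The lpfc_mbox_cmpl_put() hunk above replaces plain spin_lock() with spin_lock_irqsave() so the completion queue can be appended to safely no matter whether the caller runs in interrupt or process context. A minimal sketch of that pattern, not taken from the driver (helper name and parameters are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative only: append an element to a completion list from any context. */
static void example_cmpl_put(spinlock_t *lock, struct list_head *item,
			     struct list_head *done_list)
{
	unsigned long iflag;

	/* Save and disable local interrupts so this is safe from any context. */
	spin_lock_irqsave(lock, iflag);
	list_add_tail(item, done_list);
	spin_unlock_irqrestore(lock, iflag);
}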
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index 1accb5a9f4e6..27d1a88a98fe 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -22,18 +22,20 @@
22#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */ 22#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
23#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */ 23#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
24#define FC_REG_CT_EVENT 0x0004 /* CT request events */ 24#define FC_REG_CT_EVENT 0x0004 /* CT request events */
25#define FC_REG_DUMP_EVENT 0x0008 /* Dump events */ 25#define FC_REG_DUMP_EVENT 0x0010 /* Dump events */
26#define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */ 26#define FC_REG_TEMPERATURE_EVENT 0x0020 /* temperature events */
27#define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */ 27#define FC_REG_VPORTRSCN_EVENT 0x0040 /* Vport RSCN events */
28#define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */ 28#define FC_REG_ELS_EVENT 0x0080 /* lpfc els events */
29#define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */ 29#define FC_REG_FABRIC_EVENT 0x0100 /* lpfc fabric events */
30#define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */ 30#define FC_REG_SCSI_EVENT 0x0200 /* lpfc scsi events */
31#define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */ 31#define FC_REG_BOARD_EVENT 0x0400 /* lpfc board events */
32#define FC_REG_ADAPTER_EVENT 0x0800 /* lpfc adapter events */
32#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \ 33#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
33 FC_REG_RSCN_EVENT | \ 34 FC_REG_RSCN_EVENT | \
34 FC_REG_CT_EVENT | \ 35 FC_REG_CT_EVENT | \
35 FC_REG_DUMP_EVENT | \ 36 FC_REG_DUMP_EVENT | \
36 FC_REG_TEMPERATURE_EVENT | \ 37 FC_REG_TEMPERATURE_EVENT | \
38 FC_REG_VPORTRSCN_EVENT | \
37 FC_REG_ELS_EVENT | \ 39 FC_REG_ELS_EVENT | \
38 FC_REG_FABRIC_EVENT | \ 40 FC_REG_FABRIC_EVENT | \
39 FC_REG_SCSI_EVENT | \ 41 FC_REG_SCSI_EVENT | \
@@ -52,6 +54,13 @@
52 * The payload sent via the fc transport is one-way driver->application. 54 * The payload sent via the fc transport is one-way driver->application.
53 */ 55 */
54 56
57/* RSCN event header */
58struct lpfc_rscn_event_header {
59 uint32_t event_type;
60 uint32_t payload_length; /* RSCN data length in bytes */
61 uint32_t rscn_payload[];
62};
63
55/* els event header */ 64/* els event header */
56struct lpfc_els_event_header { 65struct lpfc_els_event_header {
57 uint32_t event_type; 66 uint32_t event_type;
@@ -65,6 +74,7 @@ struct lpfc_els_event_header {
65#define LPFC_EVENT_PRLO_RCV 0x02 74#define LPFC_EVENT_PRLO_RCV 0x02
66#define LPFC_EVENT_ADISC_RCV 0x04 75#define LPFC_EVENT_ADISC_RCV 0x04
67#define LPFC_EVENT_LSRJT_RCV 0x08 76#define LPFC_EVENT_LSRJT_RCV 0x08
77#define LPFC_EVENT_LOGO_RCV 0x10
68 78
69/* special els lsrjt event */ 79/* special els lsrjt event */
70struct lpfc_lsrjt_event { 80struct lpfc_lsrjt_event {
@@ -74,6 +84,11 @@ struct lpfc_lsrjt_event {
74 uint32_t explanation; 84 uint32_t explanation;
75}; 85};
76 86
87/* special els logo event */
88struct lpfc_logo_event {
89 struct lpfc_els_event_header header;
90 uint8_t logo_wwpn[8];
91};
77 92
78/* fabric event header */ 93/* fabric event header */
79struct lpfc_fabric_event_header { 94struct lpfc_fabric_event_header {
@@ -125,6 +140,7 @@ struct lpfc_scsi_varqueuedepth_event {
125/* special case scsi check condition event */ 140/* special case scsi check condition event */
126struct lpfc_scsi_check_condition_event { 141struct lpfc_scsi_check_condition_event {
127 struct lpfc_scsi_event_header scsi_event; 142 struct lpfc_scsi_event_header scsi_event;
143 uint8_t opcode;
128 uint8_t sense_key; 144 uint8_t sense_key;
129 uint8_t asc; 145 uint8_t asc;
130 uint8_t ascq; 146 uint8_t ascq;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0c25d97acb42..8f548adae9cc 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1929,10 +1929,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1929 if (vport->fc_flag & FC_RSCN_DEFERRED) 1929 if (vport->fc_flag & FC_RSCN_DEFERRED)
1930 return ndlp->nlp_state; 1930 return ndlp->nlp_state;
1931 1931
1932 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1932 spin_lock_irq(shost->host_lock); 1933 spin_lock_irq(shost->host_lock);
1933 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1934 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1934 spin_unlock_irq(shost->host_lock); 1935 spin_unlock_irq(shost->host_lock);
1935 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1936 return ndlp->nlp_state; 1936 return ndlp->nlp_state;
1937} 1937}
1938 1938
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bd1867411821..b103b6ed4970 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -18,13 +18,14 @@
18 * more details, a copy of which can be found in the file COPYING * 18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21
22#include <linux/pci.h> 21#include <linux/pci.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
24#include <asm/unaligned.h>
25 25
26#include <scsi/scsi.h> 26#include <scsi/scsi.h>
27#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
28#include <scsi/scsi_eh.h>
28#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
29#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
30#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
@@ -43,6 +44,73 @@
43#define LPFC_RESET_WAIT 2 44#define LPFC_RESET_WAIT 2
44#define LPFC_ABORT_WAIT 2 45#define LPFC_ABORT_WAIT 2
45 46
47int _dump_buf_done;
48
49static char *dif_op_str[] = {
50 "SCSI_PROT_NORMAL",
51 "SCSI_PROT_READ_INSERT",
52 "SCSI_PROT_WRITE_STRIP",
53 "SCSI_PROT_READ_STRIP",
54 "SCSI_PROT_WRITE_INSERT",
55 "SCSI_PROT_READ_PASS",
56 "SCSI_PROT_WRITE_PASS",
57 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT"
59};
60
61static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd)
63{
64 void *src, *dst;
65 struct scatterlist *sgde = scsi_sglist(cmnd);
66
67 if (!_dump_buf_data) {
68 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
69 __func__);
70 return;
71 }
72
73
74 if (!sgde) {
75 printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
76 return;
77 }
78
79 dst = (void *) _dump_buf_data;
80 while (sgde) {
81 src = sg_virt(sgde);
82 memcpy(dst, src, sgde->length);
83 dst += sgde->length;
84 sgde = sg_next(sgde);
85 }
86}
87
88static void
89lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
90{
91 void *src, *dst;
92 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
93
94 if (!_dump_buf_dif) {
 95 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
96 __func__);
97 return;
98 }
99
100 if (!sgde) {
101 printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
102 return;
103 }
104
105 dst = _dump_buf_dif;
106 while (sgde) {
107 src = sg_virt(sgde);
108 memcpy(dst, src, sgde->length);
109 dst += sgde->length;
110 sgde = sg_next(sgde);
111 }
112}
113
46/** 114/**
47 * lpfc_update_stats: Update statistical data for the command completion. 115 * lpfc_update_stats: Update statistical data for the command completion.
48 * @phba: Pointer to HBA object. 116 * @phba: Pointer to HBA object.
@@ -66,6 +134,8 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
66 if (cmd->result) 134 if (cmd->result)
67 return; 135 return;
68 136
137 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
138
69 spin_lock_irqsave(shost->host_lock, flags); 139 spin_lock_irqsave(shost->host_lock, flags);
70 if (!vport->stat_data_enabled || 140 if (!vport->stat_data_enabled ||
71 vport->stat_data_blocked || 141 vport->stat_data_blocked ||
@@ -74,13 +144,15 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
74 spin_unlock_irqrestore(shost->host_lock, flags); 144 spin_unlock_irqrestore(shost->host_lock, flags);
75 return; 145 return;
76 } 146 }
77 latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
78 147
79 if (phba->bucket_type == LPFC_LINEAR_BUCKET) { 148 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
80 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ 149 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
81 phba->bucket_step; 150 phba->bucket_step;
82 if (i >= LPFC_MAX_BUCKET_COUNT) 151 /* check array subscript bounds */
83 i = LPFC_MAX_BUCKET_COUNT; 152 if (i < 0)
153 i = 0;
154 else if (i >= LPFC_MAX_BUCKET_COUNT)
155 i = LPFC_MAX_BUCKET_COUNT - 1;
84 } else { 156 } else {
85 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) 157 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
86 if (latency <= (phba->bucket_base + 158 if (latency <= (phba->bucket_base +
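The lpfc_update_stats() hunk above clamps the linear bucket index into the valid range [0, LPFC_MAX_BUCKET_COUNT - 1] instead of allowing an off-by-one overflow. A hedged sketch of the same index computation (the standalone helper and its parameter names are illustrative, not the driver's):

/* Illustrative only: map a latency sample onto a fixed set of linear buckets. */
static int example_linear_bucket(int latency_ms, int bucket_base,
				 int bucket_step, int max_buckets)
{
	int i = (latency_ms + bucket_step - 1 - bucket_base) / bucket_step;

	if (i < 0)			/* latency below the first bucket */
		i = 0;
	else if (i >= max_buckets)	/* clamp to the last bucket */
		i = max_buckets - 1;
	return i;
}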
@@ -92,7 +164,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
92 spin_unlock_irqrestore(shost->host_lock, flags); 164 spin_unlock_irqrestore(shost->host_lock, flags);
93} 165}
94 166
95
96/** 167/**
97 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change 168 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
98 * event. 169 * event.
@@ -148,12 +219,19 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
148 return; 219 return;
149} 220}
150 221
151/* 222/**
152 * This function is called with no lock held when there is a resource 223 * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
153 * error in driver or in firmware. 224 * @phba: The Hba for which this call is being executed.
154 */ 225 *
 226 * This routine is called when there is a resource error in the driver or firmware.
227 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
228 * posts at most 1 event each second. This routine wakes up worker thread of
 229 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
230 *
231 * This routine should be called with no lock held.
232 **/
155void 233void
156lpfc_adjust_queue_depth(struct lpfc_hba *phba) 234lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
157{ 235{
158 unsigned long flags; 236 unsigned long flags;
159 uint32_t evt_posted; 237 uint32_t evt_posted;
@@ -182,10 +260,17 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
182 return; 260 return;
183} 261}
184 262
185/* 263/**
186 * This function is called with no lock held when there is a successful 264 * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
187 * SCSI command completion. 265 * @phba: The Hba for which this call is being executed.
188 */ 266 *
 267 * This routine posts WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
 268 * posts at most 1 event every 5 minutes after last_ramp_up_time or
269 * last_rsrc_error_time. This routine wakes up worker thread of @phba
 270 * to process the WORKER_RAMP_UP_QUEUE event.
271 *
272 * This routine should be called with no lock held.
273 **/
189static inline void 274static inline void
190lpfc_rampup_queue_depth(struct lpfc_vport *vport, 275lpfc_rampup_queue_depth(struct lpfc_vport *vport,
191 struct scsi_device *sdev) 276 struct scsi_device *sdev)
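The kerneldoc added above says the ramp-down path posts at most one event per second and the ramp-up path at most one event every five minutes after last_ramp_up_time or last_rsrc_error_time. A hedged sketch of that kind of jiffies-based gating; the helper and its parameters are assumptions for illustration, not lpfc code:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustrative only: allow an event at most once per min_interval jiffies. */
static bool example_may_post_event(unsigned long *last_post,
				   unsigned long min_interval)
{
	if (time_after(jiffies, *last_post + min_interval)) {
		*last_post = jiffies;	/* remember when we last posted */
		return true;		/* caller may post one event */
	}
	return false;			/* still rate-limited */
}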
@@ -217,6 +302,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
217 return; 302 return;
218} 303}
219 304
305/**
306 * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
307 * @phba: The Hba for which this call is being executed.
308 *
309 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
 310 * thread. This routine reduces queue depth for all scsi devices on each vport
311 * associated with @phba.
312 **/
220void 313void
221lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) 314lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
222{ 315{
@@ -267,6 +360,15 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
267 atomic_set(&phba->num_cmd_success, 0); 360 atomic_set(&phba->num_cmd_success, 0);
268} 361}
269 362
363/**
364 * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
365 * @phba: The Hba for which this call is being executed.
366 *
367 * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
 368 * thread. This routine increases queue depth for all scsi devices on each vport
369 * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
370 * num_cmd_success to zero.
371 **/
270void 372void
271lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) 373lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
272{ 374{
@@ -336,14 +438,21 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
336 lpfc_destroy_vport_work_array(phba, vports); 438 lpfc_destroy_vport_work_array(phba, vports);
337} 439}
338 440
339/* 441/**
442 * lpfc_new_scsi_buf: Scsi buffer allocator.
 443 * @vport: The virtual port for which this call is being executed.
444 *
340 * This routine allocates a scsi buffer, which contains all the necessary 445 * This routine allocates a scsi buffer, which contains all the necessary
341 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 446 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
342 * contains information to build the IOCB. The DMAable region contains 447 * contains information to build the IOCB. The DMAable region contains
343 * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to 448 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
344 * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL 449 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
345 * and the BPL BDE is setup in the IOCB. 450 * and the BPL BDE is setup in the IOCB.
346 */ 451 *
452 * Return codes:
453 * NULL - Error
454 * Pointer to lpfc_scsi_buf data structure - Success
455 **/
347static struct lpfc_scsi_buf * 456static struct lpfc_scsi_buf *
348lpfc_new_scsi_buf(struct lpfc_vport *vport) 457lpfc_new_scsi_buf(struct lpfc_vport *vport)
349{ 458{
@@ -407,14 +516,14 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
407 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 516 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
408 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 517 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
409 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 518 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
410 bpl[0].tus.w = le32_to_cpu(bpl->tus.w); 519 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
411 520
412 /* Setup the physical region for the FCP RSP */ 521 /* Setup the physical region for the FCP RSP */
413 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 522 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
414 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 523 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
415 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 524 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
416 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 525 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
417 bpl[1].tus.w = le32_to_cpu(bpl->tus.w); 526 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
418 527
419 /* 528 /*
420 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 529 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
@@ -422,7 +531,8 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
422 */ 531 */
423 iocb = &psb->cur_iocbq.iocb; 532 iocb = &psb->cur_iocbq.iocb;
424 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 533 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
425 if (phba->sli_rev == 3) { 534 if ((phba->sli_rev == 3) &&
535 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
426 /* fill in immediate fcp command BDE */ 536 /* fill in immediate fcp command BDE */
427 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 537 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
428 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 538 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
@@ -452,6 +562,17 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
452 return psb; 562 return psb;
453} 563}
454 564
565/**
566 * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
567 * @phba: The Hba for which this call is being executed.
568 *
569 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
570 * and returns to caller.
571 *
572 * Return codes:
573 * NULL - Error
574 * Pointer to lpfc_scsi_buf - Success
575 **/
455static struct lpfc_scsi_buf* 576static struct lpfc_scsi_buf*
456lpfc_get_scsi_buf(struct lpfc_hba * phba) 577lpfc_get_scsi_buf(struct lpfc_hba * phba)
457{ 578{
@@ -464,11 +585,20 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
464 if (lpfc_cmd) { 585 if (lpfc_cmd) {
465 lpfc_cmd->seg_cnt = 0; 586 lpfc_cmd->seg_cnt = 0;
466 lpfc_cmd->nonsg_phys = 0; 587 lpfc_cmd->nonsg_phys = 0;
588 lpfc_cmd->prot_seg_cnt = 0;
467 } 589 }
468 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 590 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
469 return lpfc_cmd; 591 return lpfc_cmd;
470} 592}
471 593
594/**
595 * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
596 * @phba: The Hba for which this call is being executed.
597 * @psb: The scsi buffer which is being released.
598 *
599 * This routine releases @psb scsi buffer by adding it to tail of @phba
600 * lpfc_scsi_buf_list list.
601 **/
472static void 602static void
473lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 603lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
474{ 604{
@@ -480,6 +610,20 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
480 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 610 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
481} 611}
482 612
613/**
614 * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
615 * @phba: The Hba for which this call is being executed.
616 * @lpfc_cmd: The scsi buffer which is going to be mapped.
617 *
618 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 619 * field of @lpfc_cmd. This routine scans through sg elements and formats the
 620 * bde. This routine also initializes all IOCB fields which are dependent on
621 * scsi command request buffer.
622 *
623 * Return codes:
624 * 1 - Error
625 * 0 - Success
626 **/
483static int 627static int
484lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 628lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
485{ 629{
@@ -516,7 +660,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
516 lpfc_cmd->seg_cnt = nseg; 660 lpfc_cmd->seg_cnt = nseg;
517 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 661 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
518 printk(KERN_ERR "%s: Too many sg segments from " 662 printk(KERN_ERR "%s: Too many sg segments from "
519 "dma_map_sg. Config %d, seg_cnt %d", 663 "dma_map_sg. Config %d, seg_cnt %d\n",
520 __func__, phba->cfg_sg_seg_cnt, 664 __func__, phba->cfg_sg_seg_cnt,
521 lpfc_cmd->seg_cnt); 665 lpfc_cmd->seg_cnt);
522 scsi_dma_unmap(scsi_cmnd); 666 scsi_dma_unmap(scsi_cmnd);
@@ -535,6 +679,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
535 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 679 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
536 physaddr = sg_dma_address(sgel); 680 physaddr = sg_dma_address(sgel);
537 if (phba->sli_rev == 3 && 681 if (phba->sli_rev == 3 &&
682 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
538 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 683 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
539 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 684 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
540 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 685 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -560,7 +705,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
560 * explicitly reinitialized and for SLI-3 the extended bde count is 705 * explicitly reinitialized and for SLI-3 the extended bde count is
561 * explicitly reinitialized since all iocb memory resources are reused. 706 * explicitly reinitialized since all iocb memory resources are reused.
562 */ 707 */
563 if (phba->sli_rev == 3) { 708 if (phba->sli_rev == 3 &&
709 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
564 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 710 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
565 /* 711 /*
566 * The extended IOCB format can only fit 3 BDE or a BPL. 712 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -587,7 +733,683 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
587 ((num_bde + 2) * sizeof(struct ulp_bde64)); 733 ((num_bde + 2) * sizeof(struct ulp_bde64));
588 } 734 }
589 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 735 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
736
737 /*
738 * Due to difference in data length between DIF/non-DIF paths,
739 * we need to set word 4 of IOCB here
740 */
741 iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd));
742 return 0;
743}
744
745/*
746 * Given a scsi cmnd, determine the BlockGuard profile to be used
747 * with the cmd
748 */
749static int
750lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
751{
752 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
753 uint8_t ret_prof = LPFC_PROF_INVALID;
754
755 if (guard_type == SHOST_DIX_GUARD_IP) {
756 switch (scsi_get_prot_op(sc)) {
757 case SCSI_PROT_READ_INSERT:
758 case SCSI_PROT_WRITE_STRIP:
759 ret_prof = LPFC_PROF_AST2;
760 break;
761
762 case SCSI_PROT_READ_STRIP:
763 case SCSI_PROT_WRITE_INSERT:
764 ret_prof = LPFC_PROF_A1;
765 break;
766
767 case SCSI_PROT_READ_CONVERT:
768 case SCSI_PROT_WRITE_CONVERT:
769 ret_prof = LPFC_PROF_AST1;
770 break;
771
772 case SCSI_PROT_READ_PASS:
773 case SCSI_PROT_WRITE_PASS:
774 case SCSI_PROT_NORMAL:
775 default:
776 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
777 scsi_get_prot_op(sc), guard_type);
778 break;
779
780 }
781 } else if (guard_type == SHOST_DIX_GUARD_CRC) {
782 switch (scsi_get_prot_op(sc)) {
783 case SCSI_PROT_READ_STRIP:
784 case SCSI_PROT_WRITE_INSERT:
785 ret_prof = LPFC_PROF_A1;
786 break;
787
788 case SCSI_PROT_READ_PASS:
789 case SCSI_PROT_WRITE_PASS:
790 ret_prof = LPFC_PROF_C1;
791 break;
792
793 case SCSI_PROT_READ_CONVERT:
794 case SCSI_PROT_WRITE_CONVERT:
795 case SCSI_PROT_READ_INSERT:
796 case SCSI_PROT_WRITE_STRIP:
797 case SCSI_PROT_NORMAL:
798 default:
799 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
800 scsi_get_prot_op(sc), guard_type);
801 break;
802 }
803 } else {
804 /* unsupported format */
805 BUG();
806 }
807
808 return ret_prof;
809}
810
811struct scsi_dif_tuple {
812 __be16 guard_tag; /* Checksum */
813 __be16 app_tag; /* Opaque storage */
814 __be32 ref_tag; /* Target LBA or indirect LBA */
815};
816
817static inline unsigned
818lpfc_cmd_blksize(struct scsi_cmnd *sc)
819{
820 return sc->device->sector_size;
821}
822
823/**
824 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
825 * @sc: in: SCSI command
826 * @apptagmask out: app tag mask
827 * @apptagval out: app tag value
828 * @reftag out: ref tag (reference tag)
829 *
830 * Description:
831 * Extract DIF paramters from the command if possible. Otherwise,
832 * use default paratmers.
833 *
834 **/
835static inline void
836lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
837 uint16_t *apptagval, uint32_t *reftag)
838{
839 struct scsi_dif_tuple *spt;
840 unsigned char op = scsi_get_prot_op(sc);
841 unsigned int protcnt = scsi_prot_sg_count(sc);
842 static int cnt;
843
844 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
845 op == SCSI_PROT_WRITE_PASS ||
846 op == SCSI_PROT_WRITE_CONVERT)) {
847
848 cnt++;
849 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
850 scsi_prot_sglist(sc)[0].offset;
851 *apptagmask = 0;
852 *apptagval = 0;
853 *reftag = cpu_to_be32(spt->ref_tag);
854
855 } else {
856 /* SBC defines ref tag to be lower 32bits of LBA */
857 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
858 *apptagmask = 0;
859 *apptagval = 0;
860 }
861}
862
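As the struct above shows, a DIF tuple is 8 bytes (guard, app tag, ref tag); lpfc_get_cmd_dif_parms() pulls the ref tag from the first tuple of the protection scatterlist for write strip/pass/convert operations, and otherwise defaults it to the low 32 bits of the LBA as SBC defines. A tiny illustrative helper for that default (not part of the patch):

#include <linux/types.h>

/* Illustrative only: SBC defines the reference tag as the low 32 bits of the LBA. */
static inline u32 example_default_reftag(sector_t lba)
{
	return (u32)(lba & 0xffffffff);
}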
863/*
864 * This function sets up buffer list for protection groups of
865 * type LPFC_PG_TYPE_NO_DIF
866 *
867 * This is usually used when the HBA is instructed to generate
868 * DIFs and insert them into data stream (or strip DIF from
869 * incoming data stream)
870 *
871 * The buffer list consists of just one protection group described
872 * below:
873 * +-------------------------+
874 * start of prot group --> | PDE_1 |
875 * +-------------------------+
876 * | Data BDE |
877 * +-------------------------+
878 * |more Data BDE's ... (opt)|
879 * +-------------------------+
880 *
881 * @sc: pointer to scsi command we're working on
882 * @bpl: pointer to buffer list for protection groups
883 * @datacnt: number of segments of data that have been dma mapped
884 *
885 * Note: Data s/g buffers have been dma mapped
886 */
887static int
888lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
889 struct ulp_bde64 *bpl, int datasegcnt)
890{
891 struct scatterlist *sgde = NULL; /* s/g data entry */
892 struct lpfc_pde *pde1 = NULL;
893 dma_addr_t physaddr;
894 int i = 0, num_bde = 0;
895 int datadir = sc->sc_data_direction;
896 int prof = LPFC_PROF_INVALID;
897 unsigned blksize;
898 uint32_t reftag;
899 uint16_t apptagmask, apptagval;
900
901 pde1 = (struct lpfc_pde *) bpl;
902 prof = lpfc_sc_to_sli_prof(sc);
903
904 if (prof == LPFC_PROF_INVALID)
905 goto out;
906
907 /* extract some info from the scsi command for PDE1*/
908 blksize = lpfc_cmd_blksize(sc);
909 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
910
911 /* setup PDE1 with what we have */
912 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
913 BG_EC_STOP_ERR);
914 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
915
916 num_bde++;
917 bpl++;
918
919 /* assumption: caller has already run dma_map_sg on command data */
920 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
921 physaddr = sg_dma_address(sgde);
922 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
923 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
924 bpl->tus.f.bdeSize = sg_dma_len(sgde);
925 if (datadir == DMA_TO_DEVICE)
926 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
927 else
928 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
929 bpl->tus.w = le32_to_cpu(bpl->tus.w);
930 bpl++;
931 num_bde++;
932 }
933
934out:
935 return num_bde;
936}
937
938/*
939 * This function sets up buffer list for protection groups of
940 * type LPFC_PG_TYPE_DIF_BUF
941 *
942 * This is usually used when DIFs are in their own buffers,
 943 * separate from the data. The HBA can then be instructed
944 * to place the DIFs in the outgoing stream. For read operations,
 945 * the HBA could extract the DIFs and place them in DIF buffers.
946 *
947 * The buffer list for this type consists of one or more of the
948 * protection groups described below:
949 * +-------------------------+
950 * start of first prot group --> | PDE_1 |
951 * +-------------------------+
952 * | PDE_3 (Prot BDE) |
953 * +-------------------------+
954 * | Data BDE |
955 * +-------------------------+
956 * |more Data BDE's ... (opt)|
957 * +-------------------------+
958 * start of new prot group --> | PDE_1 |
959 * +-------------------------+
960 * | ... |
961 * +-------------------------+
962 *
963 * @sc: pointer to scsi command we're working on
964 * @bpl: pointer to buffer list for protection groups
965 * @datacnt: number of segments of data that have been dma mapped
966 * @protcnt: number of segment of protection data that have been dma mapped
967 *
968 * Note: It is assumed that both data and protection s/g buffers have been
969 * mapped for DMA
970 */
971static int
972lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
973 struct ulp_bde64 *bpl, int datacnt, int protcnt)
974{
975 struct scatterlist *sgde = NULL; /* s/g data entry */
976 struct scatterlist *sgpe = NULL; /* s/g prot entry */
977 struct lpfc_pde *pde1 = NULL;
978 struct ulp_bde64 *prot_bde = NULL;
979 dma_addr_t dataphysaddr, protphysaddr;
980 unsigned short curr_data = 0, curr_prot = 0;
981 unsigned int split_offset, protgroup_len;
982 unsigned int protgrp_blks, protgrp_bytes;
983 unsigned int remainder, subtotal;
984 int prof = LPFC_PROF_INVALID;
985 int datadir = sc->sc_data_direction;
986 unsigned char pgdone = 0, alldone = 0;
987 unsigned blksize;
988 uint32_t reftag;
989 uint16_t apptagmask, apptagval;
990 int num_bde = 0;
991
992 sgpe = scsi_prot_sglist(sc);
993 sgde = scsi_sglist(sc);
994
995 if (!sgpe || !sgde) {
996 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
997 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
998 sgpe, sgde);
999 return 0;
1000 }
1001
1002 prof = lpfc_sc_to_sli_prof(sc);
1003 if (prof == LPFC_PROF_INVALID)
1004 goto out;
1005
1006 /* extract some info from the scsi command for PDE1*/
1007 blksize = lpfc_cmd_blksize(sc);
1008 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1009
1010 split_offset = 0;
1011 do {
1012 /* setup the first PDE_1 */
1013 pde1 = (struct lpfc_pde *) bpl;
1014
1015 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1016 BG_EC_STOP_ERR);
1017 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1018
1019 num_bde++;
1020 bpl++;
1021
1022 /* setup the first BDE that points to protection buffer */
1023 prot_bde = (struct ulp_bde64 *) bpl;
1024 protphysaddr = sg_dma_address(sgpe);
1025 prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1026 prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1027 protgroup_len = sg_dma_len(sgpe);
1028
1029
 1030 /* must be an integer multiple of the 8-byte DIF tuple size */
1031 BUG_ON(protgroup_len % 8);
1032
1033 protgrp_blks = protgroup_len / 8;
1034 protgrp_bytes = protgrp_blks * blksize;
1035
1036 prot_bde->tus.f.bdeSize = protgroup_len;
1037 if (datadir == DMA_TO_DEVICE)
1038 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1039 else
1040 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1041 prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
1042
1043 curr_prot++;
1044 num_bde++;
1045
1046 /* setup BDE's for data blocks associated with DIF data */
1047 pgdone = 0;
1048 subtotal = 0; /* total bytes processed for current prot grp */
1049 while (!pgdone) {
1050 if (!sgde) {
1051 printk(KERN_ERR "%s Invalid data segment\n",
1052 __func__);
1053 return 0;
1054 }
1055 bpl++;
1056 dataphysaddr = sg_dma_address(sgde) + split_offset;
1057 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1058 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1059
1060 remainder = sg_dma_len(sgde) - split_offset;
1061
1062 if ((subtotal + remainder) <= protgrp_bytes) {
1063 /* we can use this whole buffer */
1064 bpl->tus.f.bdeSize = remainder;
1065 split_offset = 0;
1066
1067 if ((subtotal + remainder) == protgrp_bytes)
1068 pgdone = 1;
1069 } else {
1070 /* must split this buffer with next prot grp */
1071 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1072 split_offset += bpl->tus.f.bdeSize;
1073 }
1074
1075 subtotal += bpl->tus.f.bdeSize;
1076
1077 if (datadir == DMA_TO_DEVICE)
1078 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1079 else
1080 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1081 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1082
1083 num_bde++;
1084 curr_data++;
1085
1086 if (split_offset)
1087 break;
1088
1089 /* Move to the next s/g segment if possible */
1090 sgde = sg_next(sgde);
1091 }
1092
1093 /* are we done ? */
1094 if (curr_prot == protcnt) {
1095 alldone = 1;
1096 } else if (curr_prot < protcnt) {
1097 /* advance to next prot buffer */
1098 sgpe = sg_next(sgpe);
1099 bpl++;
1100
1101 /* update the reference tag */
1102 reftag += protgrp_blks;
1103 } else {
1104 /* if we're here, we have a bug */
1105 printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
1106 }
1107
1108 } while (!alldone);
1109
1110out:
1111
1112
1113 return num_bde;
1114}
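The loop above carves the transfer into protection groups: each 8-byte tuple in a protection segment covers one logical block, so a 64-byte protection segment with 512-byte blocks covers 64 / 8 * 512 = 4096 data bytes, and data BDEs are accumulated (splitting an s/g entry if needed) until that byte budget is consumed. A hedged sketch of the sizing arithmetic only (helper name is illustrative):

/* Illustrative only: bytes of data covered by one protection segment. */
static unsigned int example_protgrp_bytes(unsigned int protgroup_len,
					  unsigned int blksize)
{
	unsigned int protgrp_blks = protgroup_len / 8;	/* 8 bytes per DIF tuple */

	return protgrp_blks * blksize;
}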
1115/*
1116 * Given a SCSI command that supports DIF, determine composition of protection
1117 * groups involved in setting up buffer lists
1118 *
1119 * Returns:
 1120 * the protection group type (LPFC_PG_TYPE_*) for both read and write
 1121 */
1122static int
1123lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1124{
1125 int ret = LPFC_PG_TYPE_INVALID;
1126 unsigned char op = scsi_get_prot_op(sc);
1127
1128 switch (op) {
1129 case SCSI_PROT_READ_STRIP:
1130 case SCSI_PROT_WRITE_INSERT:
1131 ret = LPFC_PG_TYPE_NO_DIF;
1132 break;
1133 case SCSI_PROT_READ_INSERT:
1134 case SCSI_PROT_WRITE_STRIP:
1135 case SCSI_PROT_READ_PASS:
1136 case SCSI_PROT_WRITE_PASS:
1137 case SCSI_PROT_WRITE_CONVERT:
1138 case SCSI_PROT_READ_CONVERT:
1139 ret = LPFC_PG_TYPE_DIF_BUF;
1140 break;
1141 default:
1142 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1143 "9021 Unsupported protection op:%d\n", op);
1144 break;
1145 }
1146
1147 return ret;
1148}
1149
1150/*
1151 * This is the protection/DIF aware version of
1152 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1153 * two functions eventually, but for now, it's here
1154 */
1155static int
1156lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1157 struct lpfc_scsi_buf *lpfc_cmd)
1158{
1159 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1160 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1161 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1162 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1163 uint32_t num_bde = 0;
1164 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
1165 int prot_group_type = 0;
1166 int diflen, fcpdl;
1167 unsigned blksize;
1168
1169 /*
1170 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1171 * fcp_rsp regions to the first data bde entry
1172 */
1173 bpl += 2;
1174 if (scsi_sg_count(scsi_cmnd)) {
1175 /*
1176 * The driver stores the segment count returned from pci_map_sg
 1177 * because this is a count of dma-mappings used to map the use_sg
1178 * pages. They are not guaranteed to be the same for those
1179 * architectures that implement an IOMMU.
1180 */
1181 datasegcnt = dma_map_sg(&phba->pcidev->dev,
1182 scsi_sglist(scsi_cmnd),
1183 scsi_sg_count(scsi_cmnd), datadir);
1184 if (unlikely(!datasegcnt))
1185 return 1;
1186
1187 lpfc_cmd->seg_cnt = datasegcnt;
1188 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1189 printk(KERN_ERR "%s: Too many sg segments from "
1190 "dma_map_sg. Config %d, seg_cnt %d\n",
1191 __func__, phba->cfg_sg_seg_cnt,
1192 lpfc_cmd->seg_cnt);
1193 scsi_dma_unmap(scsi_cmnd);
1194 return 1;
1195 }
1196
1197 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
1198
1199 switch (prot_group_type) {
1200 case LPFC_PG_TYPE_NO_DIF:
1201 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1202 datasegcnt);
 1203 /* we should have 2 or more entries in buffer list */
1204 if (num_bde < 2)
1205 goto err;
1206 break;
1207 case LPFC_PG_TYPE_DIF_BUF:{
1208 /*
1209 * This type indicates that protection buffers are
 1210 * passed to the driver, so they need to be prepared
1211 * for DMA
1212 */
1213 protsegcnt = dma_map_sg(&phba->pcidev->dev,
1214 scsi_prot_sglist(scsi_cmnd),
1215 scsi_prot_sg_count(scsi_cmnd), datadir);
1216 if (unlikely(!protsegcnt)) {
1217 scsi_dma_unmap(scsi_cmnd);
1218 return 1;
1219 }
1220
1221 lpfc_cmd->prot_seg_cnt = protsegcnt;
1222 if (lpfc_cmd->prot_seg_cnt
1223 > phba->cfg_prot_sg_seg_cnt) {
1224 printk(KERN_ERR "%s: Too many prot sg segments "
1225 "from dma_map_sg. Config %d,"
1226 "prot_seg_cnt %d\n", __func__,
1227 phba->cfg_prot_sg_seg_cnt,
1228 lpfc_cmd->prot_seg_cnt);
1229 dma_unmap_sg(&phba->pcidev->dev,
1230 scsi_prot_sglist(scsi_cmnd),
1231 scsi_prot_sg_count(scsi_cmnd),
1232 datadir);
1233 scsi_dma_unmap(scsi_cmnd);
1234 return 1;
1235 }
1236
1237 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1238 datasegcnt, protsegcnt);
 1239 /* we should have 3 or more entries in buffer list */
1240 if (num_bde < 3)
1241 goto err;
1242 break;
1243 }
1244 case LPFC_PG_TYPE_INVALID:
1245 default:
1246 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1247 "9022 Unexpected protection group %i\n",
1248 prot_group_type);
1249 return 1;
1250 }
1251 }
1252
1253 /*
1254 * Finish initializing those IOCB fields that are dependent on the
1255 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
1256 * reinitialized since all iocb memory resources are used many times
1257 * for transmit, receive, and continuation bpl's.
1258 */
1259 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
1260 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
1261 iocb_cmd->ulpBdeCount = 1;
1262 iocb_cmd->ulpLe = 1;
1263
1264 fcpdl = scsi_bufflen(scsi_cmnd);
1265
1266 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
1267 /*
1268 * We are in DIF Type 1 mode
 1269 * Every data block has an 8 byte DIF (trailer)
 1270 * attached to it. Must adjust FCP data length
1271 */
1272 blksize = lpfc_cmd_blksize(scsi_cmnd);
1273 diflen = (fcpdl / blksize) * 8;
1274 fcpdl += diflen;
1275 }
1276 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
1277
1278 /*
1279 * Due to difference in data length between DIF/non-DIF paths,
1280 * we need to set word 4 of IOCB here
1281 */
1282 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
1283
590 return 0; 1284 return 0;
1285err:
1286 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1287 "9023 Could not setup all needed BDE's"
1288 "prot_group_type=%d, num_bde=%d\n",
1289 prot_group_type, num_bde);
1290 return 1;
1291}
1292
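For DIF Type 1, lpfc_bg_scsi_prep_dma_buf() above grows the FCP data length by 8 bytes per logical block, e.g. a 4096-byte transfer with 512-byte blocks becomes 4096 + (4096 / 512) * 8 = 4160 bytes on the wire. A hedged sketch of that adjustment (standalone helper, not the driver function):

/* Illustrative only: add the 8-byte DIF trailer per block to the FCP length. */
static unsigned int example_dif_adjusted_len(unsigned int fcpdl,
					     unsigned int blksize)
{
	return fcpdl + (fcpdl / blksize) * 8;
}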
1293/*
1294 * This function checks for BlockGuard errors detected by
1295 * the HBA. In case of errors, the ASC/ASCQ fields in the
1296 * sense buffer will be set accordingly, paired with
1297 * ILLEGAL_REQUEST to signal to the kernel that the HBA
1298 * detected corruption.
1299 *
1300 * Returns:
1301 * 0 - No error found
1302 * 1 - BlockGuard error found
1303 * -1 - Internal error (bad profile, ...etc)
1304 */
1305static int
1306lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1307 struct lpfc_iocbq *pIocbOut)
1308{
1309 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1310 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
1311 int ret = 0;
1312 uint32_t bghm = bgf->bghm;
1313 uint32_t bgstat = bgf->bgstat;
1314 uint64_t failing_sector = 0;
1315
1316 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
1317 "bgstat=0x%x bghm=0x%x\n",
1318 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1319 cmd->request->nr_sectors, bgstat, bghm);
1320
1321 spin_lock(&_dump_buf_lock);
1322 if (!_dump_buf_done) {
1323 printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
1324 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1325 lpfc_debug_save_data(cmd);
1326
1327 /* If we have a prot sgl, save the DIF buffer */
1328 if (lpfc_prot_group_type(phba, cmd) ==
1329 LPFC_PG_TYPE_DIF_BUF) {
1330 printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
1331 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1332 lpfc_debug_save_dif(cmd);
1333 }
1334
1335 _dump_buf_done = 1;
1336 }
1337 spin_unlock(&_dump_buf_lock);
1338
1339 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1340 cmd->result = ScsiResult(DID_ERROR, 0);
1341 printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
1342 bgstat);
1343 ret = (-1);
1344 goto out;
1345 }
1346
1347 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1348 cmd->result = ScsiResult(DID_ERROR, 0);
1349 printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1350 bgstat);
1351 ret = (-1);
1352 goto out;
1353 }
1354
1355 if (lpfc_bgs_get_guard_err(bgstat)) {
1356 ret = 1;
1357
1358 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1359 0x10, 0x1);
1360 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
1361 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1362 phba->bg_guard_err_cnt++;
1363 printk(KERN_ERR "BLKGRD: guard_tag error\n");
1364 }
1365
1366 if (lpfc_bgs_get_reftag_err(bgstat)) {
1367 ret = 1;
1368
1369 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1370 0x10, 0x3);
1371 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
1372 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1373
1374 phba->bg_reftag_err_cnt++;
1375 printk(KERN_ERR "BLKGRD: ref_tag error\n");
1376 }
1377
1378 if (lpfc_bgs_get_apptag_err(bgstat)) {
1379 ret = 1;
1380
1381 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1382 0x10, 0x2);
1383 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
1384 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1385
1386 phba->bg_apptag_err_cnt++;
1387 printk(KERN_ERR "BLKGRD: app_tag error\n");
1388 }
1389
1390 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
1391 /*
1392 * setup sense data descriptor 0 per SPC-4 as an information
1393 * field, and put the failing LBA in it
1394 */
1395 cmd->sense_buffer[8] = 0; /* Information */
1396 cmd->sense_buffer[9] = 0xa; /* Add. length */
1397 do_div(bghm, cmd->device->sector_size);
1398
1399 failing_sector = scsi_get_lba(cmd);
1400 failing_sector += bghm;
1401
1402 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
1403 }
1404
1405 if (!ret) {
1406 /* No error was reported - problem in FW? */
1407 cmd->result = ScsiResult(DID_ERROR, 0);
1408 printk(KERN_ERR "BLKGRD: no errors reported!\n");
1409 }
1410
1411out:
1412 return ret;
591} 1413}
592 1414
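When the HBA reports a high-water mark, lpfc_parse_bg_err() above scales it to blocks, adds it to the command's starting LBA, and stores the 64-bit failing LBA big-endian in the descriptor-format sense data. A hedged sketch of that step (helper and parameters are illustrative, not the driver's):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Illustrative only: record the failing LBA in descriptor-format sense data. */
static void example_set_failing_lba(u8 *sense, u64 start_lba,
				    u32 bghm, u32 sector_size)
{
	u64 failing_sector = start_lba + (bghm / sector_size);

	sense[8] = 0;		/* descriptor 0: Information field */
	sense[9] = 0xa;		/* additional length */
	put_unaligned_be64(failing_sector, &sense[10]);
}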
593/** 1415/**
@@ -681,6 +1503,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
681 lpfc_worker_wake_up(phba); 1503 lpfc_worker_wake_up(phba);
682 return; 1504 return;
683} 1505}
1506
1507/**
1508 * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
1509 * @phba: The Hba for which this call is being executed.
1510 * @psb: The scsi buffer which is going to be un-mapped.
1511 *
1512 * This routine does DMA un-mapping of scatter gather list of scsi command
1513 * field of @lpfc_cmd.
1514 **/
684static void 1515static void
685lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 1516lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
686{ 1517{
@@ -692,8 +1523,22 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
692 */ 1523 */
693 if (psb->seg_cnt > 0) 1524 if (psb->seg_cnt > 0)
694 scsi_dma_unmap(psb->pCmd); 1525 scsi_dma_unmap(psb->pCmd);
1526 if (psb->prot_seg_cnt > 0)
1527 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
1528 scsi_prot_sg_count(psb->pCmd),
1529 psb->pCmd->sc_data_direction);
695} 1530}
696 1531
1532/**
 1533 * lpfc_handle_fcp_err: FCP response handler.
1534 * @vport: The virtual port for which this call is being executed.
1535 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
1536 * @rsp_iocb: The response IOCB which contains FCP error.
1537 *
1538 * This routine is called to process response IOCB with status field
1539 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
1540 * based upon SCSI and FCP error.
1541 **/
697static void 1542static void
698lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 1543lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
699 struct lpfc_iocbq *rsp_iocb) 1544 struct lpfc_iocbq *rsp_iocb)
@@ -735,7 +1580,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
735 logit = LOG_FCP; 1580 logit = LOG_FCP;
736 1581
737 lpfc_printf_vlog(vport, KERN_WARNING, logit, 1582 lpfc_printf_vlog(vport, KERN_WARNING, logit,
738 "0730 FCP command x%x failed: x%x SNS x%x x%x " 1583 "9024 FCP command x%x failed: x%x SNS x%x x%x "
739 "Data: x%x x%x x%x x%x x%x\n", 1584 "Data: x%x x%x x%x x%x x%x\n",
740 cmnd->cmnd[0], scsi_status, 1585 cmnd->cmnd[0], scsi_status,
741 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 1586 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
@@ -758,7 +1603,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
758 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 1603 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
759 1604
760 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 1605 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
761 "0716 FCP Read Underrun, expected %d, " 1606 "9025 FCP Read Underrun, expected %d, "
762 "residual %d Data: x%x x%x x%x\n", 1607 "residual %d Data: x%x x%x x%x\n",
763 be32_to_cpu(fcpcmd->fcpDl), 1608 be32_to_cpu(fcpcmd->fcpDl),
764 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 1609 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
@@ -774,7 +1619,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
774 (scsi_get_resid(cmnd) != fcpi_parm)) { 1619 (scsi_get_resid(cmnd) != fcpi_parm)) {
775 lpfc_printf_vlog(vport, KERN_WARNING, 1620 lpfc_printf_vlog(vport, KERN_WARNING,
776 LOG_FCP | LOG_FCP_ERROR, 1621 LOG_FCP | LOG_FCP_ERROR,
777 "0735 FCP Read Check Error " 1622 "9026 FCP Read Check Error "
778 "and Underrun Data: x%x x%x x%x x%x\n", 1623 "and Underrun Data: x%x x%x x%x x%x\n",
779 be32_to_cpu(fcpcmd->fcpDl), 1624 be32_to_cpu(fcpcmd->fcpDl),
780 scsi_get_resid(cmnd), fcpi_parm, 1625 scsi_get_resid(cmnd), fcpi_parm,
@@ -793,7 +1638,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
793 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 1638 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
794 < cmnd->underflow)) { 1639 < cmnd->underflow)) {
795 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 1640 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
796 "0717 FCP command x%x residual " 1641 "9027 FCP command x%x residual "
797 "underrun converted to error " 1642 "underrun converted to error "
798 "Data: x%x x%x x%x\n", 1643 "Data: x%x x%x x%x\n",
799 cmnd->cmnd[0], scsi_bufflen(cmnd), 1644 cmnd->cmnd[0], scsi_bufflen(cmnd),
@@ -802,7 +1647,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
802 } 1647 }
803 } else if (resp_info & RESID_OVER) { 1648 } else if (resp_info & RESID_OVER) {
804 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 1649 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
805 "0720 FCP command x%x residual overrun error. " 1650 "9028 FCP command x%x residual overrun error. "
806 "Data: x%x x%x \n", cmnd->cmnd[0], 1651 "Data: x%x x%x \n", cmnd->cmnd[0],
807 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 1652 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
808 host_status = DID_ERROR; 1653 host_status = DID_ERROR;
@@ -814,7 +1659,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
814 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 1659 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
815 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 1660 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
816 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 1661 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
817 "0734 FCP Read Check Error Data: " 1662 "9029 FCP Read Check Error Data: "
818 "x%x x%x x%x x%x\n", 1663 "x%x x%x x%x x%x\n",
819 be32_to_cpu(fcpcmd->fcpDl), 1664 be32_to_cpu(fcpcmd->fcpDl),
820 be32_to_cpu(fcprsp->rspResId), 1665 be32_to_cpu(fcprsp->rspResId),
@@ -828,6 +1673,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
828 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); 1673 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
829} 1674}
830 1675
1676/**
1677 * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
1678 * @phba: The Hba for which this call is being executed.
1679 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 1680 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1681 *
1682 * This routine assigns scsi command result by looking into response IOCB
1683 * status field appropriately. This routine handles QUEUE FULL condition as
1684 * well by ramping down device queue depth.
1685 **/
831static void 1686static void
832lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 1687lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
833 struct lpfc_iocbq *pIocbOut) 1688 struct lpfc_iocbq *pIocbOut)
@@ -846,7 +1701,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
846 1701
847 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 1702 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
848 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 1703 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
849 atomic_dec(&pnode->cmd_pending); 1704 if (pnode && NLP_CHK_NODE_ACT(pnode))
1705 atomic_dec(&pnode->cmd_pending);
850 1706
851 if (lpfc_cmd->status) { 1707 if (lpfc_cmd->status) {
852 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 1708 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -856,7 +1712,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
856 lpfc_cmd->status = IOSTAT_DEFAULT; 1712 lpfc_cmd->status = IOSTAT_DEFAULT;
857 1713
858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 1714 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
859 "0729 FCP cmd x%x failed <%d/%d> " 1715 "9030 FCP cmd x%x failed <%d/%d> "
860 "status: x%x result: x%x Data: x%x x%x\n", 1716 "status: x%x result: x%x Data: x%x x%x\n",
861 cmd->cmnd[0], 1717 cmd->cmnd[0],
862 cmd->device ? cmd->device->id : 0xffff, 1718 cmd->device ? cmd->device->id : 0xffff,
@@ -904,7 +1760,28 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
904 lpfc_cmd->result == IOERR_ABORT_REQUESTED) { 1760 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
905 cmd->result = ScsiResult(DID_REQUEUE, 0); 1761 cmd->result = ScsiResult(DID_REQUEUE, 0);
906 break; 1762 break;
907 } /* else: fall through */ 1763 }
1764
1765 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
1766 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
1767 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
1768 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1769 /*
1770 * This is a response for a BG enabled
1771 * cmd. Parse BG error
1772 */
1773 lpfc_parse_bg_err(phba, lpfc_cmd,
1774 pIocbOut);
1775 break;
1776 } else {
1777 lpfc_printf_vlog(vport, KERN_WARNING,
1778 LOG_BG,
1779 "9031 non-zero BGSTAT "
1780 "on unprotected cmd");
1781 }
1782 }
1783
1784 /* else: fall through */
908 default: 1785 default:
909 cmd->result = ScsiResult(DID_ERROR, 0); 1786 cmd->result = ScsiResult(DID_ERROR, 0);
910 break; 1787 break;
@@ -936,23 +1813,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
936 time_after(jiffies, lpfc_cmd->start_time + 1813 time_after(jiffies, lpfc_cmd->start_time +
937 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 1814 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
938 spin_lock_irqsave(sdev->host->host_lock, flags); 1815 spin_lock_irqsave(sdev->host->host_lock, flags);
939 if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) && 1816 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
940 (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) && 1817 if (pnode->cmd_qdepth >
941 ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10)))) 1818 atomic_read(&pnode->cmd_pending) &&
942 pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending); 1819 (atomic_read(&pnode->cmd_pending) >
943 1820 LPFC_MIN_TGT_QDEPTH) &&
944 pnode->last_change_time = jiffies; 1821 ((cmd->cmnd[0] == READ_10) ||
1822 (cmd->cmnd[0] == WRITE_10)))
1823 pnode->cmd_qdepth =
1824 atomic_read(&pnode->cmd_pending);
1825
1826 pnode->last_change_time = jiffies;
1827 }
945 spin_unlock_irqrestore(sdev->host->host_lock, flags); 1828 spin_unlock_irqrestore(sdev->host->host_lock, flags);
946 } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && 1829 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1830 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
947 time_after(jiffies, pnode->last_change_time + 1831 time_after(jiffies, pnode->last_change_time +
948 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { 1832 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
949 spin_lock_irqsave(sdev->host->host_lock, flags); 1833 spin_lock_irqsave(sdev->host->host_lock, flags);
950 pnode->cmd_qdepth += pnode->cmd_qdepth * 1834 pnode->cmd_qdepth += pnode->cmd_qdepth *
951 LPFC_TGTQ_RAMPUP_PCENT / 100; 1835 LPFC_TGTQ_RAMPUP_PCENT / 100;
952 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) 1836 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
953 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 1837 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
954 pnode->last_change_time = jiffies; 1838 pnode->last_change_time = jiffies;
955 spin_unlock_irqrestore(sdev->host->host_lock, flags); 1839 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1840 }
956 } 1841 }
957 1842
958 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 1843 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
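The reworked block above grows the target queue depth by LPFC_TGTQ_RAMPUP_PCENT percent and clamps it to LPFC_MAX_TGT_QDEPTH, and now only does so when the node pointer is valid. A hedged sketch of the arithmetic alone (illustrative helper, with the constants passed as parameters):

/* Illustrative only: grow queue depth by a percentage, clamped to a maximum. */
static unsigned int example_ramp_up_qdepth(unsigned int qdepth,
					   unsigned int rampup_pcent,
					   unsigned int max_qdepth)
{
	qdepth += qdepth * rampup_pcent / 100;
	if (qdepth > max_qdepth)
		qdepth = max_qdepth;
	return qdepth;
}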
@@ -1067,6 +1952,15 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1067 } 1952 }
1068} 1953}
1069 1954
1955/**
1956 * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
1957 * @vport: The virtual port for which this call is being executed.
1958 * @lpfc_cmd: The scsi command which needs to send.
1959 * @pnode: Pointer to lpfc_nodelist.
1960 *
1961 * This routine initializes the fcp_cmnd and iocb data structures from the scsi
1962 * command to be transferred.
1963 **/
1070static void 1964static void
1071lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 1965lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1072 struct lpfc_nodelist *pnode) 1966 struct lpfc_nodelist *pnode)
@@ -1122,7 +2016,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1122 } else { 2016 } else {
1123 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 2017 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
1124 iocb_cmd->ulpPU = PARM_READ_CHECK; 2018 iocb_cmd->ulpPU = PARM_READ_CHECK;
1125 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1126 fcp_cmnd->fcpCntl3 = READ_DATA; 2019 fcp_cmnd->fcpCntl3 = READ_DATA;
1127 phba->fc4InputRequests++; 2020 phba->fc4InputRequests++;
1128 } 2021 }
@@ -1133,7 +2026,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1133 fcp_cmnd->fcpCntl3 = 0; 2026 fcp_cmnd->fcpCntl3 = 0;
1134 phba->fc4ControlRequests++; 2027 phba->fc4ControlRequests++;
1135 } 2028 }
1136 if (phba->sli_rev == 3) 2029 if (phba->sli_rev == 3 &&
2030 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
1137 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 2031 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
1138 /* 2032 /*
1139 * Finish initializing those IOCB fields that are independent 2033 * Finish initializing those IOCB fields that are independent
@@ -1152,6 +2046,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1152 piocbq->vport = vport; 2046 piocbq->vport = vport;
1153} 2047}
1154 2048
2049/**
2050 * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
2051 * @vport: The virtual port for which this call is being executed.
2052 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2053 * @lun: Logical unit number.
2054 * @task_mgmt_cmd: SCSI task management command.
2055 *
2056 * This routine creates an FCP information unit corresponding to @task_mgmt_cmd.
2057 *
2058 * Return codes:
2059 * 0 - Error
2060 * 1 - Success
2061 **/
1155static int 2062static int
1156lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2063lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
1157 struct lpfc_scsi_buf *lpfc_cmd, 2064 struct lpfc_scsi_buf *lpfc_cmd,
@@ -1178,7 +2085,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
1178 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2085 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1179 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 2086 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
1180 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 2087 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
1181 if (vport->phba->sli_rev == 3) 2088 if (vport->phba->sli_rev == 3 &&
2089 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
1182 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 2090 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
1183 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 2091 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
1184 piocb->ulpContext = ndlp->nlp_rpi; 2092 piocb->ulpContext = ndlp->nlp_rpi;
@@ -1201,6 +2109,15 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
1201 return 1; 2109 return 1;
1202} 2110}
1203 2111
2112/**
2113 * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
2114 * @phba: The Hba for which this call is being executed.
2115 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2116 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2117 *
2118 * This routine is the IOCB completion routine for the device reset and target
2119 * reset routines. It releases the scsi buffer associated with the lpfc_cmd.
2120 **/
1204static void 2121static void
1205lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, 2122lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
1206 struct lpfc_iocbq *cmdiocbq, 2123 struct lpfc_iocbq *cmdiocbq,
@@ -1213,6 +2130,20 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
1213 return; 2130 return;
1214} 2131}
1215 2132
2133/**
2134 * lpfc_scsi_tgt_reset: Target reset handler.
2135 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2136 * @vport: The virtual port for which this call is being executed.
2137 * @tgt_id: Target ID.
2138 * @lun: Lun number.
2139 * @rdata: Pointer to lpfc_rport_data.
2140 *
2141 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
2142 *
2143 * Return Code:
2144 * 0x2003 - Error
2145 * 0x2002 - Success.
2146 **/
1216static int 2147static int
1217lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, 2148lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
1218 unsigned tgt_id, unsigned int lun, 2149 unsigned tgt_id, unsigned int lun,
@@ -1266,6 +2197,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
1266 return ret; 2197 return ret;
1267} 2198}
1268 2199
2200/**
2201 * lpfc_info: Info entry point of scsi_host_template data structure.
2202 * @host: The scsi host for which this call is being executed.
2203 *
2204 * This routine provides module information about the hba.
2205 *
2206 * Return code:
2207 * Pointer to char - Success.
2208 **/
1269const char * 2209const char *
1270lpfc_info(struct Scsi_Host *host) 2210lpfc_info(struct Scsi_Host *host)
1271{ 2211{
@@ -1295,6 +2235,13 @@ lpfc_info(struct Scsi_Host *host)
1295 return lpfcinfobuf; 2235 return lpfcinfobuf;
1296} 2236}
1297 2237
2238/**
2239 * lpfc_poll_rearm_timer: Routine to modify the fcp_poll timer of the hba.
2240 * @phba: The Hba for which this call is being executed.
2241 *
2242 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
2243 * The default value of cfg_poll_tmo is 10 milliseconds.
2244 **/
1298static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 2245static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
1299{ 2246{
1300 unsigned long poll_tmo_expires = 2247 unsigned long poll_tmo_expires =
@@ -1305,11 +2252,25 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
1305 poll_tmo_expires); 2252 poll_tmo_expires);
1306} 2253}
1307 2254
2255/**
2256 * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
2257 * @phba: The Hba for which this call is being executed.
2258 *
2259 * This routine starts the fcp_poll_timer of @phba.
2260 **/
1308void lpfc_poll_start_timer(struct lpfc_hba * phba) 2261void lpfc_poll_start_timer(struct lpfc_hba * phba)
1309{ 2262{
1310 lpfc_poll_rearm_timer(phba); 2263 lpfc_poll_rearm_timer(phba);
1311} 2264}
1312 2265
2266/**
2267 * lpfc_poll_timeout: Restart polling timer.
2268 * @ptr: Pointer to the lpfc_hba data structure.
2269 *
2270 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2271 * and the FCP ring interrupt is disabled.
2272 **/
2273
1313void lpfc_poll_timeout(unsigned long ptr) 2274void lpfc_poll_timeout(unsigned long ptr)
1314{ 2275{
1315 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 2276 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
@@ -1321,6 +2282,20 @@ void lpfc_poll_timeout(unsigned long ptr)
1321 } 2282 }
1322} 2283}
1323 2284
2285/**
2286 * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
2287 * structure.
2288 * @cmnd: Pointer to scsi_cmnd data structure.
2289 * @done: Pointer to done routine.
2290 *
2291 * The driver registers this routine with the scsi midlayer to submit a @cmnd
2292 * for processing. It prepares an IOCB from the scsi command and hands it to
2293 * the firmware. The @done callback is invoked after the driver has finished
2294 * processing the command.
2294 *
2295 * Return value :
2296 * 0 - Success
2297 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2298 **/
1324static int 2299static int
1325lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 2300lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1326{ 2301{
@@ -1340,6 +2315,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1340 goto out_fail_command; 2315 goto out_fail_command;
1341 } 2316 }
1342 2317
2318 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2319 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2320
2321 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
2322 "str=%s without registering for BlockGuard - "
2323 "Rejecting command\n",
2324 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2325 dif_op_str[scsi_get_prot_op(cmnd)]);
2326 goto out_fail_command;
2327 }
2328
1343 /* 2329 /*
1344 * Catch race where our node has transitioned, but the 2330 * Catch race where our node has transitioned, but the
1345 * transport is still transitioning. 2331 * transport is still transitioning.
@@ -1348,12 +2334,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1348 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 2334 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
1349 goto out_fail_command; 2335 goto out_fail_command;
1350 } 2336 }
1351 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) 2337 if (vport->cfg_max_scsicmpl_time &&
2338 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
1352 goto out_host_busy; 2339 goto out_host_busy;
1353 2340
1354 lpfc_cmd = lpfc_get_scsi_buf(phba); 2341 lpfc_cmd = lpfc_get_scsi_buf(phba);
1355 if (lpfc_cmd == NULL) { 2342 if (lpfc_cmd == NULL) {
1356 lpfc_adjust_queue_depth(phba); 2343 lpfc_rampdown_queue_depth(phba);
1357 2344
1358 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2345 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1359 "0707 driver's buffer pool is empty, " 2346 "0707 driver's buffer pool is empty, "
@@ -1361,7 +2348,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1361 goto out_host_busy; 2348 goto out_host_busy;
1362 } 2349 }
1363 2350
1364 lpfc_cmd->start_time = jiffies;
1365 /* 2351 /*
1366 * Store the midlayer's command structure for the completion phase 2352 * Store the midlayer's command structure for the completion phase
1367 * and complete the command initialization. 2353 * and complete the command initialization.
@@ -1373,7 +2359,65 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1373 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 2359 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
1374 cmnd->scsi_done = done; 2360 cmnd->scsi_done = done;
1375 2361
1376 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 2362 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2363 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2364 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2365 "str=%s\n",
2366 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2367 dif_op_str[scsi_get_prot_op(cmnd)]);
2368 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2369 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2370 "%02x %02x %02x %02x %02x \n",
2371 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2372 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2373 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2374 cmnd->cmnd[9]);
2375 if (cmnd->cmnd[0] == READ_10)
2376 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2377 "9035 BLKGRD: READ @ sector %llu, "
2378 "count %lu\n",
2379 (unsigned long long)scsi_get_lba(cmnd),
2380 cmnd->request->nr_sectors);
2381 else if (cmnd->cmnd[0] == WRITE_10)
2382 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2383 "9036 BLKGRD: WRITE @ sector %llu, "
2384 "count %lu cmd=%p\n",
2385 (unsigned long long)scsi_get_lba(cmnd),
2386 cmnd->request->nr_sectors,
2387 cmnd);
2388
2389 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2390 } else {
2391 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2392 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2393 " str=%s\n",
2394 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2395 dif_op_str[scsi_get_prot_op(cmnd)]);
2396 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2397 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2398 "%02x %02x %02x %02x %02x \n",
2399 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2400 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2401 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2402 cmnd->cmnd[9]);
2403 if (cmnd->cmnd[0] == READ_10)
2404 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2405 "9040 dbg: READ @ sector %llu, "
2406 "count %lu\n",
2407 (unsigned long long)scsi_get_lba(cmnd),
2408 cmnd->request->nr_sectors);
2409 else if (cmnd->cmnd[0] == WRITE_10)
2410 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2411 "9041 dbg: WRITE @ sector %llu, "
2412 "count %lu cmd=%p\n",
2413 (unsigned long long)scsi_get_lba(cmnd),
2414 cmnd->request->nr_sectors, cmnd);
2415 else
2416 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2417 "9042 dbg: parser not implemented\n");
2418 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2419 }
2420
1377 if (err) 2421 if (err)
1378 goto out_host_busy_free_buf; 2422 goto out_host_busy_free_buf;
1379 2423
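In the queuecommand hunk above, commands carrying a protection operation are rejected when the HBA never granted BlockGuard, and are otherwise steered to the BlockGuard DMA-prep path instead of the normal one. The sketch below is a stripped-down, hypothetical model of that decision only: the enum, struct names, and prep_command() helper are invented for illustration and stand in for scsi_get_prot_op(), the LPFC_SLI3_BG_ENABLED option bit, and the two lpfc prep routines.

#include <stdio.h>

/* Illustrative stand-ins: the real driver uses scsi_get_prot_op() and the
 * LPFC_SLI3_BG_ENABLED bit in phba->sli3_options. */
enum prot_op { PROT_NORMAL, PROT_READ_STRIP, PROT_WRITE_INSERT };

struct fake_hba  { int bg_enabled; };
struct fake_cmnd { enum prot_op prot; };

/* Mirrors the queuecommand decision in the hunk above: protected commands are
 * rejected outright when BlockGuard was never negotiated, otherwise they take
 * the BlockGuard DMA-prep path and everything else takes the normal path. */
static int prep_command(const struct fake_hba *hba, const struct fake_cmnd *cmd)
{
	if (!hba->bg_enabled && cmd->prot != PROT_NORMAL) {
		printf("reject: protected cmd without BlockGuard\n");
		return -1;
	}
	if (cmd->prot != PROT_NORMAL)
		printf("prep via BlockGuard (protection) DMA path\n");
	else
		printf("prep via normal DMA path\n");
	return 0;
}

int main(void)
{
	struct fake_hba hba = { .bg_enabled = 0 };
	struct fake_cmnd cmd = { .prot = PROT_READ_STRIP };

	prep_command(&hba, &cmd);   /* rejected */
	hba.bg_enabled = 1;
	prep_command(&hba, &cmd);   /* takes the BlockGuard path */
	return 0;
}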
@@ -1382,9 +2426,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1382 atomic_inc(&ndlp->cmd_pending); 2426 atomic_inc(&ndlp->cmd_pending);
1383 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 2427 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
1384 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 2428 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
1385 if (err) 2429 if (err) {
2430 atomic_dec(&ndlp->cmd_pending);
1386 goto out_host_busy_free_buf; 2431 goto out_host_busy_free_buf;
1387 2432 }
1388 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2433 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1389 lpfc_sli_poll_fcp_ring(phba); 2434 lpfc_sli_poll_fcp_ring(phba);
1390 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2435 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
@@ -1394,7 +2439,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1394 return 0; 2439 return 0;
1395 2440
1396 out_host_busy_free_buf: 2441 out_host_busy_free_buf:
1397 atomic_dec(&ndlp->cmd_pending);
1398 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 2442 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1399 lpfc_release_scsi_buf(phba, lpfc_cmd); 2443 lpfc_release_scsi_buf(phba, lpfc_cmd);
1400 out_host_busy: 2444 out_host_busy:
@@ -1405,6 +2449,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1405 return 0; 2449 return 0;
1406} 2450}
1407 2451
2452/**
2453 * lpfc_block_error_handler: Routine to block error handler.
2454 * @cmnd: Pointer to scsi_cmnd data structure.
2455 *
2456 * This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
2457 **/
1408static void 2458static void
1409lpfc_block_error_handler(struct scsi_cmnd *cmnd) 2459lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1410{ 2460{
@@ -1421,6 +2471,17 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1421 return; 2471 return;
1422} 2472}
1423 2473
2474/**
2475 * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data
2476 * structure.
2477 * @cmnd: Pointer to scsi_cmnd data structure.
2478 *
2479 * This routine aborts @cmnd pending in base driver.
2480 *
2481 * Return code :
2482 * 0x2003 - Error
2483 * 0x2002 - Success
2484 **/
1424static int 2485static int
1425lpfc_abort_handler(struct scsi_cmnd *cmnd) 2486lpfc_abort_handler(struct scsi_cmnd *cmnd)
1426{ 2487{
@@ -1516,6 +2577,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
1516 return ret; 2577 return ret;
1517} 2578}
1518 2579
2580/**
2581 * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template
2582 * data structure.
2583 * @cmnd: Pointer to scsi_cmnd data structure.
2584 *
2585 * This routine does a device reset by sending a TARGET_RESET task management
2586 * command.
2587 *
2588 * Return code :
2589 * 0x2003 - Error
2590 * 0x2002 - Success
2591 **/
1519static int 2592static int
1520lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 2593lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1521{ 2594{
@@ -1560,7 +2633,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1560 fc_get_event_number(), 2633 fc_get_event_number(),
1561 sizeof(scsi_event), 2634 sizeof(scsi_event),
1562 (char *)&scsi_event, 2635 (char *)&scsi_event,
1563 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2636 LPFC_NL_VENDOR_ID);
1564 2637
1565 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { 2638 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1566 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2639 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -1633,6 +2706,17 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1633 return ret; 2706 return ret;
1634} 2707}
1635 2708
2709/**
2710 * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
2711 * Template data structure.
2712 * @cmnd: Pointer to scsi_cmnd data structure.
2713 *
2714 * This routine issues a target reset to every target on @cmnd->device->host.
2715 *
2716 * Return Code:
2717 * 0x2003 - Error
2718 * 0x2002 - Success
2719 **/
1636static int 2720static int
1637lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 2721lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1638{ 2722{
@@ -1657,7 +2741,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1657 fc_get_event_number(), 2741 fc_get_event_number(),
1658 sizeof(scsi_event), 2742 sizeof(scsi_event),
1659 (char *)&scsi_event, 2743 (char *)&scsi_event,
1660 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2744 LPFC_NL_VENDOR_ID);
1661 2745
1662 lpfc_block_error_handler(cmnd); 2746 lpfc_block_error_handler(cmnd);
1663 /* 2747 /*
@@ -1723,6 +2807,20 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1723 return ret; 2807 return ret;
1724} 2808}
1725 2809
2810/**
2811 * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
2812 * structure.
2813 * @sdev: Pointer to scsi_device.
2814 *
2815 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
2816 * globally available list of scsi buffers. This routine also makes sure scsi
2817 * buffers are not allocated beyond the HBA limit conveyed to the midlayer. This
2818 * list of scsi buffers exists for the lifetime of the driver.
2819 *
2820 * Return codes:
2821 * non-0 - Error
2822 * 0 - Success
2823 **/
1726static int 2824static int
1727lpfc_slave_alloc(struct scsi_device *sdev) 2825lpfc_slave_alloc(struct scsi_device *sdev)
1728{ 2826{
@@ -1784,6 +2882,19 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1784 return 0; 2882 return 0;
1785} 2883}
1786 2884
2885/**
2886 * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data
2887 * structure.
2888 * @sdev: Pointer to scsi_device.
2889 *
2890 * This routine configures the following items:
2891 * - Tag command queuing support for @sdev if supported.
2892 * - Dev loss time out value of fc_rport.
2893 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
2894 *
2895 * Return codes:
2896 * 0 - Success
2897 **/
1787static int 2898static int
1788lpfc_slave_configure(struct scsi_device *sdev) 2899lpfc_slave_configure(struct scsi_device *sdev)
1789{ 2900{
@@ -1813,6 +2924,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
1813 return 0; 2924 return 0;
1814} 2925}
1815 2926
2927/**
2928 * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
2929 * @sdev: Pointer to scsi_device.
2930 *
2931 * This routine sets the @sdev hostdata field to NULL.
2932 **/
1816static void 2933static void
1817lpfc_slave_destroy(struct scsi_device *sdev) 2934lpfc_slave_destroy(struct scsi_device *sdev)
1818{ 2935{
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 437f182e2322..c7c440d5fa29 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -124,6 +124,8 @@ struct lpfc_scsi_buf {
124 uint32_t seg_cnt; /* Number of scatter-gather segments returned by 124 uint32_t seg_cnt; /* Number of scatter-gather segments returned by
125 * dma_map_sg. The driver needs this for calls 125 * dma_map_sg. The driver needs this for calls
126 * to dma_unmap_sg. */ 126 * to dma_unmap_sg. */
127 uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */
128
127 dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ 129 dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
128 130
129 /* 131 /*
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8ab5babdeebc..01dfdc8696f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -542,6 +542,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
542 */ 542 */
543 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 543 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
544 544
545
545 if (pring->ringno == LPFC_ELS_RING) { 546 if (pring->ringno == LPFC_ELS_RING) {
546 lpfc_debugfs_slow_ring_trc(phba, 547 lpfc_debugfs_slow_ring_trc(phba,
547 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 548 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
@@ -1259,68 +1260,6 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1259} 1260}
1260 1261
1261/** 1262/**
1262 * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer.
1263 * @phba: Pointer to HBA context object.
1264 * @tag: Tag for the HBQ buffer.
1265 *
1266 * This function is called from unsolicited event handler code path to get the
1267 * HBQ buffer associated with an unsolicited iocb. This function is called with
1268 * no lock held. It returns the buffer associated with the given tag and posts
1269 * another buffer to the firmware. Note that the new buffer must be allocated
1270 * before taking the hbalock and that the hba lock must be held until it is
1271 * finished with the hbq entry swap.
1272 **/
1273static struct lpfc_dmabuf *
1274lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
1275{
1276 struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
1277 uint32_t hbqno;
1278 void *virt; /* virtual address ptr */
1279 dma_addr_t phys; /* mapped address */
1280 unsigned long flags;
1281
1282 hbqno = tag >> 16;
1283 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1284 /* Check whether HBQ is still in use */
1285 spin_lock_irqsave(&phba->hbalock, flags);
1286 if (!phba->hbq_in_use) {
1287 if (new_hbq_entry)
1288 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1289 new_hbq_entry);
1290 spin_unlock_irqrestore(&phba->hbalock, flags);
1291 return NULL;
1292 }
1293
1294 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
1295 if (hbq_entry == NULL) {
1296 if (new_hbq_entry)
1297 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1298 new_hbq_entry);
1299 spin_unlock_irqrestore(&phba->hbalock, flags);
1300 return NULL;
1301 }
1302 list_del(&hbq_entry->dbuf.list);
1303
1304 if (new_hbq_entry == NULL) {
1305 list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
1306 spin_unlock_irqrestore(&phba->hbalock, flags);
1307 return &hbq_entry->dbuf;
1308 }
1309 new_hbq_entry->tag = -1;
1310 phys = new_hbq_entry->dbuf.phys;
1311 virt = new_hbq_entry->dbuf.virt;
1312 new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
1313 new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
1314 hbq_entry->dbuf.phys = phys;
1315 hbq_entry->dbuf.virt = virt;
1316 lpfc_sli_free_hbq(phba, hbq_entry);
1317 list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
1318 spin_unlock_irqrestore(&phba->hbalock, flags);
1319
1320 return &new_hbq_entry->dbuf;
1321}
1322
1323/**
1324 * lpfc_sli_get_buff: Get the buffer associated with the buffer tag. 1263 * lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
1325 * @phba: Pointer to HBA context object. 1264 * @phba: Pointer to HBA context object.
1326 * @pring: Pointer to driver SLI ring object. 1265 * @pring: Pointer to driver SLI ring object.
@@ -1334,13 +1273,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
1334 **/ 1273 **/
1335static struct lpfc_dmabuf * 1274static struct lpfc_dmabuf *
1336lpfc_sli_get_buff(struct lpfc_hba *phba, 1275lpfc_sli_get_buff(struct lpfc_hba *phba,
1337 struct lpfc_sli_ring *pring, 1276 struct lpfc_sli_ring *pring,
1338 uint32_t tag) 1277 uint32_t tag)
1339{ 1278{
1279 struct hbq_dmabuf *hbq_entry;
1280
1340 if (tag & QUE_BUFTAG_BIT) 1281 if (tag & QUE_BUFTAG_BIT)
1341 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 1282 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
1342 else 1283 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
1343 return lpfc_sli_replace_hbqbuff(phba, tag); 1284 if (!hbq_entry)
1285 return NULL;
1286 return &hbq_entry->dbuf;
1344} 1287}
1345 1288
1346 1289
@@ -1372,8 +1315,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1372 match = 0; 1315 match = 0;
1373 irsp = &(saveq->iocb); 1316 irsp = &(saveq->iocb);
1374 1317
1375 if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
1376 return 1;
1377 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 1318 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
1378 if (pring->lpfc_sli_rcv_async_status) 1319 if (pring->lpfc_sli_rcv_async_status)
1379 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 1320 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
@@ -1982,7 +1923,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1982 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1923 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1983 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 1924 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1984 spin_unlock_irqrestore(&phba->hbalock, iflag); 1925 spin_unlock_irqrestore(&phba->hbalock, iflag);
1985 lpfc_adjust_queue_depth(phba); 1926 lpfc_rampdown_queue_depth(phba);
1986 spin_lock_irqsave(&phba->hbalock, iflag); 1927 spin_lock_irqsave(&phba->hbalock, iflag);
1987 } 1928 }
1988 1929
@@ -2225,7 +2166,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2225 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2166 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2226 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2167 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2227 spin_unlock_irqrestore(&phba->hbalock, iflag); 2168 spin_unlock_irqrestore(&phba->hbalock, iflag);
2228 lpfc_adjust_queue_depth(phba); 2169 lpfc_rampdown_queue_depth(phba);
2229 spin_lock_irqsave(&phba->hbalock, iflag); 2170 spin_lock_irqsave(&phba->hbalock, iflag);
2230 } 2171 }
2231 2172
@@ -2790,7 +2731,6 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2790{ 2731{
2791 MAILBOX_t *mb; 2732 MAILBOX_t *mb;
2792 struct lpfc_sli *psli; 2733 struct lpfc_sli *psli;
2793 uint16_t skip_post;
2794 volatile uint32_t word0; 2734 volatile uint32_t word0;
2795 void __iomem *to_slim; 2735 void __iomem *to_slim;
2796 2736
@@ -2815,13 +2755,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2815 readl(to_slim); /* flush */ 2755 readl(to_slim); /* flush */
2816 2756
2817 /* Only skip post after fc_ffinit is completed */ 2757 /* Only skip post after fc_ffinit is completed */
2818 if (phba->pport->port_state) { 2758 if (phba->pport->port_state)
2819 skip_post = 1;
2820 word0 = 1; /* This is really setting up word1 */ 2759 word0 = 1; /* This is really setting up word1 */
2821 } else { 2760 else
2822 skip_post = 0;
2823 word0 = 0; /* This is really setting up word1 */ 2761 word0 = 0; /* This is really setting up word1 */
2824 }
2825 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2762 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2826 writel(*(uint32_t *) mb, to_slim); 2763 writel(*(uint32_t *) mb, to_slim);
2827 readl(to_slim); /* flush */ 2764 readl(to_slim); /* flush */
@@ -2835,10 +2772,8 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2835 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 2772 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2836 psli->stats_start = get_seconds(); 2773 psli->stats_start = get_seconds();
2837 2774
2838 if (skip_post) 2775 /* Give the INITFF and Post time to settle. */
2839 mdelay(100); 2776 mdelay(100);
2840 else
2841 mdelay(2000);
2842 2777
2843 lpfc_hba_down_post(phba); 2778 lpfc_hba_down_post(phba);
2844 2779
@@ -3084,7 +3019,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3084 spin_unlock_irq(&phba->hbalock); 3019 spin_unlock_irq(&phba->hbalock);
3085 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3020 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3086 lpfc_sli_brdrestart(phba); 3021 lpfc_sli_brdrestart(phba);
3087 msleep(2500);
3088 rc = lpfc_sli_chipset_init(phba); 3022 rc = lpfc_sli_chipset_init(phba);
3089 if (rc) 3023 if (rc)
3090 break; 3024 break;
@@ -3111,7 +3045,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3111 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 3045 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3112 LPFC_SLI3_HBQ_ENABLED | 3046 LPFC_SLI3_HBQ_ENABLED |
3113 LPFC_SLI3_CRP_ENABLED | 3047 LPFC_SLI3_CRP_ENABLED |
3114 LPFC_SLI3_INB_ENABLED); 3048 LPFC_SLI3_INB_ENABLED |
3049 LPFC_SLI3_BG_ENABLED);
3115 if (rc != MBX_SUCCESS) { 3050 if (rc != MBX_SUCCESS) {
3116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3117 "0442 Adapter failed to init, mbxCmd x%x " 3052 "0442 Adapter failed to init, mbxCmd x%x "
@@ -3144,17 +3079,29 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3144 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3079 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3145 if (pmb->mb.un.varCfgPort.ginb) { 3080 if (pmb->mb.un.varCfgPort.ginb) {
3146 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3081 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3082 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3147 phba->port_gp = phba->mbox->us.s3_inb_pgp.port; 3083 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3148 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy; 3084 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3149 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter; 3085 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3150 phba->inb_last_counter = 3086 phba->inb_last_counter =
3151 phba->mbox->us.s3_inb_pgp.counter; 3087 phba->mbox->us.s3_inb_pgp.counter;
3152 } else { 3088 } else {
3089 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3153 phba->port_gp = phba->mbox->us.s3_pgp.port; 3090 phba->port_gp = phba->mbox->us.s3_pgp.port;
3154 phba->inb_ha_copy = NULL; 3091 phba->inb_ha_copy = NULL;
3155 phba->inb_counter = NULL; 3092 phba->inb_counter = NULL;
3156 } 3093 }
3094
3095 if (phba->cfg_enable_bg) {
3096 if (pmb->mb.un.varCfgPort.gbg)
3097 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3098 else
3099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3100 "0443 Adapter did not grant "
3101 "BlockGuard\n");
3102 }
3157 } else { 3103 } else {
3104 phba->hbq_get = NULL;
3158 phba->port_gp = phba->mbox->us.s2.port; 3105 phba->port_gp = phba->mbox->us.s2.port;
3159 phba->inb_ha_copy = NULL; 3106 phba->inb_ha_copy = NULL;
3160 phba->inb_counter = NULL; 3107 phba->inb_counter = NULL;
@@ -3305,10 +3252,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3305 struct lpfc_sli *psli = &phba->sli; 3252 struct lpfc_sli *psli = &phba->sli;
3306 struct lpfc_sli_ring *pring; 3253 struct lpfc_sli_ring *pring;
3307 3254
3308 if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
3309 return;
3310 }
3311
3312 /* Mbox cmd <mbxCommand> timeout */ 3255 /* Mbox cmd <mbxCommand> timeout */
3313 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 3256 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3314 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 3257 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
@@ -4005,7 +3948,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
4005 shost = lpfc_shost_from_vport(phba->pport); 3948 shost = lpfc_shost_from_vport(phba->pport);
4006 fc_host_post_vendor_event(shost, fc_get_event_number(), 3949 fc_host_post_vendor_event(shost, fc_get_event_number(),
4007 sizeof(temp_event_data), (char *) &temp_event_data, 3950 sizeof(temp_event_data), (char *) &temp_event_data,
4008 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 3951 LPFC_NL_VENDOR_ID);
4009 3952
4010} 3953}
4011 3954
@@ -5184,6 +5127,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5184{ 5127{
5185 uint32_t ha_copy; 5128 uint32_t ha_copy;
5186 5129
5130 /* If PCI channel is offline, don't process it */
5131 if (unlikely(pci_channel_offline(phba->pcidev)))
5132 return 0;
5133
5187 /* If somebody is waiting to handle an eratt, don't process it 5134 /* If somebody is waiting to handle an eratt, don't process it
5188 * here. The brdkill function will do this. 5135 * here. The brdkill function will do this.
5189 */ 5136 */
@@ -5242,6 +5189,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5242 uint32_t ha_copy; 5189 uint32_t ha_copy;
5243 uint32_t work_ha_copy; 5190 uint32_t work_ha_copy;
5244 unsigned long status; 5191 unsigned long status;
5192 unsigned long iflag;
5245 uint32_t control; 5193 uint32_t control;
5246 5194
5247 MAILBOX_t *mbox, *pmbox; 5195 MAILBOX_t *mbox, *pmbox;
@@ -5274,7 +5222,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5274 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 5222 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5275 return IRQ_NONE; 5223 return IRQ_NONE;
5276 /* Need to read HA REG for slow-path events */ 5224 /* Need to read HA REG for slow-path events */
5277 spin_lock(&phba->hbalock); 5225 spin_lock_irqsave(&phba->hbalock, iflag);
5278 ha_copy = readl(phba->HAregaddr); 5226 ha_copy = readl(phba->HAregaddr);
5279 /* If somebody is waiting to handle an eratt don't process it 5227 /* If somebody is waiting to handle an eratt don't process it
5280 * here. The brdkill function will do this. 5228 * here. The brdkill function will do this.
@@ -5294,7 +5242,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5294 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 5242 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
5295 phba->HAregaddr); 5243 phba->HAregaddr);
5296 readl(phba->HAregaddr); /* flush */ 5244 readl(phba->HAregaddr); /* flush */
5297 spin_unlock(&phba->hbalock); 5245 spin_unlock_irqrestore(&phba->hbalock, iflag);
5298 } else 5246 } else
5299 ha_copy = phba->ha_copy; 5247 ha_copy = phba->ha_copy;
5300 5248
@@ -5307,13 +5255,13 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5307 * Turn off Link Attention interrupts 5255 * Turn off Link Attention interrupts
5308 * until CLEAR_LA done 5256 * until CLEAR_LA done
5309 */ 5257 */
5310 spin_lock(&phba->hbalock); 5258 spin_lock_irqsave(&phba->hbalock, iflag);
5311 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 5259 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
5312 control = readl(phba->HCregaddr); 5260 control = readl(phba->HCregaddr);
5313 control &= ~HC_LAINT_ENA; 5261 control &= ~HC_LAINT_ENA;
5314 writel(control, phba->HCregaddr); 5262 writel(control, phba->HCregaddr);
5315 readl(phba->HCregaddr); /* flush */ 5263 readl(phba->HCregaddr); /* flush */
5316 spin_unlock(&phba->hbalock); 5264 spin_unlock_irqrestore(&phba->hbalock, iflag);
5317 } 5265 }
5318 else 5266 else
5319 work_ha_copy &= ~HA_LATT; 5267 work_ha_copy &= ~HA_LATT;
@@ -5328,7 +5276,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5328 (HA_RXMASK << (4*LPFC_ELS_RING))); 5276 (HA_RXMASK << (4*LPFC_ELS_RING)));
5329 status >>= (4*LPFC_ELS_RING); 5277 status >>= (4*LPFC_ELS_RING);
5330 if (status & HA_RXMASK) { 5278 if (status & HA_RXMASK) {
5331 spin_lock(&phba->hbalock); 5279 spin_lock_irqsave(&phba->hbalock, iflag);
5332 control = readl(phba->HCregaddr); 5280 control = readl(phba->HCregaddr);
5333 5281
5334 lpfc_debugfs_slow_ring_trc(phba, 5282 lpfc_debugfs_slow_ring_trc(phba,
@@ -5357,10 +5305,10 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5357 (uint32_t)((unsigned long) 5305 (uint32_t)((unsigned long)
5358 &phba->work_waitq)); 5306 &phba->work_waitq));
5359 } 5307 }
5360 spin_unlock(&phba->hbalock); 5308 spin_unlock_irqrestore(&phba->hbalock, iflag);
5361 } 5309 }
5362 } 5310 }
5363 spin_lock(&phba->hbalock); 5311 spin_lock_irqsave(&phba->hbalock, iflag);
5364 if (work_ha_copy & HA_ERATT) 5312 if (work_ha_copy & HA_ERATT)
5365 lpfc_sli_read_hs(phba); 5313 lpfc_sli_read_hs(phba);
5366 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 5314 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
@@ -5372,7 +5320,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5372 /* First check out the status word */ 5320 /* First check out the status word */
5373 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 5321 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
5374 if (pmbox->mbxOwner != OWN_HOST) { 5322 if (pmbox->mbxOwner != OWN_HOST) {
5375 spin_unlock(&phba->hbalock); 5323 spin_unlock_irqrestore(&phba->hbalock, iflag);
5376 /* 5324 /*
5377 * Stray Mailbox Interrupt, mbxCommand <cmd> 5325 * Stray Mailbox Interrupt, mbxCommand <cmd>
5378 * mbxStatus <status> 5326 * mbxStatus <status>
@@ -5389,7 +5337,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5389 work_ha_copy &= ~HA_MBATT; 5337 work_ha_copy &= ~HA_MBATT;
5390 } else { 5338 } else {
5391 phba->sli.mbox_active = NULL; 5339 phba->sli.mbox_active = NULL;
5392 spin_unlock(&phba->hbalock); 5340 spin_unlock_irqrestore(&phba->hbalock, iflag);
5393 phba->last_completion_time = jiffies; 5341 phba->last_completion_time = jiffies;
5394 del_timer(&phba->sli.mbox_tmo); 5342 del_timer(&phba->sli.mbox_tmo);
5395 if (pmb->mbox_cmpl) { 5343 if (pmb->mbox_cmpl) {
@@ -5438,14 +5386,18 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5438 goto send_current_mbox; 5386 goto send_current_mbox;
5439 } 5387 }
5440 } 5388 }
5441 spin_lock(&phba->pport->work_port_lock); 5389 spin_lock_irqsave(
5390 &phba->pport->work_port_lock,
5391 iflag);
5442 phba->pport->work_port_events &= 5392 phba->pport->work_port_events &=
5443 ~WORKER_MBOX_TMO; 5393 ~WORKER_MBOX_TMO;
5444 spin_unlock(&phba->pport->work_port_lock); 5394 spin_unlock_irqrestore(
5395 &phba->pport->work_port_lock,
5396 iflag);
5445 lpfc_mbox_cmpl_put(phba, pmb); 5397 lpfc_mbox_cmpl_put(phba, pmb);
5446 } 5398 }
5447 } else 5399 } else
5448 spin_unlock(&phba->hbalock); 5400 spin_unlock_irqrestore(&phba->hbalock, iflag);
5449 5401
5450 if ((work_ha_copy & HA_MBATT) && 5402 if ((work_ha_copy & HA_MBATT) &&
5451 (phba->sli.mbox_active == NULL)) { 5403 (phba->sli.mbox_active == NULL)) {
@@ -5461,9 +5413,9 @@ send_current_mbox:
5461 "MBX_SUCCESS"); 5413 "MBX_SUCCESS");
5462 } 5414 }
5463 5415
5464 spin_lock(&phba->hbalock); 5416 spin_lock_irqsave(&phba->hbalock, iflag);
5465 phba->work_ha |= work_ha_copy; 5417 phba->work_ha |= work_ha_copy;
5466 spin_unlock(&phba->hbalock); 5418 spin_unlock_irqrestore(&phba->hbalock, iflag);
5467 lpfc_worker_wake_up(phba); 5419 lpfc_worker_wake_up(phba);
5468 } 5420 }
5469 return IRQ_HANDLED; 5421 return IRQ_HANDLED;
@@ -5495,6 +5447,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5495 struct lpfc_hba *phba; 5447 struct lpfc_hba *phba;
5496 uint32_t ha_copy; 5448 uint32_t ha_copy;
5497 unsigned long status; 5449 unsigned long status;
5450 unsigned long iflag;
5498 5451
5499 /* Get the driver's phba structure from the dev_id and 5452 /* Get the driver's phba structure from the dev_id and
5500 * assume the HBA is not interrupting. 5453 * assume the HBA is not interrupting.
@@ -5520,11 +5473,11 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5520 /* Need to read HA REG for FCP ring and other ring events */ 5473 /* Need to read HA REG for FCP ring and other ring events */
5521 ha_copy = readl(phba->HAregaddr); 5474 ha_copy = readl(phba->HAregaddr);
5522 /* Clear up only attention source related to fast-path */ 5475 /* Clear up only attention source related to fast-path */
5523 spin_lock(&phba->hbalock); 5476 spin_lock_irqsave(&phba->hbalock, iflag);
5524 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 5477 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
5525 phba->HAregaddr); 5478 phba->HAregaddr);
5526 readl(phba->HAregaddr); /* flush */ 5479 readl(phba->HAregaddr); /* flush */
5527 spin_unlock(&phba->hbalock); 5480 spin_unlock_irqrestore(&phba->hbalock, iflag);
5528 } else 5481 } else
5529 ha_copy = phba->ha_copy; 5482 ha_copy = phba->ha_copy;
5530 5483
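The simplified lpfc_sli_get_buff() above keys off a single bit in the buffer tag to decide between a ring tagged buffer and an HBQ lookup; in the removed replace-buffer path the HBQ number was carried in the tag's upper 16 bits. The short C program below only demonstrates that style of tag decoding; the bit position and field layout are assumptions made for the example, not the lpfc definitions.

#include <stdio.h>
#include <stdint.h>

/* Assumed tag layout for illustration: one high bit marks a ring "tagged"
 * buffer, and for HBQ buffers the upper 16 bits carry the HBQ number.
 * The real definitions live in the lpfc headers. */
#define QUE_BUFTAG_BIT_EXAMPLE  (1u << 31)

struct decoded_tag {
	int      is_ring_tagged;  /* buffer comes from the ring's tagged list */
	uint16_t hbqno;           /* HBQ number, valid for HBQ buffers only */
	uint16_t index;           /* low 16 bits of the tag */
};

/* Decode a buffer tag the way the simplified lookup path distinguishes
 * ring tagged buffers from HBQ buffers. */
static struct decoded_tag decode_buffer_tag(uint32_t tag)
{
	struct decoded_tag d = { 0 };

	if (tag & QUE_BUFTAG_BIT_EXAMPLE) {
		d.is_ring_tagged = 1;
	} else {
		d.hbqno = (uint16_t)(tag >> 16);
		d.index = (uint16_t)(tag & 0xFFFF);
	}
	return d;
}

int main(void)
{
	struct decoded_tag d = decode_buffer_tag((2u << 16) | 7);

	printf("ring tagged: %d, hbq: %u, index: %u\n",
	       d.is_ring_tagged, d.hbqno, d.index);
	return 0;
}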
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index cc43e9de22cc..7e32e95c5392 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.2.8" 21#define LPFC_DRIVER_VERSION "8.3.0"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a7de1cc02b40..63b54c66756c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -288,10 +288,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
288 int vpi; 288 int vpi;
289 int rc = VPORT_ERROR; 289 int rc = VPORT_ERROR;
290 int status; 290 int status;
291 int size;
292 291
293 if ((phba->sli_rev < 3) || 292 if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
294 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
295 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 293 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
296 "1808 Create VPORT failed: " 294 "1808 Create VPORT failed: "
297 "NPIV is not enabled: SLImode:%d\n", 295 "NPIV is not enabled: SLImode:%d\n",
@@ -351,20 +349,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
351 349
352 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); 350 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
353 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); 351 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
354 size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
355 if (size) {
356 vport->vname = kzalloc(size+1, GFP_KERNEL);
357 if (!vport->vname) {
358 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
359 "1814 Create VPORT failed. "
360 "vname allocation failed.\n");
361 rc = VPORT_ERROR;
362 lpfc_free_vpi(phba, vpi);
363 destroy_port(vport);
364 goto error_out;
365 }
366 memcpy(vport->vname, fc_vport->symbolic_name, size+1);
367 }
368 if (fc_vport->node_name != 0) 352 if (fc_vport->node_name != 0)
369 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); 353 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
370 if (fc_vport->port_name != 0) 354 if (fc_vport->port_name != 0)
@@ -394,6 +378,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
394 goto error_out; 378 goto error_out;
395 } 379 }
396 380
381 /* Create binary sysfs attribute for vport */
382 lpfc_alloc_sysfs_attr(vport);
383
397 *(struct lpfc_vport **)fc_vport->dd_data = vport; 384 *(struct lpfc_vport **)fc_vport->dd_data = vport;
398 vport->fc_vport = fc_vport; 385 vport->fc_vport = fc_vport;
399 386
@@ -405,6 +392,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
405 } 392 }
406 393
407 if (disable) { 394 if (disable) {
395 lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
408 rc = VPORT_OK; 396 rc = VPORT_OK;
409 goto out; 397 goto out;
410 } 398 }
@@ -587,8 +575,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
587 spin_lock_irq(&phba->hbalock); 575 spin_lock_irq(&phba->hbalock);
588 vport->load_flag |= FC_UNLOADING; 576 vport->load_flag |= FC_UNLOADING;
589 spin_unlock_irq(&phba->hbalock); 577 spin_unlock_irq(&phba->hbalock);
590 kfree(vport->vname); 578
579 lpfc_free_sysfs_attr(vport);
580
591 lpfc_debugfs_terminate(vport); 581 lpfc_debugfs_terminate(vport);
582
583 /* Remove FC host and then SCSI host with the vport */
592 fc_remove_host(lpfc_shost_from_vport(vport)); 584 fc_remove_host(lpfc_shost_from_vport(vport));
593 scsi_remove_host(lpfc_shost_from_vport(vport)); 585 scsi_remove_host(lpfc_shost_from_vport(vport));
594 586
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 887682a24e36..c24e86f07804 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -53,7 +53,8 @@ struct mac_esp_priv {
53 void __iomem *pdma_io; 53 void __iomem *pdma_io;
54 int error; 54 int error;
55}; 55};
56static struct platform_device *internal_esp, *external_esp; 56static struct platform_device *internal_pdev, *external_pdev;
57static struct esp *esp_chips[2];
57 58
58#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ 59#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
59 platform_get_drvdata((struct platform_device *) \ 60 platform_get_drvdata((struct platform_device *) \
@@ -170,7 +171,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
170 171
171#define MAC_ESP_PDMA_LOOP(operands) \ 172#define MAC_ESP_PDMA_LOOP(operands) \
172 asm volatile ( \ 173 asm volatile ( \
173 " tstw %2 \n" \ 174 " tstw %1 \n" \
174 " jbeq 20f \n" \ 175 " jbeq 20f \n" \
175 "1: movew " operands " \n" \ 176 "1: movew " operands " \n" \
176 "2: movew " operands " \n" \ 177 "2: movew " operands " \n" \
@@ -188,14 +189,14 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
188 "14: movew " operands " \n" \ 189 "14: movew " operands " \n" \
189 "15: movew " operands " \n" \ 190 "15: movew " operands " \n" \
190 "16: movew " operands " \n" \ 191 "16: movew " operands " \n" \
191 " subqw #1,%2 \n" \ 192 " subqw #1,%1 \n" \
192 " jbne 1b \n" \ 193 " jbne 1b \n" \
193 "20: tstw %3 \n" \ 194 "20: tstw %2 \n" \
194 " jbeq 30f \n" \ 195 " jbeq 30f \n" \
195 "21: movew " operands " \n" \ 196 "21: movew " operands " \n" \
196 " subqw #1,%3 \n" \ 197 " subqw #1,%2 \n" \
197 " jbne 21b \n" \ 198 " jbne 21b \n" \
198 "30: tstw %4 \n" \ 199 "30: tstw %3 \n" \
199 " jbeq 40f \n" \ 200 " jbeq 40f \n" \
200 "31: moveb " operands " \n" \ 201 "31: moveb " operands " \n" \
201 "32: nop \n" \ 202 "32: nop \n" \
@@ -223,8 +224,8 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
223 " .long 31b,40b \n" \ 224 " .long 31b,40b \n" \
224 " .long 32b,40b \n" \ 225 " .long 32b,40b \n" \
225 " .previous \n" \ 226 " .previous \n" \
226 : "+a" (addr) \ 227 : "+a" (addr), "+r" (count32), "+r" (count2) \
227 : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count)) 228 : "g" (count1), "a" (mep->pdma_io))
228 229
229static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count, 230static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
230 u32 dma_count, int write, u8 cmd) 231 u32 dma_count, int write, u8 cmd)
@@ -247,19 +248,20 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
247 do { 248 do {
248 unsigned int count32 = esp_count >> 5; 249 unsigned int count32 = esp_count >> 5;
249 unsigned int count2 = (esp_count & 0x1F) >> 1; 250 unsigned int count2 = (esp_count & 0x1F) >> 1;
251 unsigned int count1 = esp_count & 1;
250 unsigned int start_addr = addr; 252 unsigned int start_addr = addr;
251 253
252 if (mac_esp_wait_for_dreq(esp)) 254 if (mac_esp_wait_for_dreq(esp))
253 break; 255 break;
254 256
255 if (write) { 257 if (write) {
256 MAC_ESP_PDMA_LOOP("%1@,%0@+"); 258 MAC_ESP_PDMA_LOOP("%4@,%0@+");
257 259
258 esp_count -= addr - start_addr; 260 esp_count -= addr - start_addr;
259 } else { 261 } else {
260 unsigned int n; 262 unsigned int n;
261 263
262 MAC_ESP_PDMA_LOOP("%0@+,%1@"); 264 MAC_ESP_PDMA_LOOP("%0@+,%4@");
263 265
264 if (mac_esp_wait_for_empty_fifo(esp)) 266 if (mac_esp_wait_for_empty_fifo(esp))
265 break; 267 break;
@@ -442,6 +444,32 @@ static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
442 return dma_len > 0xFFFF ? 0xFFFF : dma_len; 444 return dma_len > 0xFFFF ? 0xFFFF : dma_len;
443} 445}
444 446
447static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
448{
449 int got_intr;
450
451 /*
452 * This is an edge triggered IRQ, so we have to be careful to
453 * avoid missing a transition when it is shared by two ESP devices.
454 */
455
456 do {
457 got_intr = 0;
458 if (esp_chips[0] &&
459 (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) {
460 (void)scsi_esp_intr(irq, esp_chips[0]);
461 got_intr = 1;
462 }
463 if (esp_chips[1] &&
464 (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) {
465 (void)scsi_esp_intr(irq, esp_chips[1]);
466 got_intr = 1;
467 }
468 } while (got_intr);
469
470 return IRQ_HANDLED;
471}
472
445static struct esp_driver_ops mac_esp_ops = { 473static struct esp_driver_ops mac_esp_ops = {
446 .esp_write8 = mac_esp_write8, 474 .esp_write8 = mac_esp_write8,
447 .esp_read8 = mac_esp_read8, 475 .esp_read8 = mac_esp_read8,
@@ -556,10 +584,16 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
556 } 584 }
557 585
558 host->irq = IRQ_MAC_SCSI; 586 host->irq = IRQ_MAC_SCSI;
559 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP", 587 esp_chips[dev->id] = esp;
560 esp); 588 mb();
561 if (err < 0) 589 if (esp_chips[!dev->id] == NULL) {
562 goto fail_free_priv; 590 err = request_irq(host->irq, mac_scsi_esp_intr, 0,
591 "Mac ESP", NULL);
592 if (err < 0) {
593 esp_chips[dev->id] = NULL;
594 goto fail_free_priv;
595 }
596 }
563 597
564 err = scsi_esp_register(esp, &dev->dev); 598 err = scsi_esp_register(esp, &dev->dev);
565 if (err) 599 if (err)
@@ -568,7 +602,8 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
568 return 0; 602 return 0;
569 603
570fail_free_irq: 604fail_free_irq:
571 free_irq(host->irq, esp); 605 if (esp_chips[!dev->id] == NULL)
606 free_irq(host->irq, esp);
572fail_free_priv: 607fail_free_priv:
573 kfree(mep); 608 kfree(mep);
574fail_free_command_block: 609fail_free_command_block:
@@ -587,7 +622,9 @@ static int __devexit esp_mac_remove(struct platform_device *dev)
587 622
588 scsi_esp_unregister(esp); 623 scsi_esp_unregister(esp);
589 624
590 free_irq(irq, esp); 625 esp_chips[dev->id] = NULL;
626 if (!(esp_chips[0] || esp_chips[1]))
627 free_irq(irq, NULL);
591 628
592 kfree(mep); 629 kfree(mep);
593 630
@@ -614,19 +651,18 @@ static int __init mac_esp_init(void)
614 if (err) 651 if (err)
615 return err; 652 return err;
616 653
617 internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0); 654 internal_pdev = platform_device_alloc(DRV_MODULE_NAME, 0);
618 if (internal_esp && platform_device_add(internal_esp)) { 655 if (internal_pdev && platform_device_add(internal_pdev)) {
619 platform_device_put(internal_esp); 656 platform_device_put(internal_pdev);
620 internal_esp = NULL; 657 internal_pdev = NULL;
621 } 658 }
622 659 external_pdev = platform_device_alloc(DRV_MODULE_NAME, 1);
623 external_esp = platform_device_alloc(DRV_MODULE_NAME, 1); 660 if (external_pdev && platform_device_add(external_pdev)) {
624 if (external_esp && platform_device_add(external_esp)) { 661 platform_device_put(external_pdev);
625 platform_device_put(external_esp); 662 external_pdev = NULL;
626 external_esp = NULL;
627 } 663 }
628 664
629 if (internal_esp || external_esp) { 665 if (internal_pdev || external_pdev) {
630 return 0; 666 return 0;
631 } else { 667 } else {
632 platform_driver_unregister(&esp_mac_driver); 668 platform_driver_unregister(&esp_mac_driver);
@@ -638,13 +674,13 @@ static void __exit mac_esp_exit(void)
638{ 674{
639 platform_driver_unregister(&esp_mac_driver); 675 platform_driver_unregister(&esp_mac_driver);
640 676
641 if (internal_esp) { 677 if (internal_pdev) {
642 platform_device_unregister(internal_esp); 678 platform_device_unregister(internal_pdev);
643 internal_esp = NULL; 679 internal_pdev = NULL;
644 } 680 }
645 if (external_esp) { 681 if (external_pdev) {
646 platform_device_unregister(external_esp); 682 platform_device_unregister(external_pdev);
647 external_esp = NULL; 683 external_pdev = NULL;
648 } 684 }
649} 685}
650 686
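The new mac_scsi_esp_intr() above keeps re-polling both ESP chips until neither reports ESP_STAT_INTR, because a shared, edge-triggered line only fires on transitions, and an edge arriving while the other chip is being serviced would otherwise be lost. Below is a minimal, self-contained C simulation of that coalescing loop; the chip_* helpers and pending counters are invented stand-ins for the real register reads and scsi_esp_intr() calls.

#include <stdio.h>

/* Toy stand-ins for the two ESP chips: each "pending" counter plays the role
 * of the ESP_STAT_INTR status bit in the real driver. */
static int chip_pending[2] = { 2, 1 };

static int chip_has_interrupt(int chip)
{
	return chip_pending[chip] > 0;
}

static void chip_service_interrupt(int chip)
{
	chip_pending[chip]--;
	printf("serviced chip %d, %d event(s) left\n", chip, chip_pending[chip]);
}

/* Coalescing loop: on an edge-triggered shared line the handler must keep
 * re-checking every device until none of them asserts, otherwise an edge that
 * arrives while another chip is being serviced would be missed. */
static void shared_edge_irq_handler(void)
{
	int got_intr;

	do {
		got_intr = 0;
		for (int chip = 0; chip < 2; chip++) {
			if (chip_has_interrupt(chip)) {
				chip_service_interrupt(chip);
				got_intr = 1;
			}
		}
	} while (got_intr);
}

int main(void)
{
	shared_edge_irq_handler();
	return 0;
}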
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 22052bb7becb..d06ec5aa6924 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -3401,8 +3401,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
3401 data->IrqNumber = pdev->irq; 3401 data->IrqNumber = pdev->irq;
3402 data->BaseAddress = pci_resource_start(pdev, 0); 3402 data->BaseAddress = pci_resource_start(pdev, 0);
3403 data->NumAddress = pci_resource_len (pdev, 0); 3403 data->NumAddress = pci_resource_len (pdev, 0);
3404 data->MmioAddress = ioremap_nocache(pci_resource_start(pdev, 1), 3404 data->MmioAddress = pci_ioremap_bar(pdev, 1);
3405 pci_resource_len (pdev, 1));
3406 data->MmioLength = pci_resource_len (pdev, 1); 3405 data->MmioLength = pci_resource_len (pdev, 1);
3407 3406
3408 pci_set_master(pdev); 3407 pci_set_master(pdev);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index b6cd12b2e996..8cb9240596ab 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4294,8 +4294,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4294 error = -ENODEV; 4294 error = -ENODEV;
4295 4295
4296#if MEMORY_MAPPED_IO 4296#if MEMORY_MAPPED_IO
4297 ha->mmpbase = ioremap(pci_resource_start(ha->pdev, 1), 4297 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4298 pci_resource_len(ha->pdev, 1));
4299 if (!ha->mmpbase) { 4298 if (!ha->mmpbase) {
4300 printk(KERN_INFO "qla1280: Unable to map I/O memory\n"); 4299 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4301 goto error_free_response_ring; 4300 goto error_free_response_ring;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ed731968f15f..cd53627cc761 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -19,8 +19,9 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
19 struct bin_attribute *bin_attr, 19 struct bin_attribute *bin_attr,
20 char *buf, loff_t off, size_t count) 20 char *buf, loff_t off, size_t count)
21{ 21{
22 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 22 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
23 struct device, kobj))); 23 struct device, kobj)));
24 struct qla_hw_data *ha = vha->hw;
24 25
25 if (ha->fw_dump_reading == 0) 26 if (ha->fw_dump_reading == 0)
26 return 0; 27 return 0;
@@ -34,8 +35,9 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
34 struct bin_attribute *bin_attr, 35 struct bin_attribute *bin_attr,
35 char *buf, loff_t off, size_t count) 36 char *buf, loff_t off, size_t count)
36{ 37{
37 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 38 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
38 struct device, kobj))); 39 struct device, kobj)));
40 struct qla_hw_data *ha = vha->hw;
39 int reading; 41 int reading;
40 42
41 if (off != 0) 43 if (off != 0)
@@ -48,7 +50,7 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
48 break; 50 break;
49 51
50 qla_printk(KERN_INFO, ha, 52 qla_printk(KERN_INFO, ha,
51 "Firmware dump cleared on (%ld).\n", ha->host_no); 53 "Firmware dump cleared on (%ld).\n", vha->host_no);
52 54
53 ha->fw_dump_reading = 0; 55 ha->fw_dump_reading = 0;
54 ha->fw_dumped = 0; 56 ha->fw_dumped = 0;
@@ -59,14 +61,14 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
59 61
60 qla_printk(KERN_INFO, ha, 62 qla_printk(KERN_INFO, ha,
61 "Raw firmware dump ready for read on (%ld).\n", 63 "Raw firmware dump ready for read on (%ld).\n",
62 ha->host_no); 64 vha->host_no);
63 } 65 }
64 break; 66 break;
65 case 2: 67 case 2:
66 qla2x00_alloc_fw_dump(ha); 68 qla2x00_alloc_fw_dump(vha);
67 break; 69 break;
68 case 3: 70 case 3:
69 qla2x00_system_error(ha); 71 qla2x00_system_error(vha);
70 break; 72 break;
71 } 73 }
72 return (count); 74 return (count);
@@ -87,8 +89,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
87 struct bin_attribute *bin_attr, 89 struct bin_attribute *bin_attr,
88 char *buf, loff_t off, size_t count) 90 char *buf, loff_t off, size_t count)
89{ 91{
90 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 92 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
91 struct device, kobj))); 93 struct device, kobj)));
94 struct qla_hw_data *ha = vha->hw;
92 95
93 if (!capable(CAP_SYS_ADMIN)) 96 if (!capable(CAP_SYS_ADMIN))
94 return 0; 97 return 0;
@@ -103,8 +106,9 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
103 struct bin_attribute *bin_attr, 106 struct bin_attribute *bin_attr,
104 char *buf, loff_t off, size_t count) 107 char *buf, loff_t off, size_t count)
105{ 108{
106 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 109 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
107 struct device, kobj))); 110 struct device, kobj)));
111 struct qla_hw_data *ha = vha->hw;
108 uint16_t cnt; 112 uint16_t cnt;
109 113
110 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) 114 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size)
@@ -134,11 +138,11 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
134 } 138 }
135 139
136 /* Write NVRAM. */ 140 /* Write NVRAM. */
137 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); 141 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
138 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->nvram, ha->nvram_base, 142 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
139 count); 143 count);
140 144
141 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 145 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
142 146
143 return (count); 147 return (count);
144} 148}
@@ -158,8 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
158 struct bin_attribute *bin_attr, 162 struct bin_attribute *bin_attr,
159 char *buf, loff_t off, size_t count) 163 char *buf, loff_t off, size_t count)
160{ 164{
161 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 165 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
162 struct device, kobj))); 166 struct device, kobj)));
167 struct qla_hw_data *ha = vha->hw;
163 168
164 if (ha->optrom_state != QLA_SREADING) 169 if (ha->optrom_state != QLA_SREADING)
165 return 0; 170 return 0;
@@ -173,8 +178,9 @@ qla2x00_sysfs_write_optrom(struct kobject *kobj,
173 struct bin_attribute *bin_attr, 178 struct bin_attribute *bin_attr,
174 char *buf, loff_t off, size_t count) 179 char *buf, loff_t off, size_t count)
175{ 180{
176 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 181 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
177 struct device, kobj))); 182 struct device, kobj)));
183 struct qla_hw_data *ha = vha->hw;
178 184
179 if (ha->optrom_state != QLA_SWRITING) 185 if (ha->optrom_state != QLA_SWRITING)
180 return -EINVAL; 186 return -EINVAL;
@@ -203,8 +209,10 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
203 struct bin_attribute *bin_attr, 209 struct bin_attribute *bin_attr,
204 char *buf, loff_t off, size_t count) 210 char *buf, loff_t off, size_t count)
205{ 211{
206 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 212 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
207 struct device, kobj))); 213 struct device, kobj)));
214 struct qla_hw_data *ha = vha->hw;
215
208 uint32_t start = 0; 216 uint32_t start = 0;
209 uint32_t size = ha->optrom_size; 217 uint32_t size = ha->optrom_size;
210 int val, valid; 218 int val, valid;
@@ -262,7 +270,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
262 ha->optrom_region_start, ha->optrom_region_size)); 270 ha->optrom_region_start, ha->optrom_region_size));
263 271
264 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 272 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
265 ha->isp_ops->read_optrom(ha, ha->optrom_buffer, 273 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
266 ha->optrom_region_start, ha->optrom_region_size); 274 ha->optrom_region_start, ha->optrom_region_size);
267 break; 275 break;
268 case 2: 276 case 2:
@@ -333,7 +341,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
333 "Writing flash region -- 0x%x/0x%x.\n", 341 "Writing flash region -- 0x%x/0x%x.\n",
334 ha->optrom_region_start, ha->optrom_region_size)); 342 ha->optrom_region_start, ha->optrom_region_size));
335 343
336 ha->isp_ops->write_optrom(ha, ha->optrom_buffer, 344 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
337 ha->optrom_region_start, ha->optrom_region_size); 345 ha->optrom_region_start, ha->optrom_region_size);
338 break; 346 break;
339 default: 347 default:
@@ -356,8 +364,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
356 struct bin_attribute *bin_attr, 364 struct bin_attribute *bin_attr,
357 char *buf, loff_t off, size_t count) 365 char *buf, loff_t off, size_t count)
358{ 366{
359 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 367 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
360 struct device, kobj))); 368 struct device, kobj)));
369 struct qla_hw_data *ha = vha->hw;
361 370
362 if (!capable(CAP_SYS_ADMIN)) 371 if (!capable(CAP_SYS_ADMIN))
363 return 0; 372 return 0;
@@ -371,15 +380,16 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
371 struct bin_attribute *bin_attr, 380 struct bin_attribute *bin_attr,
372 char *buf, loff_t off, size_t count) 381 char *buf, loff_t off, size_t count)
373{ 382{
374 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 383 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
375 struct device, kobj))); 384 struct device, kobj)));
385 struct qla_hw_data *ha = vha->hw;
376 386
377 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size) 387 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
378 return 0; 388 return 0;
379 389
380 /* Write NVRAM. */ 390 /* Write NVRAM. */
381 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); 391 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
382 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, ha->vpd_base, count); 392 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
383 393
384 return count; 394 return count;
385} 395}
@@ -399,8 +409,9 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
399 struct bin_attribute *bin_attr, 409 struct bin_attribute *bin_attr,
400 char *buf, loff_t off, size_t count) 410 char *buf, loff_t off, size_t count)
401{ 411{
402 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 412 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
403 struct device, kobj))); 413 struct device, kobj)));
414 struct qla_hw_data *ha = vha->hw;
404 uint16_t iter, addr, offset; 415 uint16_t iter, addr, offset;
405 int rval; 416 int rval;
406 417
@@ -429,7 +440,7 @@ do_read:
429 offset = 0; 440 offset = 0;
430 } 441 }
431 442
432 rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, addr, offset, 443 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
433 SFP_BLOCK_SIZE); 444 SFP_BLOCK_SIZE);
434 if (rval != QLA_SUCCESS) { 445 if (rval != QLA_SUCCESS) {
435 qla_printk(KERN_WARNING, ha, 446 qla_printk(KERN_WARNING, ha,
@@ -469,30 +480,31 @@ static struct sysfs_entry {
469}; 480};
470 481
471void 482void
472qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) 483qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
473{ 484{
474 struct Scsi_Host *host = ha->host; 485 struct Scsi_Host *host = vha->host;
475 struct sysfs_entry *iter; 486 struct sysfs_entry *iter;
476 int ret; 487 int ret;
477 488
478 for (iter = bin_file_entries; iter->name; iter++) { 489 for (iter = bin_file_entries; iter->name; iter++) {
479 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) 490 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
480 continue; 491 continue;
481 492
482 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 493 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
483 iter->attr); 494 iter->attr);
484 if (ret) 495 if (ret)
485 qla_printk(KERN_INFO, ha, 496 qla_printk(KERN_INFO, vha->hw,
486 "Unable to create sysfs %s binary attribute " 497 "Unable to create sysfs %s binary attribute "
487 "(%d).\n", iter->name, ret); 498 "(%d).\n", iter->name, ret);
488 } 499 }
489} 500}
490 501
491void 502void
492qla2x00_free_sysfs_attr(scsi_qla_host_t *ha) 503qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
493{ 504{
494 struct Scsi_Host *host = ha->host; 505 struct Scsi_Host *host = vha->host;
495 struct sysfs_entry *iter; 506 struct sysfs_entry *iter;
507 struct qla_hw_data *ha = vha->hw;
496 508
497 for (iter = bin_file_entries; iter->name; iter++) { 509 for (iter = bin_file_entries; iter->name; iter++) {
498 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) 510 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
@@ -503,7 +515,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
503 } 515 }
504 516
505 if (ha->beacon_blink_led == 1) 517 if (ha->beacon_blink_led == 1)
506 ha->isp_ops->beacon_off(ha); 518 ha->isp_ops->beacon_off(vha);
507} 519}
508 520
509/* Scsi_Host attributes. */ 521/* Scsi_Host attributes. */
@@ -519,22 +531,24 @@ static ssize_t
519qla2x00_fw_version_show(struct device *dev, 531qla2x00_fw_version_show(struct device *dev,
520 struct device_attribute *attr, char *buf) 532 struct device_attribute *attr, char *buf)
521{ 533{
522 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 534 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
523 char fw_str[30]; 535 struct qla_hw_data *ha = vha->hw;
536 char fw_str[128];
524 537
525 return snprintf(buf, PAGE_SIZE, "%s\n", 538 return snprintf(buf, PAGE_SIZE, "%s\n",
526 ha->isp_ops->fw_version_str(ha, fw_str)); 539 ha->isp_ops->fw_version_str(vha, fw_str));
527} 540}
528 541
529static ssize_t 542static ssize_t
530qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr, 543qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
531 char *buf) 544 char *buf)
532{ 545{
533 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 546 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
547 struct qla_hw_data *ha = vha->hw;
534 uint32_t sn; 548 uint32_t sn;
535 549
536 if (IS_FWI2_CAPABLE(ha)) { 550 if (IS_FWI2_CAPABLE(ha)) {
537 qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE); 551 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
538 return snprintf(buf, PAGE_SIZE, "%s\n", buf); 552 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
539 } 553 }
540 554
@@ -547,15 +561,16 @@ static ssize_t
547qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr, 561qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
548 char *buf) 562 char *buf)
549{ 563{
550 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 564 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
551 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", ha->pdev->device); 565 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
552} 566}
553 567
554static ssize_t 568static ssize_t
555qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr, 569qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
556 char *buf) 570 char *buf)
557{ 571{
558 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 572 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
573 struct qla_hw_data *ha = vha->hw;
559 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", 574 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
560 ha->product_id[0], ha->product_id[1], ha->product_id[2], 575 ha->product_id[0], ha->product_id[1], ha->product_id[2],
561 ha->product_id[3]); 576 ha->product_id[3]);
@@ -565,43 +580,44 @@ static ssize_t
565qla2x00_model_name_show(struct device *dev, struct device_attribute *attr, 580qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
566 char *buf) 581 char *buf)
567{ 582{
568 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 583 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
569 return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number); 584 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
570} 585}
571 586
572static ssize_t 587static ssize_t
573qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, 588qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
574 char *buf) 589 char *buf)
575{ 590{
576 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 591 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
577 return snprintf(buf, PAGE_SIZE, "%s\n", 592 return snprintf(buf, PAGE_SIZE, "%s\n",
578 ha->model_desc ? ha->model_desc: ""); 593 vha->hw->model_desc ? vha->hw->model_desc : "");
579} 594}
580 595
581static ssize_t 596static ssize_t
582qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, 597qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
583 char *buf) 598 char *buf)
584{ 599{
585 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 600 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
586 char pci_info[30]; 601 char pci_info[30];
587 602
588 return snprintf(buf, PAGE_SIZE, "%s\n", 603 return snprintf(buf, PAGE_SIZE, "%s\n",
589 ha->isp_ops->pci_info_str(ha, pci_info)); 604 vha->hw->isp_ops->pci_info_str(vha, pci_info));
590} 605}
591 606
592static ssize_t 607static ssize_t
593qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, 608qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
594 char *buf) 609 char *buf)
595{ 610{
596 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 611 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
612 struct qla_hw_data *ha = vha->hw;
597 int len = 0; 613 int len = 0;
598 614
599 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 615 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
600 atomic_read(&ha->loop_state) == LOOP_DEAD) 616 atomic_read(&vha->loop_state) == LOOP_DEAD)
601 len = snprintf(buf, PAGE_SIZE, "Link Down\n"); 617 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
602 else if (atomic_read(&ha->loop_state) != LOOP_READY || 618 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
603 test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) || 619 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
604 test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) 620 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
605 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n"); 621 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
606 else { 622 else {
607 len = snprintf(buf, PAGE_SIZE, "Link Up - "); 623 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -632,10 +648,10 @@ static ssize_t
632qla2x00_zio_show(struct device *dev, struct device_attribute *attr, 648qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
633 char *buf) 649 char *buf)
634{ 650{
635 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 651 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
636 int len = 0; 652 int len = 0;
637 653
638 switch (ha->zio_mode) { 654 switch (vha->hw->zio_mode) {
639 case QLA_ZIO_MODE_6: 655 case QLA_ZIO_MODE_6:
640 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); 656 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
641 break; 657 break;
@@ -650,7 +666,8 @@ static ssize_t
650qla2x00_zio_store(struct device *dev, struct device_attribute *attr, 666qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
651 const char *buf, size_t count) 667 const char *buf, size_t count)
652{ 668{
653 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 669 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
670 struct qla_hw_data *ha = vha->hw;
654 int val = 0; 671 int val = 0;
655 uint16_t zio_mode; 672 uint16_t zio_mode;
656 673
@@ -668,7 +685,7 @@ qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
668 /* Update per-hba values and queue a reset. */ 685 /* Update per-hba values and queue a reset. */
669 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) { 686 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
670 ha->zio_mode = zio_mode; 687 ha->zio_mode = zio_mode;
671 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 688 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
672 } 689 }
673 return strlen(buf); 690 return strlen(buf);
674} 691}
@@ -677,16 +694,16 @@ static ssize_t
677qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, 694qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
678 char *buf) 695 char *buf)
679{ 696{
680 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 697 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
681 698
682 return snprintf(buf, PAGE_SIZE, "%d us\n", ha->zio_timer * 100); 699 return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
683} 700}
684 701
685static ssize_t 702static ssize_t
686qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr, 703qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
687 const char *buf, size_t count) 704 const char *buf, size_t count)
688{ 705{
689 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 706 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
690 int val = 0; 707 int val = 0;
691 uint16_t zio_timer; 708 uint16_t zio_timer;
692 709
@@ -696,7 +713,7 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
696 return -ERANGE; 713 return -ERANGE;
697 714
698 zio_timer = (uint16_t)(val / 100); 715 zio_timer = (uint16_t)(val / 100);
699 ha->zio_timer = zio_timer; 716 vha->hw->zio_timer = zio_timer;
700 717
701 return strlen(buf); 718 return strlen(buf);
702} 719}
@@ -705,10 +722,10 @@ static ssize_t
705qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, 722qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
706 char *buf) 723 char *buf)
707{ 724{
708 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 725 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
709 int len = 0; 726 int len = 0;
710 727
711 if (ha->beacon_blink_led) 728 if (vha->hw->beacon_blink_led)
712 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); 729 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
713 else 730 else
714 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 731 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
@@ -719,14 +736,15 @@ static ssize_t
719qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, 736qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
720 const char *buf, size_t count) 737 const char *buf, size_t count)
721{ 738{
722 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 739 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
740 struct qla_hw_data *ha = vha->hw;
723 int val = 0; 741 int val = 0;
724 int rval; 742 int rval;
725 743
726 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 744 if (IS_QLA2100(ha) || IS_QLA2200(ha))
727 return -EPERM; 745 return -EPERM;
728 746
729 if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) { 747 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
730 qla_printk(KERN_WARNING, ha, 748 qla_printk(KERN_WARNING, ha,
731 "Abort ISP active -- ignoring beacon request.\n"); 749 "Abort ISP active -- ignoring beacon request.\n");
732 return -EBUSY; 750 return -EBUSY;
@@ -736,9 +754,9 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
736 return -EINVAL; 754 return -EINVAL;
737 755
738 if (val) 756 if (val)
739 rval = ha->isp_ops->beacon_on(ha); 757 rval = ha->isp_ops->beacon_on(vha);
740 else 758 else
741 rval = ha->isp_ops->beacon_off(ha); 759 rval = ha->isp_ops->beacon_off(vha);
742 760
743 if (rval != QLA_SUCCESS) 761 if (rval != QLA_SUCCESS)
744 count = 0; 762 count = 0;
@@ -750,8 +768,8 @@ static ssize_t
750qla2x00_optrom_bios_version_show(struct device *dev, 768qla2x00_optrom_bios_version_show(struct device *dev,
751 struct device_attribute *attr, char *buf) 769 struct device_attribute *attr, char *buf)
752{ 770{
753 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 771 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
754 772 struct qla_hw_data *ha = vha->hw;
755 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], 773 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
756 ha->bios_revision[0]); 774 ha->bios_revision[0]);
757} 775}
@@ -760,8 +778,8 @@ static ssize_t
760qla2x00_optrom_efi_version_show(struct device *dev, 778qla2x00_optrom_efi_version_show(struct device *dev,
761 struct device_attribute *attr, char *buf) 779 struct device_attribute *attr, char *buf)
762{ 780{
763 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 781 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
764 782 struct qla_hw_data *ha = vha->hw;
765 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], 783 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
766 ha->efi_revision[0]); 784 ha->efi_revision[0]);
767} 785}
@@ -770,8 +788,8 @@ static ssize_t
770qla2x00_optrom_fcode_version_show(struct device *dev, 788qla2x00_optrom_fcode_version_show(struct device *dev,
771 struct device_attribute *attr, char *buf) 789 struct device_attribute *attr, char *buf)
772{ 790{
773 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 791 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
774 792 struct qla_hw_data *ha = vha->hw;
775 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], 793 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
776 ha->fcode_revision[0]); 794 ha->fcode_revision[0]);
777} 795}
@@ -780,8 +798,8 @@ static ssize_t
780qla2x00_optrom_fw_version_show(struct device *dev, 798qla2x00_optrom_fw_version_show(struct device *dev,
781 struct device_attribute *attr, char *buf) 799 struct device_attribute *attr, char *buf)
782{ 800{
783 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 801 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
784 802 struct qla_hw_data *ha = vha->hw;
785 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", 803 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
786 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], 804 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
787 ha->fw_revision[3]); 805 ha->fw_revision[3]);
@@ -791,8 +809,8 @@ static ssize_t
791qla2x00_total_isp_aborts_show(struct device *dev, 809qla2x00_total_isp_aborts_show(struct device *dev,
792 struct device_attribute *attr, char *buf) 810 struct device_attribute *attr, char *buf)
793{ 811{
794 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 812 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
795 813 struct qla_hw_data *ha = vha->hw;
796 return snprintf(buf, PAGE_SIZE, "%d\n", 814 return snprintf(buf, PAGE_SIZE, "%d\n",
797 ha->qla_stats.total_isp_aborts); 815 ha->qla_stats.total_isp_aborts);
798} 816}
@@ -848,16 +866,17 @@ struct device_attribute *qla2x00_host_attrs[] = {
848static void 866static void
849qla2x00_get_host_port_id(struct Scsi_Host *shost) 867qla2x00_get_host_port_id(struct Scsi_Host *shost)
850{ 868{
851 scsi_qla_host_t *ha = shost_priv(shost); 869 scsi_qla_host_t *vha = shost_priv(shost);
852 870
853 fc_host_port_id(shost) = ha->d_id.b.domain << 16 | 871 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
854 ha->d_id.b.area << 8 | ha->d_id.b.al_pa; 872 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
855} 873}
856 874
857static void 875static void
858qla2x00_get_host_speed(struct Scsi_Host *shost) 876qla2x00_get_host_speed(struct Scsi_Host *shost)
859{ 877{
860 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 878 struct qla_hw_data *ha = ((struct scsi_qla_host *)
879 (shost_priv(shost)))->hw;
861 u32 speed = FC_PORTSPEED_UNKNOWN; 880 u32 speed = FC_PORTSPEED_UNKNOWN;
862 881
863 switch (ha->link_data_rate) { 882 switch (ha->link_data_rate) {
@@ -880,14 +899,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
880static void 899static void
881qla2x00_get_host_port_type(struct Scsi_Host *shost) 900qla2x00_get_host_port_type(struct Scsi_Host *shost)
882{ 901{
883 scsi_qla_host_t *ha = shost_priv(shost); 902 scsi_qla_host_t *vha = shost_priv(shost);
884 uint32_t port_type = FC_PORTTYPE_UNKNOWN; 903 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
885 904
886 if (ha->parent) { 905 if (vha->vp_idx) {
887 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 906 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
888 return; 907 return;
889 } 908 }
890 switch (ha->current_topology) { 909 switch (vha->hw->current_topology) {
891 case ISP_CFG_NL: 910 case ISP_CFG_NL:
892 port_type = FC_PORTTYPE_LPORT; 911 port_type = FC_PORTTYPE_LPORT;
893 break; 912 break;
@@ -908,11 +927,11 @@ static void
908qla2x00_get_starget_node_name(struct scsi_target *starget) 927qla2x00_get_starget_node_name(struct scsi_target *starget)
909{ 928{
910 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 929 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
911 scsi_qla_host_t *ha = shost_priv(host); 930 scsi_qla_host_t *vha = shost_priv(host);
912 fc_port_t *fcport; 931 fc_port_t *fcport;
913 u64 node_name = 0; 932 u64 node_name = 0;
914 933
915 list_for_each_entry(fcport, &ha->fcports, list) { 934 list_for_each_entry(fcport, &vha->vp_fcports, list) {
916 if (fcport->rport && 935 if (fcport->rport &&
917 starget->id == fcport->rport->scsi_target_id) { 936 starget->id == fcport->rport->scsi_target_id) {
918 node_name = wwn_to_u64(fcport->node_name); 937 node_name = wwn_to_u64(fcport->node_name);
@@ -927,11 +946,11 @@ static void
927qla2x00_get_starget_port_name(struct scsi_target *starget) 946qla2x00_get_starget_port_name(struct scsi_target *starget)
928{ 947{
929 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 948 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
930 scsi_qla_host_t *ha = shost_priv(host); 949 scsi_qla_host_t *vha = shost_priv(host);
931 fc_port_t *fcport; 950 fc_port_t *fcport;
932 u64 port_name = 0; 951 u64 port_name = 0;
933 952
934 list_for_each_entry(fcport, &ha->fcports, list) { 953 list_for_each_entry(fcport, &vha->vp_fcports, list) {
935 if (fcport->rport && 954 if (fcport->rport &&
936 starget->id == fcport->rport->scsi_target_id) { 955 starget->id == fcport->rport->scsi_target_id) {
937 port_name = wwn_to_u64(fcport->port_name); 956 port_name = wwn_to_u64(fcport->port_name);
@@ -946,11 +965,11 @@ static void
946qla2x00_get_starget_port_id(struct scsi_target *starget) 965qla2x00_get_starget_port_id(struct scsi_target *starget)
947{ 966{
948 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 967 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
949 scsi_qla_host_t *ha = shost_priv(host); 968 scsi_qla_host_t *vha = shost_priv(host);
950 fc_port_t *fcport; 969 fc_port_t *fcport;
951 uint32_t port_id = ~0U; 970 uint32_t port_id = ~0U;
952 971
953 list_for_each_entry(fcport, &ha->fcports, list) { 972 list_for_each_entry(fcport, &vha->vp_fcports, list) {
954 if (fcport->rport && 973 if (fcport->rport &&
955 starget->id == fcport->rport->scsi_target_id) { 974 starget->id == fcport->rport->scsi_target_id) {
956 port_id = fcport->d_id.b.domain << 16 | 975 port_id = fcport->d_id.b.domain << 16 |
@@ -999,9 +1018,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
999 * final cleanup of firmware resources (PCBs and XCBs). 1018 * final cleanup of firmware resources (PCBs and XCBs).
1000 */ 1019 */
1001 if (fcport->loop_id != FC_NO_LOOP_ID) { 1020 if (fcport->loop_id != FC_NO_LOOP_ID) {
1002 fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id, 1021 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1003 fcport->d_id.b.domain, fcport->d_id.b.area, 1022 fcport->loop_id, fcport->d_id.b.domain,
1004 fcport->d_id.b.al_pa); 1023 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1005 fcport->loop_id = FC_NO_LOOP_ID; 1024 fcport->loop_id = FC_NO_LOOP_ID;
1006 } 1025 }
1007 1026
@@ -1011,16 +1030,18 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1011static int 1030static int
1012qla2x00_issue_lip(struct Scsi_Host *shost) 1031qla2x00_issue_lip(struct Scsi_Host *shost)
1013{ 1032{
1014 scsi_qla_host_t *ha = shost_priv(shost); 1033 scsi_qla_host_t *vha = shost_priv(shost);
1015 1034
1016 qla2x00_loop_reset(ha); 1035 qla2x00_loop_reset(vha);
1017 return 0; 1036 return 0;
1018} 1037}
1019 1038
1020static struct fc_host_statistics * 1039static struct fc_host_statistics *
1021qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 1040qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1022{ 1041{
1023 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 1042 scsi_qla_host_t *vha = shost_priv(shost);
1043 struct qla_hw_data *ha = vha->hw;
1044 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1024 int rval; 1045 int rval;
1025 struct link_statistics *stats; 1046 struct link_statistics *stats;
1026 dma_addr_t stats_dma; 1047 dma_addr_t stats_dma;
@@ -1032,21 +1053,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1032 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1053 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1033 if (stats == NULL) { 1054 if (stats == NULL) {
1034 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1055 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
1035 __func__, ha->host_no)); 1056 __func__, base_vha->host_no));
1036 goto done; 1057 goto done;
1037 } 1058 }
1038 memset(stats, 0, DMA_POOL_SIZE); 1059 memset(stats, 0, DMA_POOL_SIZE);
1039 1060
1040 rval = QLA_FUNCTION_FAILED; 1061 rval = QLA_FUNCTION_FAILED;
1041 if (IS_FWI2_CAPABLE(ha)) { 1062 if (IS_FWI2_CAPABLE(ha)) {
1042 rval = qla24xx_get_isp_stats(ha, stats, stats_dma); 1063 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1043 } else if (atomic_read(&ha->loop_state) == LOOP_READY && 1064 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1044 !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) && 1065 !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1045 !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) && 1066 !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1046 !ha->dpc_active) { 1067 !ha->dpc_active) {
1047 /* Must be in a 'READY' state for statistics retrieval. */ 1068 /* Must be in a 'READY' state for statistics retrieval. */
1048 rval = qla2x00_get_link_status(ha, ha->loop_id, stats, 1069 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1049 stats_dma); 1070 stats, stats_dma);
1050 } 1071 }
1051 1072
1052 if (rval != QLA_SUCCESS) 1073 if (rval != QLA_SUCCESS)
@@ -1077,29 +1098,29 @@ done:
1077static void 1098static void
1078qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) 1099qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1079{ 1100{
1080 scsi_qla_host_t *ha = shost_priv(shost); 1101 scsi_qla_host_t *vha = shost_priv(shost);
1081 1102
1082 qla2x00_get_sym_node_name(ha, fc_host_symbolic_name(shost)); 1103 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1083} 1104}
1084 1105
1085static void 1106static void
1086qla2x00_set_host_system_hostname(struct Scsi_Host *shost) 1107qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1087{ 1108{
1088 scsi_qla_host_t *ha = shost_priv(shost); 1109 scsi_qla_host_t *vha = shost_priv(shost);
1089 1110
1090 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 1111 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1091} 1112}
1092 1113
1093static void 1114static void
1094qla2x00_get_host_fabric_name(struct Scsi_Host *shost) 1115qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1095{ 1116{
1096 scsi_qla_host_t *ha = shost_priv(shost); 1117 scsi_qla_host_t *vha = shost_priv(shost);
1097 u64 node_name; 1118 u64 node_name;
1098 1119
1099 if (ha->device_flags & SWITCH_FOUND) 1120 if (vha->device_flags & SWITCH_FOUND)
1100 node_name = wwn_to_u64(ha->fabric_node_name); 1121 node_name = wwn_to_u64(vha->fabric_node_name);
1101 else 1122 else
1102 node_name = wwn_to_u64(ha->node_name); 1123 node_name = wwn_to_u64(vha->node_name);
1103 1124
1104 fc_host_fabric_name(shost) = node_name; 1125 fc_host_fabric_name(shost) = node_name;
1105} 1126}
@@ -1107,11 +1128,12 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1107static void 1128static void
1108qla2x00_get_host_port_state(struct Scsi_Host *shost) 1129qla2x00_get_host_port_state(struct Scsi_Host *shost)
1109{ 1130{
1110 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 1131 scsi_qla_host_t *vha = shost_priv(shost);
1132 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1111 1133
1112 if (!ha->flags.online) 1134 if (!base_vha->flags.online)
1113 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1135 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1114 else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT) 1136 else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1115 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 1137 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1116 else 1138 else
1117 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 1139 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
@@ -1121,8 +1143,11 @@ static int
1121qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 1143qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1122{ 1144{
1123 int ret = 0; 1145 int ret = 0;
1124 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 1146 int cnt = 0;
1125 scsi_qla_host_t *vha; 1147 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1148 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1149 scsi_qla_host_t *vha = NULL;
1150 struct qla_hw_data *ha = base_vha->hw;
1126 1151
1127 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1152 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1128 if (ret) { 1153 if (ret) {
@@ -1144,18 +1169,19 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1144 atomic_set(&vha->vp_state, VP_FAILED); 1169 atomic_set(&vha->vp_state, VP_FAILED);
1145 1170
1146 /* ready to create vport */ 1171 /* ready to create vport */
1147 qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx); 1172 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1173 vha->vp_idx);
1148 1174
1149 /* initialized vport states */ 1175 /* initialized vport states */
1150 atomic_set(&vha->loop_state, LOOP_DOWN); 1176 atomic_set(&vha->loop_state, LOOP_DOWN);
1151 vha->vp_err_state= VP_ERR_PORTDWN; 1177 vha->vp_err_state= VP_ERR_PORTDWN;
1152 vha->vp_prev_err_state= VP_ERR_UNKWN; 1178 vha->vp_prev_err_state= VP_ERR_UNKWN;
1153 /* Check if physical ha port is Up */ 1179 /* Check if physical ha port is Up */
1154 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 1180 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1155 atomic_read(&ha->loop_state) == LOOP_DEAD) { 1181 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1156 /* Don't retry or attempt login of this virtual port */ 1182 /* Don't retry or attempt login of this virtual port */
1157 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n", 1183 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
1158 vha->host_no)); 1184 base_vha->host_no));
1159 atomic_set(&vha->loop_state, LOOP_DEAD); 1185 atomic_set(&vha->loop_state, LOOP_DEAD);
1160 if (!disable) 1186 if (!disable)
1161 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1187 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1171,18 +1197,32 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1171 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1197 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1172 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1198 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1173 fc_host_supported_classes(vha->host) = 1199 fc_host_supported_classes(vha->host) =
1174 fc_host_supported_classes(ha->host); 1200 fc_host_supported_classes(base_vha->host);
1175 fc_host_supported_speeds(vha->host) = 1201 fc_host_supported_speeds(vha->host) =
1176 fc_host_supported_speeds(ha->host); 1202 fc_host_supported_speeds(base_vha->host);
1177 1203
1178 qla24xx_vport_disable(fc_vport, disable); 1204 qla24xx_vport_disable(fc_vport, disable);
1179 1205
1206 /* Create a queue pair for the vport */
1207 if (ha->mqenable) {
1208 if (ha->npiv_info) {
1209 for (; cnt < ha->nvram_npiv_size; cnt++) {
1210 if (ha->npiv_info[cnt].port_name ==
1211 vha->port_name &&
1212 ha->npiv_info[cnt].node_name ==
1213 vha->node_name) {
1214 qos = ha->npiv_info[cnt].q_qos;
1215 break;
1216 }
1217 }
1218 }
1219 qla25xx_create_queues(vha, qos);
1220 }
1221
1180 return 0; 1222 return 0;
1181vport_create_failed_2: 1223vport_create_failed_2:
1182 qla24xx_disable_vp(vha); 1224 qla24xx_disable_vp(vha);
1183 qla24xx_deallocate_vp_id(vha); 1225 qla24xx_deallocate_vp_id(vha);
1184 kfree(vha->port_name);
1185 kfree(vha->node_name);
1186 scsi_host_put(vha->host); 1226 scsi_host_put(vha->host);
1187 return FC_VPORT_FAILED; 1227 return FC_VPORT_FAILED;
1188} 1228}
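The hunk above ends the reworked vport-creation path: when multi-queue is enabled, the driver scans ha->npiv_info for an entry whose port and node names match the new vport and passes its q_qos to qla25xx_create_queues(), falling back to QLA_DEFAULT_QUE_QOS when nothing matches. A minimal user-space sketch of that lookup follows; the standalone types and the plain 64-bit WWN values are assumptions made for brevity, not the driver's own definitions.

/* Illustrative sketch of the QoS lookup added to qla24xx_vport_create(). */
#include <stdint.h>
#include <stddef.h>

#define DEFAULT_QOS 5   /* stands in for QLA_DEFAULT_QUE_QOS */

struct npiv_entry {
        uint64_t port_name;
        uint64_t node_name;
        uint8_t  q_qos;
};

/* Scan the NVRAM NPIV table for a WWPN/WWNN match; fall back to the
 * default QoS when no entry matches. */
static uint8_t lookup_vport_qos(const struct npiv_entry *tbl, size_t n,
                                uint64_t wwpn, uint64_t wwnn)
{
        for (size_t i = 0; i < n; i++)
                if (tbl[i].port_name == wwpn && tbl[i].node_name == wwnn)
                        return tbl[i].q_qos;
        return DEFAULT_QOS;
}

int main(void)
{
        struct npiv_entry tbl[] = {
                { .port_name = 0x2100001b32aabbccULL,
                  .node_name = 0x2000001b32aabbccULL, .q_qos = 3 },
        };
        uint8_t qos = lookup_vport_qos(tbl, 1, 0x2100001b32aabbccULL,
                                       0x2000001b32aabbccULL);
        return qos == 3 ? 0 : 1;
}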
@@ -1191,17 +1231,34 @@ static int
1191qla24xx_vport_delete(struct fc_vport *fc_vport) 1231qla24xx_vport_delete(struct fc_vport *fc_vport)
1192{ 1232{
1193 scsi_qla_host_t *vha = fc_vport->dd_data; 1233 scsi_qla_host_t *vha = fc_vport->dd_data;
1194 scsi_qla_host_t *pha = to_qla_parent(vha); 1234 fc_port_t *fcport, *tfcport;
1235 struct qla_hw_data *ha = vha->hw;
1236 uint16_t id = vha->vp_idx;
1195 1237
1196 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || 1238 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1197 test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags)) 1239 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1198 msleep(1000); 1240 msleep(1000);
1199 1241
1242 if (ha->mqenable) {
1243 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
1244 qla_printk(KERN_WARNING, ha,
1245 "Queue delete failed.\n");
1246 vha->req_ques[0] = ha->req_q_map[0]->id;
1247 }
1248
1200 qla24xx_disable_vp(vha); 1249 qla24xx_disable_vp(vha);
1201 qla24xx_deallocate_vp_id(vha);
1202 1250
1203 kfree(vha->node_name); 1251 fc_remove_host(vha->host);
1204 kfree(vha->port_name); 1252
1253 scsi_remove_host(vha->host);
1254
1255 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1256 list_del(&fcport->list);
1257 kfree(fcport);
1258 fcport = NULL;
1259 }
1260
1261 qla24xx_deallocate_vp_id(vha);
1205 1262
1206 if (vha->timer_active) { 1263 if (vha->timer_active) {
1207 qla2x00_vp_stop_timer(vha); 1264 qla2x00_vp_stop_timer(vha);
@@ -1210,12 +1267,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1210 vha->host_no, vha->vp_idx, vha)); 1267 vha->host_no, vha->vp_idx, vha));
1211 } 1268 }
1212 1269
1213 fc_remove_host(vha->host);
1214
1215 scsi_remove_host(vha->host);
1216
1217 scsi_host_put(vha->host); 1270 scsi_host_put(vha->host);
1218 1271 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
1219 return 0; 1272 return 0;
1220} 1273}
1221 1274
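The reworked delete path above walks vha->vp_fcports with list_for_each_entry_safe() because every fcport is freed during the walk; the "safe" variant caches the next node before the current one is released. A plain-C sketch of that idiom is shown below, using a simple singly linked list rather than the kernel's list_head API.

/* Illustrative teardown of a per-vport port list: grab the successor
 * first, then it is safe to free the current node. */
#include <stdlib.h>

struct fcport_sketch {
        struct fcport_sketch *next;
        /* ... port state ... */
};

static void free_fcports(struct fcport_sketch *head)
{
        struct fcport_sketch *cur = head, *tmp;

        while (cur) {
                tmp = cur->next;   /* remember the next node ... */
                free(cur);         /* ... before releasing this one */
                cur = tmp;
        }
}

int main(void)
{
        struct fcport_sketch *a = calloc(1, sizeof(*a));
        struct fcport_sketch *b = calloc(1, sizeof(*b));

        if (a && b) {
                a->next = b;
                free_fcports(a);   /* frees both nodes */
        } else {
                free(a);
                free(b);
        }
        return 0;
}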
@@ -1318,15 +1371,16 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
1318}; 1371};
1319 1372
1320void 1373void
1321qla2x00_init_host_attr(scsi_qla_host_t *ha) 1374qla2x00_init_host_attr(scsi_qla_host_t *vha)
1322{ 1375{
1376 struct qla_hw_data *ha = vha->hw;
1323 u32 speed = FC_PORTSPEED_UNKNOWN; 1377 u32 speed = FC_PORTSPEED_UNKNOWN;
1324 1378
1325 fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name); 1379 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1326 fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name); 1380 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1327 fc_host_supported_classes(ha->host) = FC_COS_CLASS3; 1381 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
1328 fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports;; 1382 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1329 fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count; 1383 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1330 1384
1331 if (IS_QLA25XX(ha)) 1385 if (IS_QLA25XX(ha))
1332 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 1386 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -1338,5 +1392,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
1338 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 1392 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1339 else 1393 else
1340 speed = FC_PORTSPEED_1GBIT; 1394 speed = FC_PORTSPEED_1GBIT;
1341 fc_host_supported_speeds(ha->host) = speed; 1395 fc_host_supported_speeds(vha->host) = speed;
1342} 1396}
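Throughout qla_attr.c the patch applies one pattern: scsi_qla_host_t now describes a single host (base port or NPIV vport), while adapter-wide state lives in the new struct qla_hw_data reached through vha->hw, so sysfs and FC-transport callbacks first fetch the vha from shost_priv() and then hop to the shared hardware data. A standalone sketch of that split follows; only the pointer relationship mirrors the patch, the field sets are illustrative.

/* Per-host data vs. shared adapter data, modeled in user space. */
#include <stdio.h>

struct qla_hw_data_sketch {            /* shared, physical-adapter state */
        int zio_mode;
        int beacon_blink_led;
};

struct scsi_qla_host_sketch {          /* per-host (base or NPIV vport) */
        unsigned long host_no;
        struct qla_hw_data_sketch *hw; /* every vha points at the one hw */
};

static void show_zio(const struct scsi_qla_host_sketch *vha)
{
        /* attribute handlers now dereference vha->hw for adapter state */
        printf("host %lu zio mode %d\n", vha->host_no, vha->hw->zio_mode);
}

int main(void)
{
        struct qla_hw_data_sketch hw = { .zio_mode = 6 };
        struct scsi_qla_host_sketch base  = { .host_no = 0, .hw = &hw };
        struct scsi_qla_host_sketch vport = { .host_no = 1, .hw = &hw };

        show_zio(&base);
        show_zio(&vport);   /* same adapter state through a different vha */
        return 0;
}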
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 510ba64bc286..1cf77772623b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -9,7 +9,7 @@
9#include <linux/delay.h> 9#include <linux/delay.h>
10 10
11static inline void 11static inline void
12qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump) 12qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
13{ 13{
14 fw_dump->fw_major_version = htonl(ha->fw_major_version); 14 fw_dump->fw_major_version = htonl(ha->fw_major_version);
15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version); 15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
@@ -23,22 +23,24 @@ qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
23} 23}
24 24
25static inline void * 25static inline void *
26qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr) 26qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
27{ 27{
28 struct req_que *req = ha->req_q_map[0];
29 struct rsp_que *rsp = ha->rsp_q_map[0];
28 /* Request queue. */ 30 /* Request queue. */
29 memcpy(ptr, ha->request_ring, ha->request_q_length * 31 memcpy(ptr, req->ring, req->length *
30 sizeof(request_t)); 32 sizeof(request_t));
31 33
32 /* Response queue. */ 34 /* Response queue. */
33 ptr += ha->request_q_length * sizeof(request_t); 35 ptr += req->length * sizeof(request_t);
34 memcpy(ptr, ha->response_ring, ha->response_q_length * 36 memcpy(ptr, rsp->ring, rsp->length *
35 sizeof(response_t)); 37 sizeof(response_t));
36 38
37 return ptr + (ha->response_q_length * sizeof(response_t)); 39 return ptr + (rsp->length * sizeof(response_t));
38} 40}
39 41
40static int 42static int
41qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram, 43qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
42 uint32_t ram_dwords, void **nxt) 44 uint32_t ram_dwords, void **nxt)
43{ 45{
44 int rval; 46 int rval;
@@ -112,7 +114,7 @@ qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
112} 114}
113 115
114static int 116static int
115qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram, 117qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
116 uint32_t cram_size, void **nxt) 118 uint32_t cram_size, void **nxt)
117{ 119{
118 int rval; 120 int rval;
@@ -163,7 +165,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
163} 165}
164 166
165static int 167static int
166qla24xx_soft_reset(scsi_qla_host_t *ha) 168qla24xx_soft_reset(struct qla_hw_data *ha)
167{ 169{
168 int rval = QLA_SUCCESS; 170 int rval = QLA_SUCCESS;
169 uint32_t cnt; 171 uint32_t cnt;
@@ -215,8 +217,8 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
215} 217}
216 218
217static int 219static int
218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram, 220qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
219 uint32_t ram_words, void **nxt) 221 uint16_t ram_words, void **nxt)
220{ 222{
221 int rval; 223 int rval;
222 uint32_t cnt, stat, timer, words, idx; 224 uint32_t cnt, stat, timer, words, idx;
@@ -314,16 +316,17 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
314 * @hardware_locked: Called with the hardware_lock 316 * @hardware_locked: Called with the hardware_lock
315 */ 317 */
316void 318void
317qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 319qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
318{ 320{
319 int rval; 321 int rval;
320 uint32_t cnt; 322 uint32_t cnt;
321 323 struct qla_hw_data *ha = vha->hw;
322 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 324 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
323 uint16_t __iomem *dmp_reg; 325 uint16_t __iomem *dmp_reg;
324 unsigned long flags; 326 unsigned long flags;
325 struct qla2300_fw_dump *fw; 327 struct qla2300_fw_dump *fw;
326 void *nxt; 328 void *nxt;
329 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
327 330
328 flags = 0; 331 flags = 0;
329 332
@@ -468,7 +471,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
468 } else { 471 } else {
469 qla_printk(KERN_INFO, ha, 472 qla_printk(KERN_INFO, ha,
470 "Firmware dump saved to temp buffer (%ld/%p).\n", 473 "Firmware dump saved to temp buffer (%ld/%p).\n",
471 ha->host_no, ha->fw_dump); 474 base_vha->host_no, ha->fw_dump);
472 ha->fw_dumped = 1; 475 ha->fw_dumped = 1;
473 } 476 }
474 477
@@ -483,16 +486,18 @@ qla2300_fw_dump_failed:
483 * @hardware_locked: Called with the hardware_lock 486 * @hardware_locked: Called with the hardware_lock
484 */ 487 */
485void 488void
486qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 489qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
487{ 490{
488 int rval; 491 int rval;
489 uint32_t cnt, timer; 492 uint32_t cnt, timer;
490 uint16_t risc_address; 493 uint16_t risc_address;
491 uint16_t mb0, mb2; 494 uint16_t mb0, mb2;
495 struct qla_hw_data *ha = vha->hw;
492 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 496 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
493 uint16_t __iomem *dmp_reg; 497 uint16_t __iomem *dmp_reg;
494 unsigned long flags; 498 unsigned long flags;
495 struct qla2100_fw_dump *fw; 499 struct qla2100_fw_dump *fw;
500 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
496 501
497 risc_address = 0; 502 risc_address = 0;
498 mb0 = mb2 = 0; 503 mb0 = mb2 = 0;
@@ -673,7 +678,7 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
673 } else { 678 } else {
674 qla_printk(KERN_INFO, ha, 679 qla_printk(KERN_INFO, ha,
675 "Firmware dump saved to temp buffer (%ld/%p).\n", 680 "Firmware dump saved to temp buffer (%ld/%p).\n",
676 ha->host_no, ha->fw_dump); 681 base_vha->host_no, ha->fw_dump);
677 ha->fw_dumped = 1; 682 ha->fw_dumped = 1;
678 } 683 }
679 684
@@ -683,12 +688,12 @@ qla2100_fw_dump_failed:
683} 688}
684 689
685void 690void
686qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 691qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
687{ 692{
688 int rval; 693 int rval;
689 uint32_t cnt; 694 uint32_t cnt;
690 uint32_t risc_address; 695 uint32_t risc_address;
691 696 struct qla_hw_data *ha = vha->hw;
692 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 697 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
693 uint32_t __iomem *dmp_reg; 698 uint32_t __iomem *dmp_reg;
694 uint32_t *iter_reg; 699 uint32_t *iter_reg;
@@ -697,6 +702,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
697 struct qla24xx_fw_dump *fw; 702 struct qla24xx_fw_dump *fw;
698 uint32_t ext_mem_cnt; 703 uint32_t ext_mem_cnt;
699 void *nxt; 704 void *nxt;
705 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
700 706
701 risc_address = ext_mem_cnt = 0; 707 risc_address = ext_mem_cnt = 0;
702 flags = 0; 708 flags = 0;
@@ -919,7 +925,7 @@ qla24xx_fw_dump_failed_0:
919 } else { 925 } else {
920 qla_printk(KERN_INFO, ha, 926 qla_printk(KERN_INFO, ha,
921 "Firmware dump saved to temp buffer (%ld/%p).\n", 927 "Firmware dump saved to temp buffer (%ld/%p).\n",
922 ha->host_no, ha->fw_dump); 928 base_vha->host_no, ha->fw_dump);
923 ha->fw_dumped = 1; 929 ha->fw_dumped = 1;
924 } 930 }
925 931
@@ -929,13 +935,14 @@ qla24xx_fw_dump_failed:
929} 935}
930 936
931void 937void
932qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 938qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
933{ 939{
934 int rval; 940 int rval;
935 uint32_t cnt; 941 uint32_t cnt;
936 uint32_t risc_address; 942 uint32_t risc_address;
937 943 struct qla_hw_data *ha = vha->hw;
938 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 944 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
945 struct device_reg_25xxmq __iomem *reg25;
939 uint32_t __iomem *dmp_reg; 946 uint32_t __iomem *dmp_reg;
940 uint32_t *iter_reg; 947 uint32_t *iter_reg;
941 uint16_t __iomem *mbx_reg; 948 uint16_t __iomem *mbx_reg;
@@ -944,6 +951,11 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
944 uint32_t ext_mem_cnt; 951 uint32_t ext_mem_cnt;
945 void *nxt; 952 void *nxt;
946 struct qla2xxx_fce_chain *fcec; 953 struct qla2xxx_fce_chain *fcec;
954 struct qla2xxx_mq_chain *mq = NULL;
955 uint32_t qreg_size;
956 uint8_t req_cnt, rsp_cnt, que_cnt;
957 uint32_t que_idx;
958 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
947 959
948 risc_address = ext_mem_cnt = 0; 960 risc_address = ext_mem_cnt = 0;
949 flags = 0; 961 flags = 0;
@@ -988,6 +1000,29 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
988 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1000 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
989 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1001 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
990 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); 1002 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1003
1004 /* Multi queue registers */
1005 if (ha->mqenable) {
1006 qreg_size = sizeof(struct qla2xxx_mq_chain);
1007 mq = kzalloc(qreg_size, GFP_KERNEL);
1008 if (!mq)
1009 goto qla25xx_fw_dump_failed_0;
1010 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
1011 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
1012 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
1013 mq->count = htonl(que_cnt);
1014 mq->chain_size = htonl(qreg_size);
1015 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
1016 for (cnt = 0; cnt < que_cnt; cnt++) {
1017 reg25 = (struct device_reg_25xxmq *) ((void *)
1018 ha->mqiobase + cnt * QLA_QUE_PAGE);
1019 que_idx = cnt * 4;
1020 mq->qregs[que_idx] = htonl(reg25->req_q_in);
1021 mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
1022 mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
1023 mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
1024 }
1025 }
991 WRT_REG_DWORD(&reg->iobase_window, 0x00); 1026 WRT_REG_DWORD(&reg->iobase_window, 0x00);
992 RD_REG_DWORD(&reg->iobase_window); 1027 RD_REG_DWORD(&reg->iobase_window);
993 1028
@@ -1225,7 +1260,14 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1225 1260
1226 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1261 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1227 1262
1228 fcec = nxt + ntohl(ha->fw_dump->eft_size); 1263 if (ha->mqenable) {
1264 nxt = nxt + ntohl(ha->fw_dump->eft_size);
1265 memcpy(nxt, mq, qreg_size);
1266 kfree(mq);
1267 fcec = nxt + qreg_size;
1268 } else {
1269 fcec = nxt + ntohl(ha->fw_dump->eft_size);
1270 }
1229 fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST); 1271 fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
1230 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + 1272 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
1231 fce_calc_size(ha->fce_bufs)); 1273 fce_calc_size(ha->fce_bufs));
@@ -1248,7 +1290,7 @@ qla25xx_fw_dump_failed_0:
1248 } else { 1290 } else {
1249 qla_printk(KERN_INFO, ha, 1291 qla_printk(KERN_INFO, ha,
1250 "Firmware dump saved to temp buffer (%ld/%p).\n", 1292 "Firmware dump saved to temp buffer (%ld/%p).\n",
1251 ha->host_no, ha->fw_dump); 1293 base_vha->host_no, ha->fw_dump);
1252 ha->fw_dumped = 1; 1294 ha->fw_dumped = 1;
1253 } 1295 }
1254 1296
@@ -1256,15 +1298,15 @@ qla25xx_fw_dump_failed:
1256 if (!hardware_locked) 1298 if (!hardware_locked)
1257 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1258} 1300}
1259
1260/****************************************************************************/ 1301/****************************************************************************/
1261/* Driver Debug Functions. */ 1302/* Driver Debug Functions. */
1262/****************************************************************************/ 1303/****************************************************************************/
1263 1304
1264void 1305void
1265qla2x00_dump_regs(scsi_qla_host_t *ha) 1306qla2x00_dump_regs(scsi_qla_host_t *vha)
1266{ 1307{
1267 int i; 1308 int i;
1309 struct qla_hw_data *ha = vha->hw;
1268 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1310 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1269 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 1311 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1270 uint16_t __iomem *mbx_reg; 1312 uint16_t __iomem *mbx_reg;
@@ -1274,7 +1316,7 @@ qla2x00_dump_regs(scsi_qla_host_t *ha)
1274 1316
1275 printk("Mailbox registers:\n"); 1317 printk("Mailbox registers:\n");
1276 for (i = 0; i < 6; i++) 1318 for (i = 0; i < 6; i++)
1277 printk("scsi(%ld): mbox %d 0x%04x \n", ha->host_no, i, 1319 printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
1278 RD_REG_WORD(mbx_reg++)); 1320 RD_REG_WORD(mbx_reg++));
1279} 1321}
1280 1322
@@ -1302,3 +1344,5 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
1302 if (cnt % 16) 1344 if (cnt % 16)
1303 printk("\n"); 1345 printk("\n");
1304} 1346}
1347
1348
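The qla25xx_fw_dump() changes above splice a multi-queue chain into the dump image: when ha->mqenable is set, a struct qla2xxx_mq_chain holding four queue-pointer registers per queue (request/response in and out) is copied in right after the EFT region, and the FCE chain is pushed back by its size. A small sketch of that layout arithmetic follows; the buffer and region sizes are made up for the example.

/* Illustrative dump-layout arithmetic for the MQ chain insertion. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MQ_SLOTS 32            /* stands in for QLA_MQ_SIZE */

struct mq_chain_sketch {
        uint32_t type;
        uint32_t chain_size;
        uint32_t count;
        uint32_t qregs[4 * MQ_SLOTS];  /* req in/out, rsp in/out per queue */
};

int main(void)
{
        uint8_t dump[8192];
        size_t eft_size = 4096;        /* whatever the EFT region occupies */
        int mq_enabled = 1;

        uint8_t *nxt = dump + eft_size;        /* end of the EFT region */
        uint8_t *fcec;

        if (mq_enabled) {
                struct mq_chain_sketch mq = { .count = 2 };
                memcpy(nxt, &mq, sizeof(mq));  /* MQ chain goes first ... */
                fcec = nxt + sizeof(mq);       /* ... FCE chain follows it */
        } else {
                fcec = nxt;                    /* old layout: FCE right after EFT */
        }
        printf("FCE chain offset: %zu\n", (size_t)(fcec - dump));
        return 0;
}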
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2e9c0c097f5e..c1794a70a45f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -4,6 +4,9 @@
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7
8#include "qla_def.h"
9
7/* 10/*
8 * Driver debug definitions. 11 * Driver debug definitions.
9 */ 12 */
@@ -23,6 +26,7 @@
23/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ 26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
24/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ 27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
25/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ 28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */
26 30
27/* 31/*
28* Macros use for debugging the driver. 32* Macros use for debugging the driver.
@@ -43,6 +47,7 @@
43#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) 47#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
44#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0) 48#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
45#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0) 49#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
50#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
46 51
47#if defined(QL_DEBUG_LEVEL_3) 52#if defined(QL_DEBUG_LEVEL_3)
48#define DEBUG3(x) do {x;} while (0) 53#define DEBUG3(x) do {x;} while (0)
@@ -127,7 +132,6 @@
127#else 132#else
128#define DEBUG16(x) do {} while (0) 133#define DEBUG16(x) do {} while (0)
129#endif 134#endif
130
131/* 135/*
132 * Firmware Dump structure definition 136 * Firmware Dump structure definition
133 */ 137 */
@@ -266,8 +270,17 @@ struct qla2xxx_fce_chain {
266 uint32_t eregs[8]; 270 uint32_t eregs[8];
267}; 271};
268 272
273struct qla2xxx_mq_chain {
274 uint32_t type;
275 uint32_t chain_size;
276
277 uint32_t count;
278 uint32_t qregs[4 * QLA_MQ_SIZE];
279};
280
269#define DUMP_CHAIN_VARIANT 0x80000000 281#define DUMP_CHAIN_VARIANT 0x80000000
270#define DUMP_CHAIN_FCE 0x7FFFFAF0 282#define DUMP_CHAIN_FCE 0x7FFFFAF0
283#define DUMP_CHAIN_MQ 0x7FFFFAF1
271#define DUMP_CHAIN_LAST 0x80000000 284#define DUMP_CHAIN_LAST 0x80000000
272 285
273struct qla2xxx_fw_dump { 286struct qla2xxx_fw_dump {
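The new DUMP_CHAIN_MQ tag added in qla_dbg.h slots into the existing chain scheme: each chained block is labelled with its kind, and the final block also carries DUMP_CHAIN_LAST, which shares its bit value with DUMP_CHAIN_VARIANT. A tiny check of how the tags compose, using the constants from the header:

#include <stdint.h>
#include <stdio.h>

#define DUMP_CHAIN_VARIANT 0x80000000u
#define DUMP_CHAIN_FCE     0x7FFFFAF0u
#define DUMP_CHAIN_MQ      0x7FFFFAF1u
#define DUMP_CHAIN_LAST    0x80000000u

int main(void)
{
        uint32_t mq_type  = DUMP_CHAIN_MQ;                    /* middle block */
        uint32_t fce_type = DUMP_CHAIN_FCE | DUMP_CHAIN_LAST; /* final block */

        printf("mq  type 0x%08x last? %s\n", mq_type,
               (mq_type & DUMP_CHAIN_LAST) ? "yes" : "no");
        printf("fce type 0x%08x last? %s\n", fce_type,
               (fce_type & DUMP_CHAIN_LAST) ? "yes" : "no");
        return 0;
}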
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b97194096d8e..a29c95204975 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -181,11 +181,14 @@
181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
183 183
184struct req_que;
185
184/* 186/*
185 * SCSI Request Block 187 * SCSI Request Block
186 */ 188 */
187typedef struct srb { 189typedef struct srb {
188 struct scsi_qla_host *ha; /* HA the SP is queued on */ 190 struct scsi_qla_host *vha; /* HA the SP is queued on */
191 struct req_que *que;
189 struct fc_port *fcport; 192 struct fc_port *fcport;
190 193
191 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 194 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -369,9 +372,17 @@ struct device_reg_2xxx {
369 } u_end; 372 } u_end;
370}; 373};
371 374
375struct device_reg_25xxmq {
376 volatile uint32_t req_q_in;
377 volatile uint32_t req_q_out;
378 volatile uint32_t rsp_q_in;
379 volatile uint32_t rsp_q_out;
380};
381
372typedef union { 382typedef union {
373 struct device_reg_2xxx isp; 383 struct device_reg_2xxx isp;
374 struct device_reg_24xx isp24; 384 struct device_reg_24xx isp24;
385 struct device_reg_25xxmq isp25mq;
375} device_reg_t; 386} device_reg_t;
376 387
377#define ISP_REQ_Q_IN(ha, reg) \ 388#define ISP_REQ_Q_IN(ha, reg) \
@@ -1524,7 +1535,7 @@ typedef struct {
1524 */ 1535 */
1525typedef struct fc_port { 1536typedef struct fc_port {
1526 struct list_head list; 1537 struct list_head list;
1527 struct scsi_qla_host *ha; 1538 struct scsi_qla_host *vha;
1528 1539
1529 uint8_t node_name[WWN_SIZE]; 1540 uint8_t node_name[WWN_SIZE];
1530 uint8_t port_name[WWN_SIZE]; 1541 uint8_t port_name[WWN_SIZE];
@@ -1550,7 +1561,6 @@ typedef struct fc_port {
1550 unsigned long last_queue_full; 1561 unsigned long last_queue_full;
1551 unsigned long last_ramp_up; 1562 unsigned long last_ramp_up;
1552 1563
1553 struct list_head vp_fcport;
1554 uint16_t vp_idx; 1564 uint16_t vp_idx;
1555} fc_port_t; 1565} fc_port_t;
1556 1566
@@ -2037,6 +2047,8 @@ typedef struct vport_params {
2037#define VP_RET_CODE_NO_MEM 5 2047#define VP_RET_CODE_NO_MEM 5
2038#define VP_RET_CODE_NOT_FOUND 6 2048#define VP_RET_CODE_NOT_FOUND 6
2039 2049
2050struct qla_hw_data;
2051
2040/* 2052/*
2041 * ISP operations 2053 * ISP operations
2042 */ 2054 */
@@ -2055,10 +2067,11 @@ struct isp_operations {
2055 char * (*fw_version_str) (struct scsi_qla_host *, char *); 2067 char * (*fw_version_str) (struct scsi_qla_host *, char *);
2056 2068
2057 irq_handler_t intr_handler; 2069 irq_handler_t intr_handler;
2058 void (*enable_intrs) (struct scsi_qla_host *); 2070 void (*enable_intrs) (struct qla_hw_data *);
2059 void (*disable_intrs) (struct scsi_qla_host *); 2071 void (*disable_intrs) (struct qla_hw_data *);
2060 2072
2061 int (*abort_command) (struct scsi_qla_host *, srb_t *); 2073 int (*abort_command) (struct scsi_qla_host *, srb_t *,
2074 struct req_que *);
2062 int (*target_reset) (struct fc_port *, unsigned int); 2075 int (*target_reset) (struct fc_port *, unsigned int);
2063 int (*lun_reset) (struct fc_port *, unsigned int); 2076 int (*lun_reset) (struct fc_port *, unsigned int);
2064 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, 2077 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2089,6 +2102,10 @@ struct isp_operations {
2089 uint32_t); 2102 uint32_t);
2090 2103
2091 int (*get_flash_version) (struct scsi_qla_host *, void *); 2104 int (*get_flash_version) (struct scsi_qla_host *, void *);
2105 int (*start_scsi) (srb_t *);
2106 void (*wrt_req_reg) (struct qla_hw_data *, uint16_t, uint16_t);
2107 void (*wrt_rsp_reg) (struct qla_hw_data *, uint16_t, uint16_t);
2108 uint16_t (*rd_req_reg) (struct qla_hw_data *, uint16_t);
2092}; 2109};
2093 2110
2094/* MSI-X Support *************************************************************/ 2111/* MSI-X Support *************************************************************/
@@ -2100,16 +2117,18 @@ struct isp_operations {
2100#define QLA_MSIX_DEFAULT 0x00 2117#define QLA_MSIX_DEFAULT 0x00
2101#define QLA_MSIX_RSP_Q 0x01 2118#define QLA_MSIX_RSP_Q 0x01
2102 2119
2103#define QLA_MSIX_ENTRIES 2
2104#define QLA_MIDX_DEFAULT 0 2120#define QLA_MIDX_DEFAULT 0
2105#define QLA_MIDX_RSP_Q 1 2121#define QLA_MIDX_RSP_Q 1
2122#define QLA_PCI_MSIX_CONTROL 0xa2
2106 2123
2107struct scsi_qla_host; 2124struct scsi_qla_host;
2125struct rsp_que;
2108 2126
2109struct qla_msix_entry { 2127struct qla_msix_entry {
2110 int have_irq; 2128 int have_irq;
2111 uint32_t msix_vector; 2129 uint32_t vector;
2112 uint16_t msix_entry; 2130 uint16_t entry;
2131 struct rsp_que *rsp;
2113}; 2132};
2114 2133
2115#define WATCH_INTERVAL 1 /* number of seconds */ 2134#define WATCH_INTERVAL 1 /* number of seconds */
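
Not part of the patch: the struct rsp_que back-pointer added to qla_msix_entry, together with the msix pointer inside rsp_que further below, indicates that each MSI-X vector is now bound to exactly one response queue, so a per-vector handler can recover its queue from the cookie it was registered with instead of doing a global lookup. A simplified, hypothetical model of that association; msix_rsp_handler and the trimmed structs are stand-ins and no real IRQ registration is involved.

#include <stdint.h>
#include <stdio.h>

struct rsp_que;                        /* forward declaration */

struct qla_msix_entry {
    int have_irq;
    uint32_t vector;
    uint16_t entry;
    struct rsp_que *rsp;               /* new: vector -> queue link */
};

struct rsp_que {
    uint16_t id;
    struct qla_msix_entry *msix;       /* queue -> vector link */
};

/* Hypothetical per-vector handler: the registration cookie is the
 * response queue itself, so servicing needs no adapter-wide search. */
static void msix_rsp_handler(void *dev_id)
{
    struct rsp_que *rsp = dev_id;

    printf("vector %u services response queue %u\n",
           rsp->msix->vector, (unsigned)rsp->id);
}

int main(void)
{
    struct qla_msix_entry vec = { .have_irq = 1, .vector = 42, .entry = 1 };
    struct rsp_que rsp = { .id = 1, .msix = &vec };

    vec.rsp = &rsp;                    /* bound at setup time */
    msix_rsp_handler(&rsp);            /* what the IRQ core would do */
    return 0;
}
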
@@ -2160,208 +2179,137 @@ struct qla_statistics {
2160 uint64_t output_bytes; 2179 uint64_t output_bytes;
2161}; 2180};
2162 2181
2163/* 2182/* Multi queue support */
2164 * Linux Host Adapter structure 2183#define MBC_INITIALIZE_MULTIQ 0x1f
2165 */ 2184#define QLA_QUE_PAGE 0X1000
2166typedef struct scsi_qla_host { 2185#define QLA_MQ_SIZE 32
2167 struct list_head list; 2186#define QLA_MAX_HOST_QUES 16
2187#define QLA_MAX_QUEUES 256
2188#define ISP_QUE_REG(ha, id) \
2189 ((ha->mqenable) ? \
2190 ((void *)(ha->mqiobase) +\
2191 (QLA_QUE_PAGE * id)) :\
2192 ((void *)(ha->iobase)))
2193#define QLA_REQ_QUE_ID(tag) \
2194 ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
2195#define QLA_DEFAULT_QUE_QOS 5
2196#define QLA_PRECONFIG_VPORTS 32
2197#define QLA_MAX_VPORTS_QLA24XX 128
2198#define QLA_MAX_VPORTS_QLA25XX 256
2199/* Response queue data structure */
2200struct rsp_que {
2201 dma_addr_t dma;
2202 response_t *ring;
2203 response_t *ring_ptr;
2204 uint16_t ring_index;
2205 uint16_t out_ptr;
2206 uint16_t length;
2207 uint16_t options;
2208 uint16_t rid;
2209 uint16_t id;
2210 uint16_t vp_idx;
2211 struct qla_hw_data *hw;
2212 struct qla_msix_entry *msix;
2213 struct req_que *req;
2214};
2168 2215
2169 /* Commonly used flags and state information. */ 2216/* Request queue data structure */
2170 struct Scsi_Host *host; 2217struct req_que {
2171 struct pci_dev *pdev; 2218 dma_addr_t dma;
2219 request_t *ring;
2220 request_t *ring_ptr;
2221 uint16_t ring_index;
2222 uint16_t in_ptr;
2223 uint16_t cnt;
2224 uint16_t length;
2225 uint16_t options;
2226 uint16_t rid;
2227 uint16_t id;
2228 uint16_t qos;
2229 uint16_t vp_idx;
2230 struct rsp_que *rsp;
2231 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
2232 uint32_t current_outstanding_cmd;
2233 int max_q_depth;
2234};
2172 2235
2173 unsigned long host_no; 2236/*
2237 * Qlogic host adapter specific data structure.
2238*/
2239struct qla_hw_data {
2240 struct pci_dev *pdev;
2241 /* SRB cache. */
2242#define SRB_MIN_REQ 128
2243 mempool_t *srb_mempool;
2174 2244
2175 volatile struct { 2245 volatile struct {
2176 uint32_t init_done :1;
2177 uint32_t online :1;
2178 uint32_t mbox_int :1; 2246 uint32_t mbox_int :1;
2179 uint32_t mbox_busy :1; 2247 uint32_t mbox_busy :1;
2180 uint32_t rscn_queue_overflow :1;
2181 uint32_t reset_active :1;
2182
2183 uint32_t management_server_logged_in :1;
2184 uint32_t process_response_queue :1;
2185 2248
2186 uint32_t disable_risc_code_load :1; 2249 uint32_t disable_risc_code_load :1;
2187 uint32_t enable_64bit_addressing :1; 2250 uint32_t enable_64bit_addressing :1;
2188 uint32_t enable_lip_reset :1; 2251 uint32_t enable_lip_reset :1;
2189 uint32_t enable_lip_full_login :1;
2190 uint32_t enable_target_reset :1; 2252 uint32_t enable_target_reset :1;
2253 uint32_t enable_lip_full_login :1;
2191 uint32_t enable_led_scheme :1; 2254 uint32_t enable_led_scheme :1;
2192 uint32_t inta_enabled :1; 2255 uint32_t inta_enabled :1;
2193 uint32_t msi_enabled :1; 2256 uint32_t msi_enabled :1;
2194 uint32_t msix_enabled :1; 2257 uint32_t msix_enabled :1;
2195 uint32_t disable_serdes :1; 2258 uint32_t disable_serdes :1;
2196 uint32_t gpsc_supported :1; 2259 uint32_t gpsc_supported :1;
2197 uint32_t vsan_enabled :1; 2260 uint32_t vsan_enabled :1;
2198 uint32_t npiv_supported :1; 2261 uint32_t npiv_supported :1;
2199 uint32_t fce_enabled :1; 2262 uint32_t fce_enabled :1;
2200 uint32_t hw_event_marker_found :1; 2263 uint32_t hw_event_marker_found:1;
2201 } flags; 2264 } flags;
2202 2265
2203 atomic_t loop_state;
2204#define LOOP_TIMEOUT 1
2205#define LOOP_DOWN 2
2206#define LOOP_UP 3
2207#define LOOP_UPDATE 4
2208#define LOOP_READY 5
2209#define LOOP_DEAD 6
2210
2211 unsigned long dpc_flags;
2212#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
2213#define RESET_ACTIVE 1
2214#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
2215#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
2216#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
2217#define LOOP_RESYNC_ACTIVE 5
2218#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
2219#define RSCN_UPDATE 7 /* Perform an RSCN update. */
2220#define MAILBOX_RETRY 8
2221#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
2222#define FAILOVER_EVENT_NEEDED 10
2223#define FAILOVER_EVENT 11
2224#define FAILOVER_NEEDED 12
2225#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
2226#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
2227#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
2228#define ABORT_QUEUES_NEEDED 16
2229#define RELOGIN_NEEDED 17
2230#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
2231#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
2232#define ISP_ABORT_RETRY 20 /* ISP aborted. */
2233#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
2234#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
2235#define IOCTL_ERROR_RECOVERY 23
2236#define LOOP_RESET_NEEDED 24
2237#define BEACON_BLINK_NEEDED 25
2238#define REGISTER_FDMI_NEEDED 26
2239#define FCPORT_UPDATE_NEEDED 27
2240#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2241#define UNLOADING 29
2242#define NPIV_CONFIG_NEEDED 30
2243
2244 uint32_t device_flags;
2245#define DFLG_LOCAL_DEVICES BIT_0
2246#define DFLG_RETRY_LOCAL_DEVICES BIT_1
2247#define DFLG_FABRIC_DEVICES BIT_2
2248#define SWITCH_FOUND BIT_3
2249#define DFLG_NO_CABLE BIT_4
2250
2251#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2252#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
2253 uint32_t device_type;
2254#define DT_ISP2100 BIT_0
2255#define DT_ISP2200 BIT_1
2256#define DT_ISP2300 BIT_2
2257#define DT_ISP2312 BIT_3
2258#define DT_ISP2322 BIT_4
2259#define DT_ISP6312 BIT_5
2260#define DT_ISP6322 BIT_6
2261#define DT_ISP2422 BIT_7
2262#define DT_ISP2432 BIT_8
2263#define DT_ISP5422 BIT_9
2264#define DT_ISP5432 BIT_10
2265#define DT_ISP2532 BIT_11
2266#define DT_ISP8432 BIT_12
2267#define DT_ISP_LAST (DT_ISP8432 << 1)
2268
2269#define DT_IIDMA BIT_26
2270#define DT_FWI2 BIT_27
2271#define DT_ZIO_SUPPORTED BIT_28
2272#define DT_OEM_001 BIT_29
2273#define DT_ISP2200A BIT_30
2274#define DT_EXTENDED_IDS BIT_31
2275
2276#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
2277#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
2278#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
2279#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
2280#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
2281#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
2282#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
2283#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
2284#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
2285#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
2286#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
2287#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2288#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2289#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
2290
2291#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2292 IS_QLA6312(ha) || IS_QLA6322(ha))
2293#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2294#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2295#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2296#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2297#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2298 IS_QLA84XX(ha))
2299
2300#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2301#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2302#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2303#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2304#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2305
2306 /* SRB cache. */
2307#define SRB_MIN_REQ 128
2308 mempool_t *srb_mempool;
2309
2310 /* This spinlock is used to protect "io transactions", you must 2266 /* This spinlock is used to protect "io transactions", you must
2311 * acquire it before doing any IO to the card, eg with RD_REG*() and 2267 * acquire it before doing any IO to the card, eg with RD_REG*() and
2312 * WRT_REG*() for the duration of your entire commandtransaction. 2268 * WRT_REG*() for the duration of your entire commandtransaction.
2313 * 2269 *
2314 * This spinlock is of lower priority than the io request lock. 2270 * This spinlock is of lower priority than the io request lock.
2315 */ 2271 */
2316
2317 spinlock_t hardware_lock ____cacheline_aligned;
2318 2272
2273 spinlock_t hardware_lock ____cacheline_aligned;
2319 int bars; 2274 int bars;
2320 int mem_only; 2275 int mem_only;
2321 device_reg_t __iomem *iobase; /* Base I/O address */ 2276 device_reg_t __iomem *iobase; /* Base I/O address */
2322 resource_size_t pio_address; 2277 resource_size_t pio_address;
2323#define MIN_IOBASE_LEN 0x100
2324
2325 /* ISP ring lock, rings, and indexes */
2326 dma_addr_t request_dma; /* Physical address. */
2327 request_t *request_ring; /* Base virtual address */
2328 request_t *request_ring_ptr; /* Current address. */
2329 uint16_t req_ring_index; /* Current index. */
2330 uint16_t req_q_cnt; /* Number of available entries. */
2331 uint16_t request_q_length;
2332
2333 dma_addr_t response_dma; /* Physical address. */
2334 response_t *response_ring; /* Base virtual address */
2335 response_t *response_ring_ptr; /* Current address. */
2336 uint16_t rsp_ring_index; /* Current index. */
2337 uint16_t response_q_length;
2338
2339 struct isp_operations *isp_ops;
2340 2278
2341 /* Outstandings ISP commands. */ 2279#define MIN_IOBASE_LEN 0x100
2342 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; 2280/* Multi queue data structs */
2343 uint32_t current_outstanding_cmd; 2281 device_reg_t *mqiobase;
2344 srb_t *status_srb; /* Status continuation entry. */ 2282 uint16_t msix_count;
2283 uint8_t mqenable;
2284 struct req_que **req_q_map;
2285 struct rsp_que **rsp_q_map;
2286 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2287 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2288 uint16_t max_queues;
2289 struct qla_npiv_entry *npiv_info;
2290 uint16_t nvram_npiv_size;
2291
2292 uint16_t switch_cap;
2293#define FLOGI_SEQ_DEL BIT_8
2294#define FLOGI_MID_SUPPORT BIT_10
2295#define FLOGI_VSAN_SUPPORT BIT_12
2296#define FLOGI_SP_SUPPORT BIT_13
2297 /* Timeout timers. */
2298 uint8_t loop_down_abort_time; /* port down timer */
2299 atomic_t loop_down_timer; /* loop down timer */
2300 uint8_t link_down_timeout; /* link down timeout */
2301 uint16_t max_loop_id;
2345 2302
2346 /* ISP configuration data. */
2347 uint16_t loop_id; /* Host adapter loop id */
2348 uint16_t switch_cap;
2349#define FLOGI_SEQ_DEL BIT_8
2350#define FLOGI_MID_SUPPORT BIT_10
2351#define FLOGI_VSAN_SUPPORT BIT_12
2352#define FLOGI_SP_SUPPORT BIT_13
2353 uint16_t fb_rev; 2303 uint16_t fb_rev;
2354
2355 port_id_t d_id; /* Host adapter port id */
2356 uint16_t max_public_loop_ids; 2304 uint16_t max_public_loop_ids;
2357 uint16_t min_external_loopid; /* First external loop Id */ 2305 uint16_t min_external_loopid; /* First external loop Id */
2358 2306
2359#define PORT_SPEED_UNKNOWN 0xFFFF 2307#define PORT_SPEED_UNKNOWN 0xFFFF
2360#define PORT_SPEED_1GB 0x00 2308#define PORT_SPEED_1GB 0x00
2361#define PORT_SPEED_2GB 0x01 2309#define PORT_SPEED_2GB 0x01
2362#define PORT_SPEED_4GB 0x03 2310#define PORT_SPEED_4GB 0x03
2363#define PORT_SPEED_8GB 0x04 2311#define PORT_SPEED_8GB 0x04
2364 uint16_t link_data_rate; /* F/W operating speed */ 2312 uint16_t link_data_rate; /* F/W operating speed */
2365 2313
2366 uint8_t current_topology; 2314 uint8_t current_topology;
2367 uint8_t prev_topology; 2315 uint8_t prev_topology;
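
A quick check of the sizing arithmetic introduced above (editor's addition, not patch content): with QLA_MAX_QUEUES = 256 the qid maps reserve 256 bits, i.e. (256 / 8) / sizeof(unsigned long) = 4 longs on a 64-bit build or 8 on 32-bit, and QLA_REQ_QUE_ID() folds any tag outside 1..255 back to the default queue 0. A standalone verification; the macro is reproduced here with extra parentheses for safety.

#include <stdio.h>

#define QLA_MAX_QUEUES 256
/* Clamp reproduced from the new qla_def.h definition. */
#define QLA_REQ_QUE_ID(tag) \
    (((tag) < QLA_MAX_QUEUES && (tag) > 0) ? (tag) : 0)

int main(void)
{
    unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];

    printf("qid map: %zu words, %zu bits\n",
           sizeof(req_qid_map) / sizeof(req_qid_map[0]),
           sizeof(req_qid_map) * 8);
    printf("tag 5   -> queue %d\n", QLA_REQ_QUE_ID(5));    /* 5 */
    printf("tag 300 -> queue %d\n", QLA_REQ_QUE_ID(300));  /* 0 */
    printf("tag 0   -> queue %d\n", QLA_REQ_QUE_ID(0));    /* 0 */
    return 0;
}
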
@@ -2370,15 +2318,69 @@ typedef struct scsi_qla_host {
2370#define ISP_CFG_FL 4 2318#define ISP_CFG_FL 4
2371#define ISP_CFG_F 8 2319#define ISP_CFG_F 8
2372 2320
2373 uint8_t operating_mode; /* F/W operating mode */ 2321 uint8_t operating_mode; /* F/W operating mode */
2374#define LOOP 0 2322#define LOOP 0
2375#define P2P 1 2323#define P2P 1
2376#define LOOP_P2P 2 2324#define LOOP_P2P 2
2377#define P2P_LOOP 3 2325#define P2P_LOOP 3
2378
2379 uint8_t marker_needed;
2380
2381 uint8_t interrupts_on; 2326 uint8_t interrupts_on;
2327 uint32_t isp_abort_cnt;
2328
2329#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2330#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
2331 uint32_t device_type;
2332#define DT_ISP2100 BIT_0
2333#define DT_ISP2200 BIT_1
2334#define DT_ISP2300 BIT_2
2335#define DT_ISP2312 BIT_3
2336#define DT_ISP2322 BIT_4
2337#define DT_ISP6312 BIT_5
2338#define DT_ISP6322 BIT_6
2339#define DT_ISP2422 BIT_7
2340#define DT_ISP2432 BIT_8
2341#define DT_ISP5422 BIT_9
2342#define DT_ISP5432 BIT_10
2343#define DT_ISP2532 BIT_11
2344#define DT_ISP8432 BIT_12
2345#define DT_ISP_LAST (DT_ISP8432 << 1)
2346
2347#define DT_IIDMA BIT_26
2348#define DT_FWI2 BIT_27
2349#define DT_ZIO_SUPPORTED BIT_28
2350#define DT_OEM_001 BIT_29
2351#define DT_ISP2200A BIT_30
2352#define DT_EXTENDED_IDS BIT_31
2353#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
2354#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
2355#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
2356#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
2357#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
2358#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
2359#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
2360#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
2361#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
2362#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
2363#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
2364#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2365#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2366#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
2367
2368#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2369 IS_QLA6312(ha) || IS_QLA6322(ha))
2370#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2371#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2372#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2373#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2374#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2375 IS_QLA84XX(ha))
2376#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2377 IS_QLA25XX(ha))
2378
2379#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2380#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2381#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2382#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2383#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2382 2384
2383 /* HBA serial number */ 2385 /* HBA serial number */
2384 uint8_t serial0; 2386 uint8_t serial0;
@@ -2386,8 +2388,8 @@ typedef struct scsi_qla_host {
2386 uint8_t serial2; 2388 uint8_t serial2;
2387 2389
2388 /* NVRAM configuration data */ 2390 /* NVRAM configuration data */
2389#define MAX_NVRAM_SIZE 4096 2391#define MAX_NVRAM_SIZE 4096
2390#define VPD_OFFSET MAX_NVRAM_SIZE / 2 2392#define VPD_OFFSET MAX_NVRAM_SIZE / 2
2391 uint16_t nvram_size; 2393 uint16_t nvram_size;
2392 uint16_t nvram_base; 2394 uint16_t nvram_base;
2393 void *nvram; 2395 void *nvram;
@@ -2401,22 +2403,8 @@ typedef struct scsi_qla_host {
2401 uint16_t r_a_tov; 2403 uint16_t r_a_tov;
2402 int port_down_retry_count; 2404 int port_down_retry_count;
2403 uint8_t mbx_count; 2405 uint8_t mbx_count;
2404 uint16_t last_loop_id;
2405 uint16_t mgmt_svr_loop_id;
2406
2407 uint32_t login_retry_count;
2408 int max_q_depth;
2409
2410 struct list_head work_list;
2411
2412 /* Fibre Channel Device List. */
2413 struct list_head fcports;
2414
2415 /* RSCN queue. */
2416 uint32_t rscn_queue[MAX_RSCN_COUNT];
2417 uint8_t rscn_in_ptr;
2418 uint8_t rscn_out_ptr;
2419 2406
2407 uint32_t login_retry_count;
2420 /* SNS command interfaces. */ 2408 /* SNS command interfaces. */
2421 ms_iocb_entry_t *ms_iocb; 2409 ms_iocb_entry_t *ms_iocb;
2422 dma_addr_t ms_iocb_dma; 2410 dma_addr_t ms_iocb_dma;
@@ -2426,28 +2414,20 @@ typedef struct scsi_qla_host {
2426 struct sns_cmd_pkt *sns_cmd; 2414 struct sns_cmd_pkt *sns_cmd;
2427 dma_addr_t sns_cmd_dma; 2415 dma_addr_t sns_cmd_dma;
2428 2416
2429#define SFP_DEV_SIZE 256 2417#define SFP_DEV_SIZE 256
2430#define SFP_BLOCK_SIZE 64 2418#define SFP_BLOCK_SIZE 64
2431 void *sfp_data; 2419 void *sfp_data;
2432 dma_addr_t sfp_data_dma; 2420 dma_addr_t sfp_data_dma;
2433 2421
2434 struct task_struct *dpc_thread; 2422 struct task_struct *dpc_thread;
2435 uint8_t dpc_active; /* DPC routine is active */ 2423 uint8_t dpc_active; /* DPC routine is active */
2436 2424
2437 /* Timeout timers. */
2438 uint8_t loop_down_abort_time; /* port down timer */
2439 atomic_t loop_down_timer; /* loop down timer */
2440 uint8_t link_down_timeout; /* link down timeout */
2441
2442 uint32_t timer_active;
2443 struct timer_list timer;
2444
2445 dma_addr_t gid_list_dma; 2425 dma_addr_t gid_list_dma;
2446 struct gid_list_info *gid_list; 2426 struct gid_list_info *gid_list;
2447 int gid_list_info_size; 2427 int gid_list_info_size;
2448 2428
2449 /* Small DMA pool allocations -- maximum 256 bytes in length. */ 2429 /* Small DMA pool allocations -- maximum 256 bytes in length. */
2450#define DMA_POOL_SIZE 256 2430#define DMA_POOL_SIZE 256
2451 struct dma_pool *s_dma_pool; 2431 struct dma_pool *s_dma_pool;
2452 2432
2453 dma_addr_t init_cb_dma; 2433 dma_addr_t init_cb_dma;
@@ -2459,17 +2439,17 @@ typedef struct scsi_qla_host {
2459 2439
2460 mbx_cmd_t *mcp; 2440 mbx_cmd_t *mcp;
2461 unsigned long mbx_cmd_flags; 2441 unsigned long mbx_cmd_flags;
2462#define MBX_INTERRUPT 1 2442#define MBX_INTERRUPT 1
2463#define MBX_INTR_WAIT 2 2443#define MBX_INTR_WAIT 2
2464#define MBX_UPDATE_FLASH_ACTIVE 3 2444#define MBX_UPDATE_FLASH_ACTIVE 3
2465 2445
2466 struct mutex vport_lock; /* Virtual port synchronization */ 2446 struct mutex vport_lock; /* Virtual port synchronization */
2467 struct completion mbx_cmd_comp; /* Serialize mbx access */ 2447 struct completion mbx_cmd_comp; /* Serialize mbx access */
2468 struct completion mbx_intr_comp; /* Used for completion notification */ 2448 struct completion mbx_intr_comp; /* Used for completion notification */
2469 2449
2470 uint32_t mbx_flags; 2450 uint32_t mbx_flags;
2471#define MBX_IN_PROGRESS BIT_0 2451#define MBX_IN_PROGRESS BIT_0
2472#define MBX_BUSY BIT_1 /* Got the Access */ 2452#define MBX_BUSY BIT_1 /* Got the Access */
2473#define MBX_SLEEPING_ON_SEM BIT_2 2453#define MBX_SLEEPING_ON_SEM BIT_2
2474#define MBX_POLLING_FOR_COMP BIT_3 2454#define MBX_POLLING_FOR_COMP BIT_3
2475#define MBX_COMPLETED BIT_4 2455#define MBX_COMPLETED BIT_4
@@ -2488,7 +2468,7 @@ typedef struct scsi_qla_host {
2488#define RISC_START_ADDRESS_2300 0x800 2468#define RISC_START_ADDRESS_2300 0x800
2489#define RISC_START_ADDRESS_2400 0x100000 2469#define RISC_START_ADDRESS_2400 0x100000
2490 2470
2491 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ 2471 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
2492 uint8_t fw_seriallink_options[4]; 2472 uint8_t fw_seriallink_options[4];
2493 uint16_t fw_seriallink_options24[4]; 2473 uint16_t fw_seriallink_options24[4];
2494 2474
@@ -2509,10 +2489,10 @@ typedef struct scsi_qla_host {
2509 uint64_t fce_wr, fce_rd; 2489 uint64_t fce_wr, fce_rd;
2510 struct mutex fce_mutex; 2490 struct mutex fce_mutex;
2511 2491
2492 uint32_t hw_event_start;
2512 uint32_t hw_event_ptr; 2493 uint32_t hw_event_ptr;
2513 uint32_t hw_event_pause_errors; 2494 uint32_t hw_event_pause_errors;
2514 2495
2515 uint8_t host_str[16];
2516 uint32_t pci_attr; 2496 uint32_t pci_attr;
2517 uint16_t chip_revision; 2497 uint16_t chip_revision;
2518 2498
@@ -2523,11 +2503,6 @@ typedef struct scsi_qla_host {
2523 char model_desc[80]; 2503 char model_desc[80];
2524 uint8_t adapter_id[16+1]; 2504 uint8_t adapter_id[16+1];
2525 2505
2526 uint8_t *node_name;
2527 uint8_t *port_name;
2528 uint8_t fabric_node_name[WWN_SIZE];
2529 uint32_t isp_abort_cnt;
2530
2531 /* Option ROM information. */ 2506 /* Option ROM information. */
2532 char *optrom_buffer; 2507 char *optrom_buffer;
2533 uint32_t optrom_size; 2508 uint32_t optrom_size;
@@ -2538,13 +2513,13 @@ typedef struct scsi_qla_host {
2538 uint32_t optrom_region_start; 2513 uint32_t optrom_region_start;
2539 uint32_t optrom_region_size; 2514 uint32_t optrom_region_size;
2540 2515
2541 /* PCI expansion ROM image information. */ 2516/* PCI expansion ROM image information. */
2542#define ROM_CODE_TYPE_BIOS 0 2517#define ROM_CODE_TYPE_BIOS 0
2543#define ROM_CODE_TYPE_FCODE 1 2518#define ROM_CODE_TYPE_FCODE 1
2544#define ROM_CODE_TYPE_EFI 3 2519#define ROM_CODE_TYPE_EFI 3
2545 uint8_t bios_revision[2]; 2520 uint8_t bios_revision[2];
2546 uint8_t efi_revision[2]; 2521 uint8_t efi_revision[2];
2547 uint8_t fcode_revision[16]; 2522 uint8_t fcode_revision[16];
2548 uint32_t fw_revision[4]; 2523 uint32_t fw_revision[4];
2549 2524
2550 uint32_t fdt_wrt_disable; 2525 uint32_t fdt_wrt_disable;
@@ -2553,39 +2528,144 @@ typedef struct scsi_qla_host {
2553 uint32_t fdt_unprotect_sec_cmd; 2528 uint32_t fdt_unprotect_sec_cmd;
2554 uint32_t fdt_protect_sec_cmd; 2529 uint32_t fdt_protect_sec_cmd;
2555 2530
2556 uint32_t flt_region_flt; 2531 uint32_t flt_region_flt;
2557 uint32_t flt_region_fdt; 2532 uint32_t flt_region_fdt;
2558 uint32_t flt_region_boot; 2533 uint32_t flt_region_boot;
2559 uint32_t flt_region_fw; 2534 uint32_t flt_region_fw;
2560 uint32_t flt_region_vpd_nvram; 2535 uint32_t flt_region_vpd_nvram;
2561 uint32_t flt_region_hw_event; 2536 uint32_t flt_region_hw_event;
2562 uint32_t flt_region_npiv_conf; 2537 uint32_t flt_region_npiv_conf;
2563 2538
2564 /* Needed for BEACON */ 2539 /* Needed for BEACON */
2565 uint16_t beacon_blink_led; 2540 uint16_t beacon_blink_led;
2566 uint8_t beacon_color_state; 2541 uint8_t beacon_color_state;
2567#define QLA_LED_GRN_ON 0x01 2542#define QLA_LED_GRN_ON 0x01
2568#define QLA_LED_YLW_ON 0x02 2543#define QLA_LED_YLW_ON 0x02
2569#define QLA_LED_ABR_ON 0x04 2544#define QLA_LED_ABR_ON 0x04
2570#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */ 2545#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
2571 /* ISP2322: red, green, amber. */ 2546 /* ISP2322: red, green, amber. */
2572 2547 uint16_t zio_mode;
2573 uint16_t zio_mode; 2548 uint16_t zio_timer;
2574 uint16_t zio_timer;
2575 struct fc_host_statistics fc_host_stat; 2549 struct fc_host_statistics fc_host_stat;
2576 2550
2577 struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES]; 2551 struct qla_msix_entry *msix_entries;
2552
2553 struct list_head vp_list; /* list of VP */
2554 unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
2555 sizeof(unsigned long)];
2556 uint16_t num_vhosts; /* number of vports created */
2557 uint16_t num_vsans; /* number of vsan created */
 2558 uint16_t max_npiv_vports; /* 63 or 125 per topology */
2559 int cur_vport_count;
2560
2561 struct qla_chip_state_84xx *cs84xx;
2562 struct qla_statistics qla_stats;
2563 struct isp_operations *isp_ops;
2564};
2565
2566/*
2567 * Qlogic scsi host structure
2568 */
2569typedef struct scsi_qla_host {
2570 struct list_head list;
2571 struct list_head vp_fcports; /* list of fcports */
2572 struct list_head work_list;
2573 /* Commonly used flags and state information. */
2574 struct Scsi_Host *host;
2575 unsigned long host_no;
2576 uint8_t host_str[16];
2577
2578 volatile struct {
2579 uint32_t init_done :1;
2580 uint32_t online :1;
2581 uint32_t rscn_queue_overflow :1;
2582 uint32_t reset_active :1;
2583
2584 uint32_t management_server_logged_in :1;
2585 uint32_t process_response_queue :1;
2586 } flags;
2587
2588 atomic_t loop_state;
2589#define LOOP_TIMEOUT 1
2590#define LOOP_DOWN 2
2591#define LOOP_UP 3
2592#define LOOP_UPDATE 4
2593#define LOOP_READY 5
2594#define LOOP_DEAD 6
2595
2596 unsigned long dpc_flags;
2597#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
2598#define RESET_ACTIVE 1
2599#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
2600#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
2601#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
2602#define LOOP_RESYNC_ACTIVE 5
2603#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
2604#define RSCN_UPDATE 7 /* Perform an RSCN update. */
2605#define MAILBOX_RETRY 8
2606#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
2607#define FAILOVER_EVENT_NEEDED 10
2608#define FAILOVER_EVENT 11
2609#define FAILOVER_NEEDED 12
2610#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
2611#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
2612#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
2613#define ABORT_QUEUES_NEEDED 16
2614#define RELOGIN_NEEDED 17
2615#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
2616#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
2617#define ISP_ABORT_RETRY 20 /* ISP aborted. */
2618#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
2619#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
2620#define IOCTL_ERROR_RECOVERY 23
2621#define LOOP_RESET_NEEDED 24
2622#define BEACON_BLINK_NEEDED 25
2623#define REGISTER_FDMI_NEEDED 26
2624#define FCPORT_UPDATE_NEEDED 27
2625#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2626#define UNLOADING 29
2627#define NPIV_CONFIG_NEEDED 30
2628
2629 uint32_t device_flags;
2630#define DFLG_LOCAL_DEVICES BIT_0
2631#define DFLG_RETRY_LOCAL_DEVICES BIT_1
2632#define DFLG_FABRIC_DEVICES BIT_2
2633#define SWITCH_FOUND BIT_3
2634#define DFLG_NO_CABLE BIT_4
2635
2636 srb_t *status_srb; /* Status continuation entry. */
2637
2638 /* ISP configuration data. */
2639 uint16_t loop_id; /* Host adapter loop id */
2640
2641 port_id_t d_id; /* Host adapter port id */
2642 uint8_t marker_needed;
2643 uint16_t mgmt_svr_loop_id;
2644
2645
2646
2647 /* RSCN queue. */
2648 uint32_t rscn_queue[MAX_RSCN_COUNT];
2649 uint8_t rscn_in_ptr;
2650 uint8_t rscn_out_ptr;
2651
2652 /* Timeout timers. */
2653 uint8_t loop_down_abort_time; /* port down timer */
2654 atomic_t loop_down_timer; /* loop down timer */
2655 uint8_t link_down_timeout; /* link down timeout */
2656
2657 uint32_t timer_active;
2658 struct timer_list timer;
2659
2660 uint8_t node_name[WWN_SIZE];
2661 uint8_t port_name[WWN_SIZE];
2662 uint8_t fabric_node_name[WWN_SIZE];
2663 uint32_t vp_abort_cnt;
2578 2664
2579 struct list_head vp_list; /* list of VP */
2580 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2665 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
2581 unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / sizeof(unsigned long)];
2582 uint16_t num_vhosts; /* number of vports created */
2583 uint16_t num_vsans; /* number of vsan created */
2584 uint16_t vp_idx; /* vport ID */ 2666 uint16_t vp_idx; /* vport ID */
2585 2667
2586 struct scsi_qla_host *parent; /* holds pport */
2587 unsigned long vp_flags; 2668 unsigned long vp_flags;
2588 struct list_head vp_fcports; /* list of fcports */
2589#define VP_IDX_ACQUIRED 0 /* bit no 0 */ 2669#define VP_IDX_ACQUIRED 0 /* bit no 0 */
2590#define VP_CREATE_NEEDED 1 2670#define VP_CREATE_NEEDED 1
2591#define VP_BIND_NEEDED 2 2671#define VP_BIND_NEEDED 2
@@ -2604,14 +2684,10 @@ typedef struct scsi_qla_host {
2604#define VP_ERR_FAB_NORESOURCES 3 2684#define VP_ERR_FAB_NORESOURCES 3
2605#define VP_ERR_FAB_LOGOUT 4 2685#define VP_ERR_FAB_LOGOUT 4
2606#define VP_ERR_ADAP_NORESOURCES 5 2686#define VP_ERR_ADAP_NORESOURCES 5
2607 uint16_t max_npiv_vports; /* 63 or 125 per topoloty */ 2687 struct qla_hw_data *hw;
2608 int cur_vport_count; 2688 int req_ques[QLA_MAX_HOST_QUES];
2609
2610 struct qla_chip_state_84xx *cs84xx;
2611 struct qla_statistics qla_stats;
2612} scsi_qla_host_t; 2689} scsi_qla_host_t;
2613 2690
2614
2615/* 2691/*
2616 * Macros to help code, maintain, etc. 2692 * Macros to help code, maintain, etc.
2617 */ 2693 */
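
To summarise the qla_def.h restructuring just shown (editor's gloss, with field names trimmed to a tiny subset): adapter-wide resources such as the PCI device, SRB mempool, queue maps and isp_ops now live in struct qla_hw_data, while scsi_qla_host keeps only per-(v)port state and reaches the hardware through its new hw pointer; each host additionally records, in req_ques[], which of the hardware's request queues it may submit to. A minimal hypothetical sketch of that relationship; vha_first_req_que is a made-up helper, not driver code.

#include <stdint.h>
#include <stdio.h>

#define QLA_MAX_HOST_QUES 16

/* Trimmed-down mirrors of the new layout, for illustration only. */
struct req_que { uint16_t id; };

struct qla_hw_data {
    struct req_que **req_q_map;          /* indexed by queue id */
    uint16_t max_queues;
};

struct scsi_qla_host {
    struct qla_hw_data *hw;              /* shared adapter state */
    int req_ques[QLA_MAX_HOST_QUES];     /* queue ids this vport uses */
    uint16_t vp_idx;
};

/* Hypothetical helper: resolve a vport's first request queue. */
static struct req_que *vha_first_req_que(struct scsi_qla_host *vha)
{
    return vha->hw->req_q_map[vha->req_ques[0]];
}

int main(void)
{
    struct req_que q0 = { .id = 0 };
    struct req_que *map[1] = { &q0 };
    struct qla_hw_data hw = { .req_q_map = map, .max_queues = 1 };
    struct scsi_qla_host vha = { .hw = &hw, .req_ques = { 0 } };

    printf("vport %u uses request queue %u\n",
           (unsigned)vha.vp_idx, (unsigned)vha_first_req_que(&vha)->id);
    return 0;
}
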
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 561a4411719d..0e366a1b44b3 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -15,10 +15,11 @@ static atomic_t qla2x00_dfs_root_count;
15static int 15static int
16qla2x00_dfs_fce_show(struct seq_file *s, void *unused) 16qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
17{ 17{
18 scsi_qla_host_t *ha = s->private; 18 scsi_qla_host_t *vha = s->private;
19 uint32_t cnt; 19 uint32_t cnt;
20 uint32_t *fce; 20 uint32_t *fce;
21 uint64_t fce_start; 21 uint64_t fce_start;
22 struct qla_hw_data *ha = vha->hw;
22 23
23 mutex_lock(&ha->fce_mutex); 24 mutex_lock(&ha->fce_mutex);
24 25
@@ -51,7 +52,8 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
51static int 52static int
52qla2x00_dfs_fce_open(struct inode *inode, struct file *file) 53qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
53{ 54{
54 scsi_qla_host_t *ha = inode->i_private; 55 scsi_qla_host_t *vha = inode->i_private;
56 struct qla_hw_data *ha = vha->hw;
55 int rval; 57 int rval;
56 58
57 if (!ha->flags.fce_enabled) 59 if (!ha->flags.fce_enabled)
@@ -60,7 +62,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
60 mutex_lock(&ha->fce_mutex); 62 mutex_lock(&ha->fce_mutex);
61 63
62 /* Pause tracing to flush FCE buffers. */ 64 /* Pause tracing to flush FCE buffers. */
63 rval = qla2x00_disable_fce_trace(ha, &ha->fce_wr, &ha->fce_rd); 65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
64 if (rval) 66 if (rval)
65 qla_printk(KERN_WARNING, ha, 67 qla_printk(KERN_WARNING, ha,
66 "DebugFS: Unable to disable FCE (%d).\n", rval); 68 "DebugFS: Unable to disable FCE (%d).\n", rval);
@@ -75,7 +77,8 @@ out:
75static int 77static int
76qla2x00_dfs_fce_release(struct inode *inode, struct file *file) 78qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
77{ 79{
78 scsi_qla_host_t *ha = inode->i_private; 80 scsi_qla_host_t *vha = inode->i_private;
81 struct qla_hw_data *ha = vha->hw;
79 int rval; 82 int rval;
80 83
81 if (ha->flags.fce_enabled) 84 if (ha->flags.fce_enabled)
@@ -86,7 +89,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
86 /* Re-enable FCE tracing. */ 89 /* Re-enable FCE tracing. */
87 ha->flags.fce_enabled = 1; 90 ha->flags.fce_enabled = 1;
88 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); 91 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
89 rval = qla2x00_enable_fce_trace(ha, ha->fce_dma, ha->fce_bufs, 92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
90 ha->fce_mb, &ha->fce_bufs); 93 ha->fce_mb, &ha->fce_bufs);
91 if (rval) { 94 if (rval) {
92 qla_printk(KERN_WARNING, ha, 95 qla_printk(KERN_WARNING, ha,
@@ -107,8 +110,9 @@ static const struct file_operations dfs_fce_ops = {
107}; 110};
108 111
109int 112int
110qla2x00_dfs_setup(scsi_qla_host_t *ha) 113qla2x00_dfs_setup(scsi_qla_host_t *vha)
111{ 114{
115 struct qla_hw_data *ha = vha->hw;
112 if (!IS_QLA25XX(ha)) 116 if (!IS_QLA25XX(ha))
113 goto out; 117 goto out;
114 if (!ha->fce) 118 if (!ha->fce)
@@ -130,7 +134,7 @@ create_dir:
130 goto create_nodes; 134 goto create_nodes;
131 135
132 mutex_init(&ha->fce_mutex); 136 mutex_init(&ha->fce_mutex);
133 ha->dfs_dir = debugfs_create_dir(ha->host_str, qla2x00_dfs_root); 137 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
134 if (!ha->dfs_dir) { 138 if (!ha->dfs_dir) {
135 qla_printk(KERN_NOTICE, ha, 139 qla_printk(KERN_NOTICE, ha,
136 "DebugFS: Unable to create ha directory.\n"); 140 "DebugFS: Unable to create ha directory.\n");
@@ -152,8 +156,9 @@ out:
152} 156}
153 157
154int 158int
155qla2x00_dfs_remove(scsi_qla_host_t *ha) 159qla2x00_dfs_remove(scsi_qla_host_t *vha)
156{ 160{
161 struct qla_hw_data *ha = vha->hw;
157 if (ha->dfs_fce) { 162 if (ha->dfs_fce) {
158 debugfs_remove(ha->dfs_fce); 163 debugfs_remove(ha->dfs_fce);
159 ha->dfs_fce = NULL; 164 ha->dfs_fce = NULL;
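
The qla_dfs.c hunks above show the conversion pattern that repeats through the rest of this patch: entry points keep their scsi_qla_host_t * signature, the parameter is renamed from ha to vha, and a local struct qla_hw_data *ha = vha->hw is taken on entry so adapter-wide fields remain reachable as ha-> while host-level fields (host_no, host_str, d_id, vp_idx) move to vha->. A self-contained caricature of that shape; the structs are minimal stand-ins and qla2xxx_fce_usable is a hypothetical function, not part of the driver.

#include <stdio.h>

/* Minimal stand-ins for the two driver structures. */
struct qla_hw_data { int fce_enabled; };

typedef struct scsi_qla_host {
    unsigned long host_no;
    struct qla_hw_data *hw;
} scsi_qla_host_t;

/* Post-split shape: take the host handle, derive the hardware handle
 * once, use "ha" for adapter state and "vha" for host state. */
static int qla2xxx_fce_usable(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;

    if (!ha->fce_enabled)
        printf("scsi(%lu): FCE not enabled\n", vha->host_no);
    return ha->fce_enabled;
}

int main(void)
{
    struct qla_hw_data hw = { .fce_enabled = 1 };
    scsi_qla_host_t vha = { .host_no = 0, .hw = &hw };

    return qla2xxx_fce_usable(&vha) ? 0 : 1;
}
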
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d1d14202575a..ee1f1e794c2d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -299,7 +299,8 @@ struct init_cb_24xx {
299 uint32_t response_q_address[2]; 299 uint32_t response_q_address[2];
300 uint32_t prio_request_q_address[2]; 300 uint32_t prio_request_q_address[2];
301 301
302 uint8_t reserved_2[8]; 302 uint16_t msix;
303 uint8_t reserved_2[6];
303 304
304 uint16_t atio_q_inpointer; 305 uint16_t atio_q_inpointer;
305 uint16_t atio_q_length; 306 uint16_t atio_q_length;
@@ -372,8 +373,9 @@ struct init_cb_24xx {
372 * BIT 17-31 = Reserved 373 * BIT 17-31 = Reserved
373 */ 374 */
374 uint32_t firmware_options_3; 375 uint32_t firmware_options_3;
375 376 uint16_t qos;
376 uint8_t reserved_3[24]; 377 uint16_t rid;
378 uint8_t reserved_3[20];
377}; 379};
378 380
379/* 381/*
@@ -754,7 +756,8 @@ struct abort_entry_24xx {
754 756
755 uint32_t handle_to_abort; /* System handle to abort. */ 757 uint32_t handle_to_abort; /* System handle to abort. */
756 758
757 uint8_t reserved_1[32]; 759 uint16_t req_que_no;
760 uint8_t reserved_1[30];
758 761
759 uint8_t port_id[3]; /* PortID of destination port. */ 762 uint8_t port_id[3]; /* PortID of destination port. */
760 uint8_t vp_index; 763 uint8_t vp_index;
@@ -1258,7 +1261,8 @@ struct qla_npiv_header {
1258struct qla_npiv_entry { 1261struct qla_npiv_entry {
1259 uint16_t flags; 1262 uint16_t flags;
1260 uint16_t vf_id; 1263 uint16_t vf_id;
1261 uint16_t qos; 1264 uint8_t q_qos;
1265 uint8_t f_qos;
1262 uint16_t unused1; 1266 uint16_t unused1;
1263 uint8_t port_name[WWN_SIZE]; 1267 uint8_t port_name[WWN_SIZE];
1264 uint8_t node_name[WWN_SIZE]; 1268 uint8_t node_name[WWN_SIZE];
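
One detail worth spelling out about the qla_fw.h hunks (editor's note): every new firmware-interface field is carved out of an adjacent reserved array so the IOCB layouts keep their size, e.g. reserved_2[8] becomes msix (2 bytes) plus reserved_2[6], reserved_3[24] becomes qos (2) plus rid (2) plus reserved_3[20], and reserved_1[32] becomes req_que_no (2) plus reserved_1[30]. A small compile-time check of that bookkeeping, using simplified stand-in structs (old_tail and new_tail are not driver names).

#include <assert.h>
#include <stdint.h>

/* Old and new tails of init_cb_24xx around reserved_3, reduced to the
 * fields that matter for the size bookkeeping. */
struct old_tail {
    uint32_t firmware_options_3;
    uint8_t  reserved_3[24];
};

struct new_tail {
    uint32_t firmware_options_3;
    uint16_t qos;
    uint16_t rid;
    uint8_t  reserved_3[20];
};

static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
              "new fields must come out of the reserved bytes");

int main(void) { return 0; }
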
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 753dbe6cce6e..0011e31205db 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -63,6 +63,7 @@ extern int ql2xallocfwdump;
63extern int ql2xextended_error_logging; 63extern int ql2xextended_error_logging;
64extern int ql2xqfullrampup; 64extern int ql2xqfullrampup;
65extern int ql2xiidmaenable; 65extern int ql2xiidmaenable;
66extern int ql2xmaxqueues;
66 67
67extern int qla2x00_loop_reset(scsi_qla_host_t *); 68extern int qla2x00_loop_reset(scsi_qla_host_t *);
68extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 69extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -72,7 +73,10 @@ extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
72 uint16_t, uint16_t); 73 uint16_t, uint16_t);
73 74
74extern void qla2x00_abort_fcport_cmds(fc_port_t *); 75extern void qla2x00_abort_fcport_cmds(fc_port_t *);
75 76extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
77 struct qla_hw_data *);
78extern void qla2x00_free_host(struct scsi_qla_host *);
79extern void qla2x00_relogin(struct scsi_qla_host *);
76/* 80/*
77 * Global Functions in qla_mid.c source file. 81 * Global Functions in qla_mid.c source file.
78 */ 82 */
@@ -94,7 +98,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
94extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); 98extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
95extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); 99extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
96 100
97extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); 101extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
98 102
99extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); 103extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
100 104
@@ -105,10 +109,11 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
105 109
106extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); 110extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
107 111
108extern void qla2xxx_wake_dpc(scsi_qla_host_t *); 112extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
109extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *); 113extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
110extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *); 114extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
111extern void qla2x00_vp_abort_isp(scsi_qla_host_t *); 115 uint16_t *);
116extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
112 117
113/* 118/*
114 * Global Function Prototypes in qla_iocb.c source file. 119 * Global Function Prototypes in qla_iocb.c source file.
@@ -119,8 +124,10 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
119extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); 124extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
120extern int qla2x00_start_scsi(srb_t *sp); 125extern int qla2x00_start_scsi(srb_t *sp);
121extern int qla24xx_start_scsi(srb_t *sp); 126extern int qla24xx_start_scsi(srb_t *sp);
122int qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t); 127int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
123int __qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t); 128 uint16_t, uint16_t, uint8_t);
129int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
130 uint16_t, uint16_t, uint8_t);
124 131
125/* 132/*
126 * Global Function Prototypes in qla_mbx.c source file. 133 * Global Function Prototypes in qla_mbx.c source file.
@@ -154,7 +161,7 @@ extern int
154qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 161qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
155 162
156extern int 163extern int
157qla2x00_abort_command(scsi_qla_host_t *, srb_t *); 164qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
158 165
159extern int 166extern int
160qla2x00_abort_target(struct fc_port *, unsigned int); 167qla2x00_abort_target(struct fc_port *, unsigned int);
@@ -225,7 +232,7 @@ extern int
225qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 232qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
226 dma_addr_t); 233 dma_addr_t);
227 234
228extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 235extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
229extern int qla24xx_abort_target(struct fc_port *, unsigned int); 236extern int qla24xx_abort_target(struct fc_port *, unsigned int);
230extern int qla24xx_lun_reset(struct fc_port *, unsigned int); 237extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
231 238
@@ -264,10 +271,10 @@ extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
264extern irqreturn_t qla2100_intr_handler(int, void *); 271extern irqreturn_t qla2100_intr_handler(int, void *);
265extern irqreturn_t qla2300_intr_handler(int, void *); 272extern irqreturn_t qla2300_intr_handler(int, void *);
266extern irqreturn_t qla24xx_intr_handler(int, void *); 273extern irqreturn_t qla24xx_intr_handler(int, void *);
267extern void qla2x00_process_response_queue(struct scsi_qla_host *); 274extern void qla2x00_process_response_queue(struct rsp_que *);
268extern void qla24xx_process_response_queue(struct scsi_qla_host *); 275extern void qla24xx_process_response_queue(struct rsp_que *);
269 276
270extern int qla2x00_request_irqs(scsi_qla_host_t *); 277extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
271extern void qla2x00_free_irqs(scsi_qla_host_t *); 278extern void qla2x00_free_irqs(scsi_qla_host_t *);
272 279
273/* 280/*
@@ -367,4 +374,27 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
367 */ 374 */
368extern int qla2x00_dfs_setup(scsi_qla_host_t *); 375extern int qla2x00_dfs_setup(scsi_qla_host_t *);
369extern int qla2x00_dfs_remove(scsi_qla_host_t *); 376extern int qla2x00_dfs_remove(scsi_qla_host_t *);
377
 378/* Global function prototypes for multi-q */
379extern int qla25xx_request_irq(struct rsp_que *);
380extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *,
381 uint8_t);
382extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *,
383 uint8_t);
384extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
385 uint16_t, uint8_t, uint8_t);
386extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
387 uint16_t);
388extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
389extern void qla2x00_init_response_q_entries(struct rsp_que *);
390extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
391extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
392extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
393extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);
394extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
395extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
396extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
397extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
398extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
399extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
370#endif /* _QLA_GBL_H */ 400#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index c2a4bfbcb05b..0a6f72973996 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -22,8 +22,9 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
22 * Returns a pointer to the @ha's ms_iocb. 22 * Returns a pointer to the @ha's ms_iocb.
23 */ 23 */
24void * 24void *
25qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) 25qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
26{ 26{
27 struct qla_hw_data *ha = vha->hw;
27 ms_iocb_entry_t *ms_pkt; 28 ms_iocb_entry_t *ms_pkt;
28 29
29 ms_pkt = ha->ms_iocb; 30 ms_pkt = ha->ms_iocb;
@@ -59,8 +60,9 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
59 * Returns a pointer to the @ha's ms_iocb. 60 * Returns a pointer to the @ha's ms_iocb.
60 */ 61 */
61void * 62void *
62qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) 63qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
63{ 64{
65 struct qla_hw_data *ha = vha->hw;
64 struct ct_entry_24xx *ct_pkt; 66 struct ct_entry_24xx *ct_pkt;
65 67
66 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 68 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
@@ -82,7 +84,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
82 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 84 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
83 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 85 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
84 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 86 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
85 ct_pkt->vp_index = ha->vp_idx; 87 ct_pkt->vp_index = vha->vp_idx;
86 88
87 return (ct_pkt); 89 return (ct_pkt);
88} 90}
@@ -110,16 +112,17 @@ qla2x00_prep_ct_req(struct ct_sns_req *ct_req, uint16_t cmd, uint16_t rsp_size)
110} 112}
111 113
112static int 114static int
113qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt, 115qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
114 struct ct_sns_rsp *ct_rsp, const char *routine) 116 struct ct_sns_rsp *ct_rsp, const char *routine)
115{ 117{
116 int rval; 118 int rval;
117 uint16_t comp_status; 119 uint16_t comp_status;
120 struct qla_hw_data *ha = vha->hw;
118 121
119 rval = QLA_FUNCTION_FAILED; 122 rval = QLA_FUNCTION_FAILED;
120 if (ms_pkt->entry_status != 0) { 123 if (ms_pkt->entry_status != 0) {
121 DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n", 124 DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
122 ha->host_no, routine, ms_pkt->entry_status)); 125 vha->host_no, routine, ms_pkt->entry_status));
123 } else { 126 } else {
124 if (IS_FWI2_CAPABLE(ha)) 127 if (IS_FWI2_CAPABLE(ha))
125 comp_status = le16_to_cpu( 128 comp_status = le16_to_cpu(
@@ -133,7 +136,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
133 if (ct_rsp->header.response != 136 if (ct_rsp->header.response !=
134 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 137 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
135 DEBUG2_3(printk("scsi(%ld): %s failed, " 138 DEBUG2_3(printk("scsi(%ld): %s failed, "
136 "rejected request:\n", ha->host_no, 139 "rejected request:\n", vha->host_no,
137 routine)); 140 routine));
138 DEBUG2_3(qla2x00_dump_buffer( 141 DEBUG2_3(qla2x00_dump_buffer(
139 (uint8_t *)&ct_rsp->header, 142 (uint8_t *)&ct_rsp->header,
@@ -144,7 +147,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
144 break; 147 break;
145 default: 148 default:
146 DEBUG2_3(printk("scsi(%ld): %s failed, completion " 149 DEBUG2_3(printk("scsi(%ld): %s failed, completion "
147 "status (%x).\n", ha->host_no, routine, 150 "status (%x).\n", vha->host_no, routine,
148 comp_status)); 151 comp_status));
149 break; 152 break;
150 } 153 }
@@ -160,21 +163,21 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
160 * Returns 0 on success. 163 * Returns 0 on success.
161 */ 164 */
162int 165int
163qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport) 166qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
164{ 167{
165 int rval; 168 int rval;
166 169
167 ms_iocb_entry_t *ms_pkt; 170 ms_iocb_entry_t *ms_pkt;
168 struct ct_sns_req *ct_req; 171 struct ct_sns_req *ct_req;
169 struct ct_sns_rsp *ct_rsp; 172 struct ct_sns_rsp *ct_rsp;
173 struct qla_hw_data *ha = vha->hw;
170 174
171 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 175 if (IS_QLA2100(ha) || IS_QLA2200(ha))
172 return (qla2x00_sns_ga_nxt(ha, fcport)); 176 return qla2x00_sns_ga_nxt(vha, fcport);
173 }
174 177
175 /* Issue GA_NXT */ 178 /* Issue GA_NXT */
176 /* Prepare common MS IOCB */ 179 /* Prepare common MS IOCB */
177 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE, 180 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE,
178 GA_NXT_RSP_SIZE); 181 GA_NXT_RSP_SIZE);
179 182
180 /* Prepare CT request */ 183 /* Prepare CT request */
@@ -188,13 +191,13 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
188 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; 191 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
189 192
190 /* Execute MS IOCB */ 193 /* Execute MS IOCB */
191 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 194 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
192 sizeof(ms_iocb_entry_t)); 195 sizeof(ms_iocb_entry_t));
193 if (rval != QLA_SUCCESS) { 196 if (rval != QLA_SUCCESS) {
194 /*EMPTY*/ 197 /*EMPTY*/
195 DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n", 198 DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
196 ha->host_no, rval)); 199 vha->host_no, rval));
197 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GA_NXT") != 200 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
198 QLA_SUCCESS) { 201 QLA_SUCCESS) {
199 rval = QLA_FUNCTION_FAILED; 202 rval = QLA_FUNCTION_FAILED;
200 } else { 203 } else {
@@ -216,7 +219,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
216 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 219 "nn %02x%02x%02x%02x%02x%02x%02x%02x "
217 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 220 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
218 "portid=%02x%02x%02x.\n", 221 "portid=%02x%02x%02x.\n",
219 ha->host_no, 222 vha->host_no,
220 fcport->node_name[0], fcport->node_name[1], 223 fcport->node_name[0], fcport->node_name[1],
221 fcport->node_name[2], fcport->node_name[3], 224 fcport->node_name[2], fcport->node_name[3],
222 fcport->node_name[4], fcport->node_name[5], 225 fcport->node_name[4], fcport->node_name[5],
@@ -242,7 +245,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
242 * Returns 0 on success. 245 * Returns 0 on success.
243 */ 246 */
244int 247int
245qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list) 248qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
246{ 249{
247 int rval; 250 int rval;
248 uint16_t i; 251 uint16_t i;
@@ -252,16 +255,16 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
252 struct ct_sns_rsp *ct_rsp; 255 struct ct_sns_rsp *ct_rsp;
253 256
254 struct ct_sns_gid_pt_data *gid_data; 257 struct ct_sns_gid_pt_data *gid_data;
258 struct qla_hw_data *ha = vha->hw;
255 259
256 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 260 if (IS_QLA2100(ha) || IS_QLA2200(ha))
257 return (qla2x00_sns_gid_pt(ha, list)); 261 return qla2x00_sns_gid_pt(vha, list);
258 }
259 262
260 gid_data = NULL; 263 gid_data = NULL;
261 264
262 /* Issue GID_PT */ 265 /* Issue GID_PT */
263 /* Prepare common MS IOCB */ 266 /* Prepare common MS IOCB */
264 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE, 267 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
265 GID_PT_RSP_SIZE); 268 GID_PT_RSP_SIZE);
266 269
267 /* Prepare CT request */ 270 /* Prepare CT request */
@@ -273,13 +276,13 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
273 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; 276 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
274 277
275 /* Execute MS IOCB */ 278 /* Execute MS IOCB */
276 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 279 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
277 sizeof(ms_iocb_entry_t)); 280 sizeof(ms_iocb_entry_t));
278 if (rval != QLA_SUCCESS) { 281 if (rval != QLA_SUCCESS) {
279 /*EMPTY*/ 282 /*EMPTY*/
280 DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n", 283 DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
281 ha->host_no, rval)); 284 vha->host_no, rval));
282 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GID_PT") != 285 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
283 QLA_SUCCESS) { 286 QLA_SUCCESS) {
284 rval = QLA_FUNCTION_FAILED; 287 rval = QLA_FUNCTION_FAILED;
285 } else { 288 } else {
@@ -320,7 +323,7 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
320 * Returns 0 on success. 323 * Returns 0 on success.
321 */ 324 */
322int 325int
323qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list) 326qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
324{ 327{
325 int rval; 328 int rval;
326 uint16_t i; 329 uint16_t i;
@@ -328,15 +331,15 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
328 ms_iocb_entry_t *ms_pkt; 331 ms_iocb_entry_t *ms_pkt;
329 struct ct_sns_req *ct_req; 332 struct ct_sns_req *ct_req;
330 struct ct_sns_rsp *ct_rsp; 333 struct ct_sns_rsp *ct_rsp;
334 struct qla_hw_data *ha = vha->hw;
331 335
332 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 336 if (IS_QLA2100(ha) || IS_QLA2200(ha))
333 return (qla2x00_sns_gpn_id(ha, list)); 337 return qla2x00_sns_gpn_id(vha, list);
334 }
335 338
336 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 339 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
337 /* Issue GPN_ID */ 340 /* Issue GPN_ID */
338 /* Prepare common MS IOCB */ 341 /* Prepare common MS IOCB */
339 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE, 342 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
340 GPN_ID_RSP_SIZE); 343 GPN_ID_RSP_SIZE);
341 344
342 /* Prepare CT request */ 345 /* Prepare CT request */
@@ -350,13 +353,13 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
350 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; 353 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
351 354
352 /* Execute MS IOCB */ 355 /* Execute MS IOCB */
353 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 356 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
354 sizeof(ms_iocb_entry_t)); 357 sizeof(ms_iocb_entry_t));
355 if (rval != QLA_SUCCESS) { 358 if (rval != QLA_SUCCESS) {
356 /*EMPTY*/ 359 /*EMPTY*/
357 DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " 360 DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
358 "(%d).\n", ha->host_no, rval)); 361 "(%d).\n", vha->host_no, rval));
359 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 362 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
360 "GPN_ID") != QLA_SUCCESS) { 363 "GPN_ID") != QLA_SUCCESS) {
361 rval = QLA_FUNCTION_FAILED; 364 rval = QLA_FUNCTION_FAILED;
362 } else { 365 } else {
@@ -381,23 +384,22 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
381 * Returns 0 on success. 384 * Returns 0 on success.
382 */ 385 */
383int 386int
384qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list) 387qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
385{ 388{
386 int rval; 389 int rval;
387 uint16_t i; 390 uint16_t i;
388 391 struct qla_hw_data *ha = vha->hw;
389 ms_iocb_entry_t *ms_pkt; 392 ms_iocb_entry_t *ms_pkt;
390 struct ct_sns_req *ct_req; 393 struct ct_sns_req *ct_req;
391 struct ct_sns_rsp *ct_rsp; 394 struct ct_sns_rsp *ct_rsp;
392 395
393 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 396 if (IS_QLA2100(ha) || IS_QLA2200(ha))
394 return (qla2x00_sns_gnn_id(ha, list)); 397 return qla2x00_sns_gnn_id(vha, list);
395 }
396 398
397 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 399 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
398 /* Issue GNN_ID */ 400 /* Issue GNN_ID */
399 /* Prepare common MS IOCB */ 401 /* Prepare common MS IOCB */
400 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE, 402 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
401 GNN_ID_RSP_SIZE); 403 GNN_ID_RSP_SIZE);
402 404
403 /* Prepare CT request */ 405 /* Prepare CT request */
@@ -411,13 +413,13 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
411 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; 413 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
412 414
413 /* Execute MS IOCB */ 415 /* Execute MS IOCB */
414 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 416 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
415 sizeof(ms_iocb_entry_t)); 417 sizeof(ms_iocb_entry_t));
416 if (rval != QLA_SUCCESS) { 418 if (rval != QLA_SUCCESS) {
417 /*EMPTY*/ 419 /*EMPTY*/
418 DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed " 420 DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
419 "(%d).\n", ha->host_no, rval)); 421 "(%d).\n", vha->host_no, rval));
420 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 422 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
421 "GNN_ID") != QLA_SUCCESS) { 423 "GNN_ID") != QLA_SUCCESS) {
422 rval = QLA_FUNCTION_FAILED; 424 rval = QLA_FUNCTION_FAILED;
423 } else { 425 } else {
@@ -429,7 +431,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
429 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 431 "nn %02x%02x%02x%02x%02x%02x%02x%02x "
430 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 432 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
431 "portid=%02x%02x%02x.\n", 433 "portid=%02x%02x%02x.\n",
432 ha->host_no, 434 vha->host_no,
433 list[i].node_name[0], list[i].node_name[1], 435 list[i].node_name[0], list[i].node_name[1],
434 list[i].node_name[2], list[i].node_name[3], 436 list[i].node_name[2], list[i].node_name[3],
435 list[i].node_name[4], list[i].node_name[5], 437 list[i].node_name[4], list[i].node_name[5],
@@ -457,21 +459,20 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
457 * Returns 0 on success. 459 * Returns 0 on success.
458 */ 460 */
459int 461int
460qla2x00_rft_id(scsi_qla_host_t *ha) 462qla2x00_rft_id(scsi_qla_host_t *vha)
461{ 463{
462 int rval; 464 int rval;
463 465 struct qla_hw_data *ha = vha->hw;
464 ms_iocb_entry_t *ms_pkt; 466 ms_iocb_entry_t *ms_pkt;
465 struct ct_sns_req *ct_req; 467 struct ct_sns_req *ct_req;
466 struct ct_sns_rsp *ct_rsp; 468 struct ct_sns_rsp *ct_rsp;
467 469
468 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 470 if (IS_QLA2100(ha) || IS_QLA2200(ha))
469 return (qla2x00_sns_rft_id(ha)); 471 return qla2x00_sns_rft_id(vha);
470 }
471 472
472 /* Issue RFT_ID */ 473 /* Issue RFT_ID */
473 /* Prepare common MS IOCB */ 474 /* Prepare common MS IOCB */
474 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE, 475 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE,
475 RFT_ID_RSP_SIZE); 476 RFT_ID_RSP_SIZE);
476 477
477 /* Prepare CT request */ 478 /* Prepare CT request */
@@ -480,25 +481,25 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
480 ct_rsp = &ha->ct_sns->p.rsp; 481 ct_rsp = &ha->ct_sns->p.rsp;
481 482
482 /* Prepare CT arguments -- port_id, FC-4 types */ 483 /* Prepare CT arguments -- port_id, FC-4 types */
483 ct_req->req.rft_id.port_id[0] = ha->d_id.b.domain; 484 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
484 ct_req->req.rft_id.port_id[1] = ha->d_id.b.area; 485 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
485 ct_req->req.rft_id.port_id[2] = ha->d_id.b.al_pa; 486 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
486 487
487 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ 488 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
488 489
489 /* Execute MS IOCB */ 490 /* Execute MS IOCB */
490 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 491 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
491 sizeof(ms_iocb_entry_t)); 492 sizeof(ms_iocb_entry_t));
492 if (rval != QLA_SUCCESS) { 493 if (rval != QLA_SUCCESS) {
493 /*EMPTY*/ 494 /*EMPTY*/
494 DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n", 495 DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
495 ha->host_no, rval)); 496 vha->host_no, rval));
496 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFT_ID") != 497 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
497 QLA_SUCCESS) { 498 QLA_SUCCESS) {
498 rval = QLA_FUNCTION_FAILED; 499 rval = QLA_FUNCTION_FAILED;
499 } else { 500 } else {
500 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 501 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
501 ha->host_no)); 502 vha->host_no));
502 } 503 }
503 504
504 return (rval); 505 return (rval);
@@ -511,23 +512,23 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
511 * Returns 0 on success. 512 * Returns 0 on success.
512 */ 513 */
513int 514int
514qla2x00_rff_id(scsi_qla_host_t *ha) 515qla2x00_rff_id(scsi_qla_host_t *vha)
515{ 516{
516 int rval; 517 int rval;
517 518 struct qla_hw_data *ha = vha->hw;
518 ms_iocb_entry_t *ms_pkt; 519 ms_iocb_entry_t *ms_pkt;
519 struct ct_sns_req *ct_req; 520 struct ct_sns_req *ct_req;
520 struct ct_sns_rsp *ct_rsp; 521 struct ct_sns_rsp *ct_rsp;
521 522
522 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 523 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
523 DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " 524 DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
524 "ISP2100/ISP2200.\n", ha->host_no)); 525 "ISP2100/ISP2200.\n", vha->host_no));
525 return (QLA_SUCCESS); 526 return (QLA_SUCCESS);
526 } 527 }
527 528
528 /* Issue RFF_ID */ 529 /* Issue RFF_ID */
529 /* Prepare common MS IOCB */ 530 /* Prepare common MS IOCB */
530 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE, 531 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE,
531 RFF_ID_RSP_SIZE); 532 RFF_ID_RSP_SIZE);
532 533
533 /* Prepare CT request */ 534 /* Prepare CT request */
@@ -536,26 +537,26 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
536 ct_rsp = &ha->ct_sns->p.rsp; 537 ct_rsp = &ha->ct_sns->p.rsp;
537 538
538 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ 539 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
539 ct_req->req.rff_id.port_id[0] = ha->d_id.b.domain; 540 ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
540 ct_req->req.rff_id.port_id[1] = ha->d_id.b.area; 541 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
541 ct_req->req.rff_id.port_id[2] = ha->d_id.b.al_pa; 542 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
542 543
543 ct_req->req.rff_id.fc4_feature = BIT_1; 544 ct_req->req.rff_id.fc4_feature = BIT_1;
544 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ 545 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
545 546
546 /* Execute MS IOCB */ 547 /* Execute MS IOCB */
547 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 548 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
548 sizeof(ms_iocb_entry_t)); 549 sizeof(ms_iocb_entry_t));
549 if (rval != QLA_SUCCESS) { 550 if (rval != QLA_SUCCESS) {
550 /*EMPTY*/ 551 /*EMPTY*/
551 DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", 552 DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
552 ha->host_no, rval)); 553 vha->host_no, rval));
553 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFF_ID") != 554 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
554 QLA_SUCCESS) { 555 QLA_SUCCESS) {
555 rval = QLA_FUNCTION_FAILED; 556 rval = QLA_FUNCTION_FAILED;
556 } else { 557 } else {
557 DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", 558 DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
558 ha->host_no)); 559 vha->host_no));
559 } 560 }
560 561
561 return (rval); 562 return (rval);
@@ -568,21 +569,20 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
568 * Returns 0 on success. 569 * Returns 0 on success.
569 */ 570 */
570int 571int
571qla2x00_rnn_id(scsi_qla_host_t *ha) 572qla2x00_rnn_id(scsi_qla_host_t *vha)
572{ 573{
573 int rval; 574 int rval;
574 575 struct qla_hw_data *ha = vha->hw;
575 ms_iocb_entry_t *ms_pkt; 576 ms_iocb_entry_t *ms_pkt;
576 struct ct_sns_req *ct_req; 577 struct ct_sns_req *ct_req;
577 struct ct_sns_rsp *ct_rsp; 578 struct ct_sns_rsp *ct_rsp;
578 579
579 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 580 if (IS_QLA2100(ha) || IS_QLA2200(ha))
580 return (qla2x00_sns_rnn_id(ha)); 581 return qla2x00_sns_rnn_id(vha);
581 }
582 582
583 /* Issue RNN_ID */ 583 /* Issue RNN_ID */
584 /* Prepare common MS IOCB */ 584 /* Prepare common MS IOCB */
585 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE, 585 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE,
586 RNN_ID_RSP_SIZE); 586 RNN_ID_RSP_SIZE);
587 587
588 /* Prepare CT request */ 588 /* Prepare CT request */
@@ -591,33 +591,34 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
591 ct_rsp = &ha->ct_sns->p.rsp; 591 ct_rsp = &ha->ct_sns->p.rsp;
592 592
593 /* Prepare CT arguments -- port_id, node_name */ 593 /* Prepare CT arguments -- port_id, node_name */
594 ct_req->req.rnn_id.port_id[0] = ha->d_id.b.domain; 594 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
595 ct_req->req.rnn_id.port_id[1] = ha->d_id.b.area; 595 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
596 ct_req->req.rnn_id.port_id[2] = ha->d_id.b.al_pa; 596 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
597 597
598 memcpy(ct_req->req.rnn_id.node_name, ha->node_name, WWN_SIZE); 598 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
599 599
600 /* Execute MS IOCB */ 600 /* Execute MS IOCB */
601 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 601 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
602 sizeof(ms_iocb_entry_t)); 602 sizeof(ms_iocb_entry_t));
603 if (rval != QLA_SUCCESS) { 603 if (rval != QLA_SUCCESS) {
604 /*EMPTY*/ 604 /*EMPTY*/
605 DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n", 605 DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
606 ha->host_no, rval)); 606 vha->host_no, rval));
607 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RNN_ID") != 607 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
608 QLA_SUCCESS) { 608 QLA_SUCCESS) {
609 rval = QLA_FUNCTION_FAILED; 609 rval = QLA_FUNCTION_FAILED;
610 } else { 610 } else {
611 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 611 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
612 ha->host_no)); 612 vha->host_no));
613 } 613 }
614 614
615 return (rval); 615 return (rval);
616} 616}
617 617
618void 618void
619qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn) 619qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
620{ 620{
621 struct qla_hw_data *ha = vha->hw;
621 sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number, 622 sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
622 ha->fw_major_version, ha->fw_minor_version, 623 ha->fw_major_version, ha->fw_minor_version,
623 ha->fw_subminor_version, qla2x00_version_str); 624 ha->fw_subminor_version, qla2x00_version_str);
@@ -630,23 +631,24 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
630 * Returns 0 on success. 631 * Returns 0 on success.
631 */ 632 */
632int 633int
633qla2x00_rsnn_nn(scsi_qla_host_t *ha) 634qla2x00_rsnn_nn(scsi_qla_host_t *vha)
634{ 635{
635 int rval; 636 int rval;
637 struct qla_hw_data *ha = vha->hw;
636 ms_iocb_entry_t *ms_pkt; 638 ms_iocb_entry_t *ms_pkt;
637 struct ct_sns_req *ct_req; 639 struct ct_sns_req *ct_req;
638 struct ct_sns_rsp *ct_rsp; 640 struct ct_sns_rsp *ct_rsp;
639 641
640 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 642 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
641 DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on " 643 DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
642 "ISP2100/ISP2200.\n", ha->host_no)); 644 "ISP2100/ISP2200.\n", vha->host_no));
643 return (QLA_SUCCESS); 645 return (QLA_SUCCESS);
644 } 646 }
645 647
646 /* Issue RSNN_NN */ 648 /* Issue RSNN_NN */
647 /* Prepare common MS IOCB */ 649 /* Prepare common MS IOCB */
648 /* Request size adjusted after CT preparation */ 650 /* Request size adjusted after CT preparation */
649 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE); 651 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE);
650 652
651 /* Prepare CT request */ 653 /* Prepare CT request */
652 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD, 654 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@@ -654,10 +656,10 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
654 ct_rsp = &ha->ct_sns->p.rsp; 656 ct_rsp = &ha->ct_sns->p.rsp;
655 657
656 /* Prepare CT arguments -- node_name, symbolic node_name, size */ 658 /* Prepare CT arguments -- node_name, symbolic node_name, size */
657 memcpy(ct_req->req.rsnn_nn.node_name, ha->node_name, WWN_SIZE); 659 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
658 660
659 /* Prepare the Symbolic Node Name */ 661 /* Prepare the Symbolic Node Name */
660 qla2x00_get_sym_node_name(ha, ct_req->req.rsnn_nn.sym_node_name); 662 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name);
661 663
662 /* Calculate SNN length */ 664 /* Calculate SNN length */
663 ct_req->req.rsnn_nn.name_len = 665 ct_req->req.rsnn_nn.name_len =
@@ -669,18 +671,18 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
669 ms_pkt->dseg_req_length = ms_pkt->req_bytecount; 671 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
670 672
671 /* Execute MS IOCB */ 673 /* Execute MS IOCB */
672 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 674 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
673 sizeof(ms_iocb_entry_t)); 675 sizeof(ms_iocb_entry_t));
674 if (rval != QLA_SUCCESS) { 676 if (rval != QLA_SUCCESS) {
675 /*EMPTY*/ 677 /*EMPTY*/
676 DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n", 678 DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
677 ha->host_no, rval)); 679 vha->host_no, rval));
678 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RSNN_NN") != 680 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
679 QLA_SUCCESS) { 681 QLA_SUCCESS) {
680 rval = QLA_FUNCTION_FAILED; 682 rval = QLA_FUNCTION_FAILED;
681 } else { 683 } else {
682 DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n", 684 DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
683 ha->host_no)); 685 vha->host_no));
684 } 686 }
685 687
686 return (rval); 688 return (rval);
@@ -696,11 +698,12 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
696 * Returns a pointer to the @ha's sns_cmd. 698 * Returns a pointer to the @ha's sns_cmd.
697 */ 699 */
698static inline struct sns_cmd_pkt * 700static inline struct sns_cmd_pkt *
699qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len, 701qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
700 uint16_t data_size) 702 uint16_t data_size)
701{ 703{
702 uint16_t wc; 704 uint16_t wc;
703 struct sns_cmd_pkt *sns_cmd; 705 struct sns_cmd_pkt *sns_cmd;
706 struct qla_hw_data *ha = vha->hw;
704 707
705 sns_cmd = ha->sns_cmd; 708 sns_cmd = ha->sns_cmd;
706 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt)); 709 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
@@ -726,15 +729,15 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len,
726 * Returns 0 on success. 729 * Returns 0 on success.
727 */ 730 */
728static int 731static int
729qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport) 732qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
730{ 733{
731 int rval; 734 int rval;
732 735 struct qla_hw_data *ha = vha->hw;
733 struct sns_cmd_pkt *sns_cmd; 736 struct sns_cmd_pkt *sns_cmd;
734 737
735 /* Issue GA_NXT. */ 738 /* Issue GA_NXT. */
736 /* Prepare SNS command request. */ 739 /* Prepare SNS command request. */
737 sns_cmd = qla2x00_prep_sns_cmd(ha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN, 740 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
738 GA_NXT_SNS_DATA_SIZE); 741 GA_NXT_SNS_DATA_SIZE);
739 742
740 /* Prepare SNS command arguments -- port_id. */ 743 /* Prepare SNS command arguments -- port_id. */
@@ -743,16 +746,16 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
743 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain; 746 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
744 747
745 /* Execute SNS command. */ 748 /* Execute SNS command. */
746 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2, 749 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
747 sizeof(struct sns_cmd_pkt)); 750 sizeof(struct sns_cmd_pkt));
748 if (rval != QLA_SUCCESS) { 751 if (rval != QLA_SUCCESS) {
749 /*EMPTY*/ 752 /*EMPTY*/
750 DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n", 753 DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
751 ha->host_no, rval)); 754 vha->host_no, rval));
752 } else if (sns_cmd->p.gan_data[8] != 0x80 || 755 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
753 sns_cmd->p.gan_data[9] != 0x02) { 756 sns_cmd->p.gan_data[9] != 0x02) {
754 DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, " 757 DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
755 "ga_nxt_rsp:\n", ha->host_no)); 758 "ga_nxt_rsp:\n", vha->host_no));
756 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16)); 759 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
757 rval = QLA_FUNCTION_FAILED; 760 rval = QLA_FUNCTION_FAILED;
758 } else { 761 } else {
@@ -772,7 +775,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
772 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 775 "nn %02x%02x%02x%02x%02x%02x%02x%02x "
773 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 776 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
774 "portid=%02x%02x%02x.\n", 777 "portid=%02x%02x%02x.\n",
775 ha->host_no, 778 vha->host_no,
776 fcport->node_name[0], fcport->node_name[1], 779 fcport->node_name[0], fcport->node_name[1],
777 fcport->node_name[2], fcport->node_name[3], 780 fcport->node_name[2], fcport->node_name[3],
778 fcport->node_name[4], fcport->node_name[5], 781 fcport->node_name[4], fcport->node_name[5],
@@ -800,33 +803,33 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
800 * Returns 0 on success. 803 * Returns 0 on success.
801 */ 804 */
802static int 805static int
803qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list) 806qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
804{ 807{
805 int rval; 808 int rval;
806 809 struct qla_hw_data *ha = vha->hw;
807 uint16_t i; 810 uint16_t i;
808 uint8_t *entry; 811 uint8_t *entry;
809 struct sns_cmd_pkt *sns_cmd; 812 struct sns_cmd_pkt *sns_cmd;
810 813
811 /* Issue GID_PT. */ 814 /* Issue GID_PT. */
812 /* Prepare SNS command request. */ 815 /* Prepare SNS command request. */
813 sns_cmd = qla2x00_prep_sns_cmd(ha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN, 816 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
814 GID_PT_SNS_DATA_SIZE); 817 GID_PT_SNS_DATA_SIZE);
815 818
816 /* Prepare SNS command arguments -- port_type. */ 819 /* Prepare SNS command arguments -- port_type. */
817 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE; 820 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
818 821
819 /* Execute SNS command. */ 822 /* Execute SNS command. */
820 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2, 823 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
821 sizeof(struct sns_cmd_pkt)); 824 sizeof(struct sns_cmd_pkt));
822 if (rval != QLA_SUCCESS) { 825 if (rval != QLA_SUCCESS) {
823 /*EMPTY*/ 826 /*EMPTY*/
824 DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n", 827 DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
825 ha->host_no, rval)); 828 vha->host_no, rval));
826 } else if (sns_cmd->p.gid_data[8] != 0x80 || 829 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
827 sns_cmd->p.gid_data[9] != 0x02) { 830 sns_cmd->p.gid_data[9] != 0x02) {
828 DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, " 831 DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
829 "gid_rsp:\n", ha->host_no)); 832 "gid_rsp:\n", vha->host_no));
830 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16)); 833 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
831 rval = QLA_FUNCTION_FAILED; 834 rval = QLA_FUNCTION_FAILED;
832 } else { 835 } else {
@@ -867,17 +870,17 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
867 * Returns 0 on success. 870 * Returns 0 on success.
868 */ 871 */
869static int 872static int
870qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list) 873qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
871{ 874{
872 int rval; 875 int rval;
873 876 struct qla_hw_data *ha = vha->hw;
874 uint16_t i; 877 uint16_t i;
875 struct sns_cmd_pkt *sns_cmd; 878 struct sns_cmd_pkt *sns_cmd;
876 879
877 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 880 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
878 /* Issue GPN_ID */ 881 /* Issue GPN_ID */
879 /* Prepare SNS command request. */ 882 /* Prepare SNS command request. */
880 sns_cmd = qla2x00_prep_sns_cmd(ha, GPN_ID_CMD, 883 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
881 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE); 884 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
882 885
883 /* Prepare SNS command arguments -- port_id. */ 886 /* Prepare SNS command arguments -- port_id. */
@@ -886,16 +889,16 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
886 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; 889 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
887 890
888 /* Execute SNS command. */ 891 /* Execute SNS command. */
889 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, 892 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
890 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 893 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
891 if (rval != QLA_SUCCESS) { 894 if (rval != QLA_SUCCESS) {
892 /*EMPTY*/ 895 /*EMPTY*/
893 DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed " 896 DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
894 "(%d).\n", ha->host_no, rval)); 897 "(%d).\n", vha->host_no, rval));
895 } else if (sns_cmd->p.gpn_data[8] != 0x80 || 898 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
896 sns_cmd->p.gpn_data[9] != 0x02) { 899 sns_cmd->p.gpn_data[9] != 0x02) {
897 DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected " 900 DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
898 "request, gpn_rsp:\n", ha->host_no)); 901 "request, gpn_rsp:\n", vha->host_no));
899 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16)); 902 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
900 rval = QLA_FUNCTION_FAILED; 903 rval = QLA_FUNCTION_FAILED;
901 } else { 904 } else {
@@ -922,17 +925,17 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
922 * Returns 0 on success. 925 * Returns 0 on success.
923 */ 926 */
924static int 927static int
925qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list) 928qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
926{ 929{
927 int rval; 930 int rval;
928 931 struct qla_hw_data *ha = vha->hw;
929 uint16_t i; 932 uint16_t i;
930 struct sns_cmd_pkt *sns_cmd; 933 struct sns_cmd_pkt *sns_cmd;
931 934
932 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 935 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
933 /* Issue GNN_ID */ 936 /* Issue GNN_ID */
934 /* Prepare SNS command request. */ 937 /* Prepare SNS command request. */
935 sns_cmd = qla2x00_prep_sns_cmd(ha, GNN_ID_CMD, 938 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
936 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE); 939 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
937 940
938 /* Prepare SNS command arguments -- port_id. */ 941 /* Prepare SNS command arguments -- port_id. */
@@ -941,16 +944,16 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
941 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; 944 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
942 945
943 /* Execute SNS command. */ 946 /* Execute SNS command. */
944 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, 947 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
945 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 948 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
946 if (rval != QLA_SUCCESS) { 949 if (rval != QLA_SUCCESS) {
947 /*EMPTY*/ 950 /*EMPTY*/
948 DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed " 951 DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
949 "(%d).\n", ha->host_no, rval)); 952 "(%d).\n", vha->host_no, rval));
950 } else if (sns_cmd->p.gnn_data[8] != 0x80 || 953 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
951 sns_cmd->p.gnn_data[9] != 0x02) { 954 sns_cmd->p.gnn_data[9] != 0x02) {
952 DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected " 955 DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
953 "request, gnn_rsp:\n", ha->host_no)); 956 "request, gnn_rsp:\n", vha->host_no));
954 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16)); 957 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
955 rval = QLA_FUNCTION_FAILED; 958 rval = QLA_FUNCTION_FAILED;
956 } else { 959 } else {
@@ -962,7 +965,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
962 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 965 "nn %02x%02x%02x%02x%02x%02x%02x%02x "
963 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 966 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
964 "portid=%02x%02x%02x.\n", 967 "portid=%02x%02x%02x.\n",
965 ha->host_no, 968 vha->host_no,
966 list[i].node_name[0], list[i].node_name[1], 969 list[i].node_name[0], list[i].node_name[1],
967 list[i].node_name[2], list[i].node_name[3], 970 list[i].node_name[2], list[i].node_name[3],
968 list[i].node_name[4], list[i].node_name[5], 971 list[i].node_name[4], list[i].node_name[5],
@@ -992,40 +995,40 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
992 * Returns 0 on success. 995 * Returns 0 on success.
993 */ 996 */
994static int 997static int
995qla2x00_sns_rft_id(scsi_qla_host_t *ha) 998qla2x00_sns_rft_id(scsi_qla_host_t *vha)
996{ 999{
997 int rval; 1000 int rval;
998 1001 struct qla_hw_data *ha = vha->hw;
999 struct sns_cmd_pkt *sns_cmd; 1002 struct sns_cmd_pkt *sns_cmd;
1000 1003
1001 /* Issue RFT_ID. */ 1004 /* Issue RFT_ID. */
1002 /* Prepare SNS command request. */ 1005 /* Prepare SNS command request. */
1003 sns_cmd = qla2x00_prep_sns_cmd(ha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN, 1006 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1004 RFT_ID_SNS_DATA_SIZE); 1007 RFT_ID_SNS_DATA_SIZE);
1005 1008
1006 /* Prepare SNS command arguments -- port_id, FC-4 types */ 1009 /* Prepare SNS command arguments -- port_id, FC-4 types */
1007 sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa; 1010 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1008 sns_cmd->p.cmd.param[1] = ha->d_id.b.area; 1011 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1009 sns_cmd->p.cmd.param[2] = ha->d_id.b.domain; 1012 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1010 1013
1011 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */ 1014 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1012 1015
1013 /* Execute SNS command. */ 1016 /* Execute SNS command. */
1014 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2, 1017 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1015 sizeof(struct sns_cmd_pkt)); 1018 sizeof(struct sns_cmd_pkt));
1016 if (rval != QLA_SUCCESS) { 1019 if (rval != QLA_SUCCESS) {
1017 /*EMPTY*/ 1020 /*EMPTY*/
1018 DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n", 1021 DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
1019 ha->host_no, rval)); 1022 vha->host_no, rval));
1020 } else if (sns_cmd->p.rft_data[8] != 0x80 || 1023 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1021 sns_cmd->p.rft_data[9] != 0x02) { 1024 sns_cmd->p.rft_data[9] != 0x02) {
1022 DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, " 1025 DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
1023 "rft_rsp:\n", ha->host_no)); 1026 "rft_rsp:\n", vha->host_no));
1024 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16)); 1027 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
1025 rval = QLA_FUNCTION_FAILED; 1028 rval = QLA_FUNCTION_FAILED;
1026 } else { 1029 } else {
1027 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 1030 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
1028 ha->host_no)); 1031 vha->host_no));
1029 } 1032 }
1030 1033
1031 return (rval); 1034 return (rval);
@@ -1041,47 +1044,47 @@ qla2x00_sns_rft_id(scsi_qla_host_t *ha)
1041 * Returns 0 on success. 1044 * Returns 0 on success.
1042 */ 1045 */
1043static int 1046static int
1044qla2x00_sns_rnn_id(scsi_qla_host_t *ha) 1047qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1045{ 1048{
1046 int rval; 1049 int rval;
1047 1050 struct qla_hw_data *ha = vha->hw;
1048 struct sns_cmd_pkt *sns_cmd; 1051 struct sns_cmd_pkt *sns_cmd;
1049 1052
1050 /* Issue RNN_ID. */ 1053 /* Issue RNN_ID. */
1051 /* Prepare SNS command request. */ 1054 /* Prepare SNS command request. */
1052 sns_cmd = qla2x00_prep_sns_cmd(ha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN, 1055 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1053 RNN_ID_SNS_DATA_SIZE); 1056 RNN_ID_SNS_DATA_SIZE);
1054 1057
1055 /* Prepare SNS command arguments -- port_id, nodename. */ 1058 /* Prepare SNS command arguments -- port_id, nodename. */
1056 sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa; 1059 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1057 sns_cmd->p.cmd.param[1] = ha->d_id.b.area; 1060 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1058 sns_cmd->p.cmd.param[2] = ha->d_id.b.domain; 1061 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1059 1062
1060 sns_cmd->p.cmd.param[4] = ha->node_name[7]; 1063 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1061 sns_cmd->p.cmd.param[5] = ha->node_name[6]; 1064 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1062 sns_cmd->p.cmd.param[6] = ha->node_name[5]; 1065 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1063 sns_cmd->p.cmd.param[7] = ha->node_name[4]; 1066 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1064 sns_cmd->p.cmd.param[8] = ha->node_name[3]; 1067 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1065 sns_cmd->p.cmd.param[9] = ha->node_name[2]; 1068 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1066 sns_cmd->p.cmd.param[10] = ha->node_name[1]; 1069 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1067 sns_cmd->p.cmd.param[11] = ha->node_name[0]; 1070 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1068 1071
1069 /* Execute SNS command. */ 1072 /* Execute SNS command. */
1070 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2, 1073 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1071 sizeof(struct sns_cmd_pkt)); 1074 sizeof(struct sns_cmd_pkt));
1072 if (rval != QLA_SUCCESS) { 1075 if (rval != QLA_SUCCESS) {
1073 /*EMPTY*/ 1076 /*EMPTY*/
1074 DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n", 1077 DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
1075 ha->host_no, rval)); 1078 vha->host_no, rval));
1076 } else if (sns_cmd->p.rnn_data[8] != 0x80 || 1079 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1077 sns_cmd->p.rnn_data[9] != 0x02) { 1080 sns_cmd->p.rnn_data[9] != 0x02) {
1078 DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, " 1081 DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
1079 "rnn_rsp:\n", ha->host_no)); 1082 "rnn_rsp:\n", vha->host_no));
1080 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16)); 1083 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
1081 rval = QLA_FUNCTION_FAILED; 1084 rval = QLA_FUNCTION_FAILED;
1082 } else { 1085 } else {
1083 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 1086 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
1084 ha->host_no)); 1087 vha->host_no));
1085 } 1088 }
1086 1089
1087 return (rval); 1090 return (rval);
@@ -1094,25 +1097,25 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
1094 * Returns 0 on success. 1097 * Returns 0 on success.
1095 */ 1098 */
1096static int 1099static int
1097qla2x00_mgmt_svr_login(scsi_qla_host_t *ha) 1100qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1098{ 1101{
1099 int ret; 1102 int ret;
1100 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1103 uint16_t mb[MAILBOX_REGISTER_COUNT];
1101 1104 struct qla_hw_data *ha = vha->hw;
1102 ret = QLA_SUCCESS; 1105 ret = QLA_SUCCESS;
1103 if (ha->flags.management_server_logged_in) 1106 if (vha->flags.management_server_logged_in)
1104 return ret; 1107 return ret;
1105 1108
1106 ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1107 mb, BIT_1); 1110 mb, BIT_1);
1108 if (mb[0] != MBS_COMMAND_COMPLETE) { 1111 if (mb[0] != MBS_COMMAND_COMPLETE) {
1109 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1110 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
1111 __func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1], 1114 __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
1112 mb[2], mb[6], mb[7])); 1115 mb[2], mb[6], mb[7]));
1113 ret = QLA_FUNCTION_FAILED; 1116 ret = QLA_FUNCTION_FAILED;
1114 } else 1117 } else
1115 ha->flags.management_server_logged_in = 1; 1118 vha->flags.management_server_logged_in = 1;
1116 1119
1117 return ret; 1120 return ret;
1118} 1121}
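In the hunk above, fabric_login() is issued with domain 0xff, area 0xff, al_pa 0xfa, which corresponds to the Fibre Channel well-known management-server address FFFFFAh that the CT/FDMI traffic in this file is directed at. A small stand-alone sketch (fc_d_id is an illustrative helper, not driver code) of how those three bytes form the 24-bit D_ID:

    #include <stdint.h>
    #include <stdio.h>

    /* FC 24-bit port address: domain (bits 23-16), area (15-8), AL_PA (7-0). */
    static uint32_t fc_d_id(uint8_t domain, uint8_t area, uint8_t al_pa)
    {
            return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
    }

    int main(void)
    {
            printf("management server D_ID = %06Xh\n",
                (unsigned)fc_d_id(0xff, 0xff, 0xfa));   /* prints FFFFFAh */
            return 0;
    }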
@@ -1126,17 +1129,17 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
1126 * Returns a pointer to the @ha's ms_iocb. 1129 * Returns a pointer to the @ha's ms_iocb.
1127 */ 1130 */
1128void * 1131void *
1129qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, 1132qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1130 uint32_t rsp_size) 1133 uint32_t rsp_size)
1131{ 1134{
1132 ms_iocb_entry_t *ms_pkt; 1135 ms_iocb_entry_t *ms_pkt;
1133 1136 struct qla_hw_data *ha = vha->hw;
1134 ms_pkt = ha->ms_iocb; 1137 ms_pkt = ha->ms_iocb;
1135 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); 1138 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1136 1139
1137 ms_pkt->entry_type = MS_IOCB_TYPE; 1140 ms_pkt->entry_type = MS_IOCB_TYPE;
1138 ms_pkt->entry_count = 1; 1141 ms_pkt->entry_count = 1;
1139 SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); 1142 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1140 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); 1143 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
1141 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1144 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1142 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1145 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
@@ -1164,17 +1167,18 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1164 * Returns a pointer to the @ha's ms_iocb. 1167 * Returns a pointer to the @ha's ms_iocb.
1165 */ 1168 */
1166void * 1169void *
1167qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, 1170qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1168 uint32_t rsp_size) 1171 uint32_t rsp_size)
1169{ 1172{
1170 struct ct_entry_24xx *ct_pkt; 1173 struct ct_entry_24xx *ct_pkt;
1174 struct qla_hw_data *ha = vha->hw;
1171 1175
1172 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1176 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1173 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); 1177 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1174 1178
1175 ct_pkt->entry_type = CT_IOCB_TYPE; 1179 ct_pkt->entry_type = CT_IOCB_TYPE;
1176 ct_pkt->entry_count = 1; 1180 ct_pkt->entry_count = 1;
1177 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); 1181 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1178 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1182 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1179 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1183 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1180 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 1184 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1188,14 +1192,15 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1188 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1192 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1189 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1193 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1190 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 1194 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1191 ct_pkt->vp_index = ha->vp_idx; 1195 ct_pkt->vp_index = vha->vp_idx;
1192 1196
1193 return ct_pkt; 1197 return ct_pkt;
1194} 1198}
1195 1199
1196static inline ms_iocb_entry_t * 1200static inline ms_iocb_entry_t *
1197qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size) 1201qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1198{ 1202{
1203 struct qla_hw_data *ha = vha->hw;
1199 ms_iocb_entry_t *ms_pkt = ha->ms_iocb; 1204 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1200 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1205 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1201 1206
@@ -1240,7 +1245,7 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
1240 * Returns 0 on success. 1245 * Returns 0 on success.
1241 */ 1246 */
1242static int 1247static int
1243qla2x00_fdmi_rhba(scsi_qla_host_t *ha) 1248qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1244{ 1249{
1245 int rval, alen; 1250 int rval, alen;
1246 uint32_t size, sn; 1251 uint32_t size, sn;
@@ -1250,11 +1255,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1250 struct ct_sns_rsp *ct_rsp; 1255 struct ct_sns_rsp *ct_rsp;
1251 uint8_t *entries; 1256 uint8_t *entries;
1252 struct ct_fdmi_hba_attr *eiter; 1257 struct ct_fdmi_hba_attr *eiter;
1258 struct qla_hw_data *ha = vha->hw;
1253 1259
1254 /* Issue RHBA */ 1260 /* Issue RHBA */
1255 /* Prepare common MS IOCB */ 1261 /* Prepare common MS IOCB */
1256 /* Request size adjusted after CT preparation */ 1262 /* Request size adjusted after CT preparation */
1257 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE); 1263 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1258 1264
1259 /* Prepare CT request */ 1265 /* Prepare CT request */
1260 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD, 1266 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@@ -1262,9 +1268,9 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1262 ct_rsp = &ha->ct_sns->p.rsp; 1268 ct_rsp = &ha->ct_sns->p.rsp;
1263 1269
1264 /* Prepare FDMI command arguments -- attribute block, attributes. */ 1270 /* Prepare FDMI command arguments -- attribute block, attributes. */
1265 memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE); 1271 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1266 ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1); 1272 ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
1267 memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE); 1273 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1268 size = 2 * WWN_SIZE + 4 + 4; 1274 size = 2 * WWN_SIZE + 4 + 4;
1269 1275
1270 /* Attributes */ 1276 /* Attributes */
@@ -1276,11 +1282,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1276 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1282 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1277 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME); 1283 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
1278 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE); 1284 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
1279 memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE); 1285 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1280 size += 4 + WWN_SIZE; 1286 size += 4 + WWN_SIZE;
1281 1287
1282 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n", 1288 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
1283 __func__, ha->host_no, 1289 __func__, vha->host_no,
1284 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2], 1290 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
1285 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5], 1291 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
1286 eiter->a.node_name[6], eiter->a.node_name[7])); 1292 eiter->a.node_name[6], eiter->a.node_name[7]));
@@ -1294,7 +1300,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1294 eiter->len = cpu_to_be16(4 + alen); 1300 eiter->len = cpu_to_be16(4 + alen);
1295 size += 4 + alen; 1301 size += 4 + alen;
1296 1302
1297 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no, 1303 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
1298 eiter->a.manufacturer)); 1304 eiter->a.manufacturer));
1299 1305
1300 /* Serial number. */ 1306 /* Serial number. */
@@ -1307,7 +1313,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1307 eiter->len = cpu_to_be16(4 + alen); 1313 eiter->len = cpu_to_be16(4 + alen);
1308 size += 4 + alen; 1314 size += 4 + alen;
1309 1315
1310 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no, 1316 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
1311 eiter->a.serial_num)); 1317 eiter->a.serial_num));
1312 1318
1313 /* Model name. */ 1319 /* Model name. */
@@ -1319,7 +1325,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1319 eiter->len = cpu_to_be16(4 + alen); 1325 eiter->len = cpu_to_be16(4 + alen);
1320 size += 4 + alen; 1326 size += 4 + alen;
1321 1327
1322 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no, 1328 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
1323 eiter->a.model)); 1329 eiter->a.model));
1324 1330
1325 /* Model description. */ 1331 /* Model description. */
@@ -1332,7 +1338,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1332 eiter->len = cpu_to_be16(4 + alen); 1338 eiter->len = cpu_to_be16(4 + alen);
1333 size += 4 + alen; 1339 size += 4 + alen;
1334 1340
1335 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no, 1341 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
1336 eiter->a.model_desc)); 1342 eiter->a.model_desc));
1337 1343
1338 /* Hardware version. */ 1344 /* Hardware version. */
@@ -1344,7 +1350,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1344 eiter->len = cpu_to_be16(4 + alen); 1350 eiter->len = cpu_to_be16(4 + alen);
1345 size += 4 + alen; 1351 size += 4 + alen;
1346 1352
1347 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no, 1353 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
1348 eiter->a.hw_version)); 1354 eiter->a.hw_version));
1349 1355
1350 /* Driver version. */ 1356 /* Driver version. */
@@ -1356,7 +1362,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1356 eiter->len = cpu_to_be16(4 + alen); 1362 eiter->len = cpu_to_be16(4 + alen);
1357 size += 4 + alen; 1363 size += 4 + alen;
1358 1364
1359 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no, 1365 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
1360 eiter->a.driver_version)); 1366 eiter->a.driver_version));
1361 1367
1362 /* Option ROM version. */ 1368 /* Option ROM version. */
@@ -1368,27 +1374,27 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1368 eiter->len = cpu_to_be16(4 + alen); 1374 eiter->len = cpu_to_be16(4 + alen);
1369 size += 4 + alen; 1375 size += 4 + alen;
1370 1376
1371 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no, 1377 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
1372 eiter->a.orom_version)); 1378 eiter->a.orom_version));
1373 1379
1374 /* Firmware version */ 1380 /* Firmware version */
1375 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1381 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1376 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); 1382 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1377 ha->isp_ops->fw_version_str(ha, eiter->a.fw_version); 1383 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version);
1378 alen = strlen(eiter->a.fw_version); 1384 alen = strlen(eiter->a.fw_version);
1379 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1385 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1380 eiter->len = cpu_to_be16(4 + alen); 1386 eiter->len = cpu_to_be16(4 + alen);
1381 size += 4 + alen; 1387 size += 4 + alen;
1382 1388
1383 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no, 1389 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
1384 eiter->a.fw_version)); 1390 eiter->a.fw_version));
1385 1391
1386 /* Update MS request size. */ 1392 /* Update MS request size. */
1387 qla2x00_update_ms_fdmi_iocb(ha, size + 16); 1393 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1388 1394
1389 DEBUG13(printk("%s(%ld): RHBA identifier=" 1395 DEBUG13(printk("%s(%ld): RHBA identifier="
1390 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1396 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1391 ha->host_no, ct_req->req.rhba.hba_identifier[0], 1397 vha->host_no, ct_req->req.rhba.hba_identifier[0],
1392 ct_req->req.rhba.hba_identifier[1], 1398 ct_req->req.rhba.hba_identifier[1],
1393 ct_req->req.rhba.hba_identifier[2], 1399 ct_req->req.rhba.hba_identifier[2],
1394 ct_req->req.rhba.hba_identifier[3], 1400 ct_req->req.rhba.hba_identifier[3],
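The string attributes in the RHBA hunks above all pad their length with the idiom alen += (alen & 3) ? (4 - (alen & 3)) : 4; that is, round the string length up to the next 4-byte boundary and, when it is already aligned, add a full extra word (presumably to keep at least one pad/terminator byte). A stand-alone sketch of just that arithmetic (fdmi_attr_pad is a hypothetical helper name):

    #include <stdio.h>
    #include <string.h>

    /* Round a string length up to a 4-byte boundary; an already aligned
     * length still gains 4 bytes, matching the idiom in the FDMI hunks. */
    static size_t fdmi_attr_pad(const char *s)
    {
            size_t alen = strlen(s);

            alen += (alen & 3) ? (4 - (alen & 3)) : 4;
            return alen;
    }

    int main(void)
    {
            printf("%zu %zu\n", fdmi_attr_pad("abc"), fdmi_attr_pad("abcd"));
            /* prints "4 8": 3 rounds up to 4, 4 jumps to 8 */
            return 0;
    }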
@@ -1399,25 +1405,25 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1399 DEBUG13(qla2x00_dump_buffer(entries, size)); 1405 DEBUG13(qla2x00_dump_buffer(entries, size));
1400 1406
1401 /* Execute MS IOCB */ 1407 /* Execute MS IOCB */
1402 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1408 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1403 sizeof(ms_iocb_entry_t)); 1409 sizeof(ms_iocb_entry_t));
1404 if (rval != QLA_SUCCESS) { 1410 if (rval != QLA_SUCCESS) {
1405 /*EMPTY*/ 1411 /*EMPTY*/
1406 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n", 1412 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
1407 ha->host_no, rval)); 1413 vha->host_no, rval));
1408 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") != 1414 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1409 QLA_SUCCESS) { 1415 QLA_SUCCESS) {
1410 rval = QLA_FUNCTION_FAILED; 1416 rval = QLA_FUNCTION_FAILED;
1411 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 1417 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1412 ct_rsp->header.explanation_code == 1418 ct_rsp->header.explanation_code ==
1413 CT_EXPL_ALREADY_REGISTERED) { 1419 CT_EXPL_ALREADY_REGISTERED) {
1414 DEBUG2_13(printk("%s(%ld): HBA already registered.\n", 1420 DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
1415 __func__, ha->host_no)); 1421 __func__, vha->host_no));
1416 rval = QLA_ALREADY_REGISTERED; 1422 rval = QLA_ALREADY_REGISTERED;
1417 } 1423 }
1418 } else { 1424 } else {
1419 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n", 1425 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
1420 ha->host_no)); 1426 vha->host_no));
1421 } 1427 }
1422 1428
1423 return rval; 1429 return rval;
@@ -1430,17 +1436,17 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1430 * Returns 0 on success. 1436 * Returns 0 on success.
1431 */ 1437 */
1432static int 1438static int
1433qla2x00_fdmi_dhba(scsi_qla_host_t *ha) 1439qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
1434{ 1440{
1435 int rval; 1441 int rval;
1436 1442 struct qla_hw_data *ha = vha->hw;
1437 ms_iocb_entry_t *ms_pkt; 1443 ms_iocb_entry_t *ms_pkt;
1438 struct ct_sns_req *ct_req; 1444 struct ct_sns_req *ct_req;
1439 struct ct_sns_rsp *ct_rsp; 1445 struct ct_sns_rsp *ct_rsp;
1440 1446
1441 /* Issue RPA */ 1447 /* Issue RPA */
1442 /* Prepare common MS IOCB */ 1448 /* Prepare common MS IOCB */
1443 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE, 1449 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
1444 DHBA_RSP_SIZE); 1450 DHBA_RSP_SIZE);
1445 1451
1446 /* Prepare CT request */ 1452 /* Prepare CT request */
@@ -1449,28 +1455,28 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
1449 ct_rsp = &ha->ct_sns->p.rsp; 1455 ct_rsp = &ha->ct_sns->p.rsp;
1450 1456
1451 /* Prepare FDMI command arguments -- portname. */ 1457 /* Prepare FDMI command arguments -- portname. */
1452 memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE); 1458 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
1453 1459
1454 DEBUG13(printk("%s(%ld): DHBA portname=" 1460 DEBUG13(printk("%s(%ld): DHBA portname="
1455 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no, 1461 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
1456 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], 1462 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1457 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], 1463 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1458 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], 1464 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1459 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7])); 1465 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
1460 1466
1461 /* Execute MS IOCB */ 1467 /* Execute MS IOCB */
1462 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1468 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1463 sizeof(ms_iocb_entry_t)); 1469 sizeof(ms_iocb_entry_t));
1464 if (rval != QLA_SUCCESS) { 1470 if (rval != QLA_SUCCESS) {
1465 /*EMPTY*/ 1471 /*EMPTY*/
1466 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n", 1472 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
1467 ha->host_no, rval)); 1473 vha->host_no, rval));
1468 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") != 1474 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
1469 QLA_SUCCESS) { 1475 QLA_SUCCESS) {
1470 rval = QLA_FUNCTION_FAILED; 1476 rval = QLA_FUNCTION_FAILED;
1471 } else { 1477 } else {
1472 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n", 1478 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
1473 ha->host_no)); 1479 vha->host_no));
1474 } 1480 }
1475 1481
1476 return rval; 1482 return rval;
@@ -1483,11 +1489,11 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
1483 * Returns 0 on success. 1489 * Returns 0 on success.
1484 */ 1490 */
1485static int 1491static int
1486qla2x00_fdmi_rpa(scsi_qla_host_t *ha) 1492qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1487{ 1493{
1488 int rval, alen; 1494 int rval, alen;
1489 uint32_t size, max_frame_size; 1495 uint32_t size, max_frame_size;
1490 1496 struct qla_hw_data *ha = vha->hw;
1491 ms_iocb_entry_t *ms_pkt; 1497 ms_iocb_entry_t *ms_pkt;
1492 struct ct_sns_req *ct_req; 1498 struct ct_sns_req *ct_req;
1493 struct ct_sns_rsp *ct_rsp; 1499 struct ct_sns_rsp *ct_rsp;
@@ -1498,7 +1504,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1498 /* Issue RPA */ 1504 /* Issue RPA */
1499 /* Prepare common MS IOCB */ 1505 /* Prepare common MS IOCB */
1500 /* Request size adjusted after CT preparation */ 1506 /* Request size adjusted after CT preparation */
1501 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE); 1507 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1502 1508
1503 /* Prepare CT request */ 1509 /* Prepare CT request */
1504 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD, 1510 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@@ -1506,7 +1512,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1506 ct_rsp = &ha->ct_sns->p.rsp; 1512 ct_rsp = &ha->ct_sns->p.rsp;
1507 1513
1508 /* Prepare FDMI command arguments -- attribute block, attributes. */ 1514 /* Prepare FDMI command arguments -- attribute block, attributes. */
1509 memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE); 1515 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1510 size = WWN_SIZE + 4; 1516 size = WWN_SIZE + 4;
1511 1517
1512 /* Attributes */ 1518 /* Attributes */
@@ -1521,8 +1527,9 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1521 eiter->a.fc4_types[2] = 0x01; 1527 eiter->a.fc4_types[2] = 0x01;
1522 size += 4 + 32; 1528 size += 4 + 32;
1523 1529
1524 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no, 1530 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
1525 eiter->a.fc4_types[2], eiter->a.fc4_types[1])); 1531 vha->host_no, eiter->a.fc4_types[2],
1532 eiter->a.fc4_types[1]));
1526 1533
1527 /* Supported speed. */ 1534 /* Supported speed. */
1528 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1535 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1544,7 +1551,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1544 FDMI_PORT_SPEED_1GB); 1551 FDMI_PORT_SPEED_1GB);
1545 size += 4 + 4; 1552 size += 4 + 4;
1546 1553
1547 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no, 1554 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
1548 eiter->a.sup_speed)); 1555 eiter->a.sup_speed));
1549 1556
1550 /* Current speed. */ 1557 /* Current speed. */
@@ -1575,7 +1582,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1575 } 1582 }
1576 size += 4 + 4; 1583 size += 4 + 4;
1577 1584
1578 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no, 1585 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
1579 eiter->a.cur_speed)); 1586 eiter->a.cur_speed));
1580 1587
1581 /* Max frame size. */ 1588 /* Max frame size. */
@@ -1588,7 +1595,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1588 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1595 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1589 size += 4 + 4; 1596 size += 4 + 4;
1590 1597
1591 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no, 1598 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
1592 eiter->a.max_frame_size)); 1599 eiter->a.max_frame_size));
1593 1600
1594 /* OS device name. */ 1601 /* OS device name. */
@@ -1600,32 +1607,32 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1600 eiter->len = cpu_to_be16(4 + alen); 1607 eiter->len = cpu_to_be16(4 + alen);
1601 size += 4 + alen; 1608 size += 4 + alen;
1602 1609
1603 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no, 1610 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
1604 eiter->a.os_dev_name)); 1611 eiter->a.os_dev_name));
1605 1612
1606 /* Hostname. */ 1613 /* Hostname. */
1607 if (strlen(fc_host_system_hostname(ha->host))) { 1614 if (strlen(fc_host_system_hostname(vha->host))) {
1608 ct_req->req.rpa.attrs.count = 1615 ct_req->req.rpa.attrs.count =
1609 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT); 1616 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1610 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1617 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1611 eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME); 1618 eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
1612 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name), 1619 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1613 "%s", fc_host_system_hostname(ha->host)); 1620 "%s", fc_host_system_hostname(vha->host));
1614 alen = strlen(eiter->a.host_name); 1621 alen = strlen(eiter->a.host_name);
1615 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1622 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1616 eiter->len = cpu_to_be16(4 + alen); 1623 eiter->len = cpu_to_be16(4 + alen);
1617 size += 4 + alen; 1624 size += 4 + alen;
1618 1625
1619 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__, 1626 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
1620 ha->host_no, eiter->a.host_name)); 1627 vha->host_no, eiter->a.host_name));
1621 } 1628 }
1622 1629
1623 /* Update MS request size. */ 1630 /* Update MS request size. */
1624 qla2x00_update_ms_fdmi_iocb(ha, size + 16); 1631 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1625 1632
1626 DEBUG13(printk("%s(%ld): RPA portname=" 1633 DEBUG13(printk("%s(%ld): RPA portname="
1627 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1634 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1628 ha->host_no, ct_req->req.rpa.port_name[0], 1635 vha->host_no, ct_req->req.rpa.port_name[0],
1629 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2], 1636 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
1630 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4], 1637 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
1631 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6], 1638 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
@@ -1633,18 +1640,18 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1633 DEBUG13(qla2x00_dump_buffer(entries, size)); 1640 DEBUG13(qla2x00_dump_buffer(entries, size));
1634 1641
1635 /* Execute MS IOCB */ 1642 /* Execute MS IOCB */
1636 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1643 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1637 sizeof(ms_iocb_entry_t)); 1644 sizeof(ms_iocb_entry_t));
1638 if (rval != QLA_SUCCESS) { 1645 if (rval != QLA_SUCCESS) {
1639 /*EMPTY*/ 1646 /*EMPTY*/
1640 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n", 1647 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
1641 ha->host_no, rval)); 1648 vha->host_no, rval));
1642 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") != 1649 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1643 QLA_SUCCESS) { 1650 QLA_SUCCESS) {
1644 rval = QLA_FUNCTION_FAILED; 1651 rval = QLA_FUNCTION_FAILED;
1645 } else { 1652 } else {
1646 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n", 1653 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
1647 ha->host_no)); 1654 vha->host_no));
1648 } 1655 }
1649 1656
1650 return rval; 1657 return rval;
@@ -1657,34 +1664,28 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1657 * Returns 0 on success. 1664 * Returns 0 on success.
1658 */ 1665 */
1659int 1666int
1660qla2x00_fdmi_register(scsi_qla_host_t *ha) 1667qla2x00_fdmi_register(scsi_qla_host_t *vha)
1661{ 1668{
1662 int rval; 1669 int rval;
1663 1670
1664 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 1671 rval = qla2x00_mgmt_svr_login(vha);
1665 DEBUG2(printk("scsi(%ld): FDMI unsupported on "
1666 "ISP2100/ISP2200.\n", ha->host_no));
1667 return QLA_SUCCESS;
1668 }
1669
1670 rval = qla2x00_mgmt_svr_login(ha);
1671 if (rval) 1672 if (rval)
1672 return rval; 1673 return rval;
1673 1674
1674 rval = qla2x00_fdmi_rhba(ha); 1675 rval = qla2x00_fdmi_rhba(vha);
1675 if (rval) { 1676 if (rval) {
1676 if (rval != QLA_ALREADY_REGISTERED) 1677 if (rval != QLA_ALREADY_REGISTERED)
1677 return rval; 1678 return rval;
1678 1679
1679 rval = qla2x00_fdmi_dhba(ha); 1680 rval = qla2x00_fdmi_dhba(vha);
1680 if (rval) 1681 if (rval)
1681 return rval; 1682 return rval;
1682 1683
1683 rval = qla2x00_fdmi_rhba(ha); 1684 rval = qla2x00_fdmi_rhba(vha);
1684 if (rval) 1685 if (rval)
1685 return rval; 1686 return rval;
1686 } 1687 }
1687 rval = qla2x00_fdmi_rpa(ha); 1688 rval = qla2x00_fdmi_rpa(vha);
1688 1689
1689 return rval; 1690 return rval;
1690} 1691}
@@ -1697,11 +1698,11 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
1697 * Returns 0 on success. 1698 * Returns 0 on success.
1698 */ 1699 */
1699int 1700int
1700qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list) 1701qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1701{ 1702{
1702 int rval; 1703 int rval;
1703 uint16_t i; 1704 uint16_t i;
1704 1705 struct qla_hw_data *ha = vha->hw;
1705 ms_iocb_entry_t *ms_pkt; 1706 ms_iocb_entry_t *ms_pkt;
1706 struct ct_sns_req *ct_req; 1707 struct ct_sns_req *ct_req;
1707 struct ct_sns_rsp *ct_rsp; 1708 struct ct_sns_rsp *ct_rsp;
@@ -1712,7 +1713,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1712 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1713 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1713 /* Issue GFPN_ID */ 1714 /* Issue GFPN_ID */
1714 /* Prepare common MS IOCB */ 1715 /* Prepare common MS IOCB */
1715 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE, 1716 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
1716 GFPN_ID_RSP_SIZE); 1717 GFPN_ID_RSP_SIZE);
1717 1718
1718 /* Prepare CT request */ 1719 /* Prepare CT request */
@@ -1726,13 +1727,13 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1726 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; 1727 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
1727 1728
1728 /* Execute MS IOCB */ 1729 /* Execute MS IOCB */
1729 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1730 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1730 sizeof(ms_iocb_entry_t)); 1731 sizeof(ms_iocb_entry_t));
1731 if (rval != QLA_SUCCESS) { 1732 if (rval != QLA_SUCCESS) {
1732 /*EMPTY*/ 1733 /*EMPTY*/
1733 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB " 1734 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
1734 "failed (%d).\n", ha->host_no, rval)); 1735 "failed (%d).\n", vha->host_no, rval));
1735 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 1736 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1736 "GFPN_ID") != QLA_SUCCESS) { 1737 "GFPN_ID") != QLA_SUCCESS) {
1737 rval = QLA_FUNCTION_FAILED; 1738 rval = QLA_FUNCTION_FAILED;
1738 } else { 1739 } else {
@@ -1750,17 +1751,17 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1750} 1751}
1751 1752
1752static inline void * 1753static inline void *
1753qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size, 1754qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1754 uint32_t rsp_size) 1755 uint32_t rsp_size)
1755{ 1756{
1756 struct ct_entry_24xx *ct_pkt; 1757 struct ct_entry_24xx *ct_pkt;
1757 1758 struct qla_hw_data *ha = vha->hw;
1758 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1759 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1759 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); 1760 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1760 1761
1761 ct_pkt->entry_type = CT_IOCB_TYPE; 1762 ct_pkt->entry_type = CT_IOCB_TYPE;
1762 ct_pkt->entry_count = 1; 1763 ct_pkt->entry_count = 1;
1763 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); 1764 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1764 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1765 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1765 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1766 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1766 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 1767 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1774,7 +1775,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1774 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1775 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1775 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1776 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1776 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 1777 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1777 ct_pkt->vp_index = ha->vp_idx; 1778 ct_pkt->vp_index = vha->vp_idx;
1778 1779
1779 return ct_pkt; 1780 return ct_pkt;
1780} 1781}
@@ -1803,11 +1804,11 @@ qla24xx_prep_ct_fm_req(struct ct_sns_req *ct_req, uint16_t cmd,
1803 * Returns 0 on success. 1804 * Returns 0 on success.
1804 */ 1805 */
1805int 1806int
1806qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list) 1807qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1807{ 1808{
1808 int rval; 1809 int rval;
1809 uint16_t i; 1810 uint16_t i;
1810 1811 struct qla_hw_data *ha = vha->hw;
1811 ms_iocb_entry_t *ms_pkt; 1812 ms_iocb_entry_t *ms_pkt;
1812 struct ct_sns_req *ct_req; 1813 struct ct_sns_req *ct_req;
1813 struct ct_sns_rsp *ct_rsp; 1814 struct ct_sns_rsp *ct_rsp;
@@ -1817,14 +1818,14 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1817 if (!ha->flags.gpsc_supported) 1818 if (!ha->flags.gpsc_supported)
1818 return QLA_FUNCTION_FAILED; 1819 return QLA_FUNCTION_FAILED;
1819 1820
1820 rval = qla2x00_mgmt_svr_login(ha); 1821 rval = qla2x00_mgmt_svr_login(vha);
1821 if (rval) 1822 if (rval)
1822 return rval; 1823 return rval;
1823 1824
1824 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1825 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1825 /* Issue GPSC */ 1826 /* Issue GPSC */
1826 /* Prepare common MS IOCB */ 1827 /* Prepare common MS IOCB */
1827 ms_pkt = qla24xx_prep_ms_fm_iocb(ha, GPSC_REQ_SIZE, 1828 ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
1828 GPSC_RSP_SIZE); 1829 GPSC_RSP_SIZE);
1829 1830
1830 /* Prepare CT request */ 1831 /* Prepare CT request */
@@ -1837,13 +1838,13 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1837 WWN_SIZE); 1838 WWN_SIZE);
1838 1839
1839 /* Execute MS IOCB */ 1840 /* Execute MS IOCB */
1840 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1841 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1841 sizeof(ms_iocb_entry_t)); 1842 sizeof(ms_iocb_entry_t));
1842 if (rval != QLA_SUCCESS) { 1843 if (rval != QLA_SUCCESS) {
1843 /*EMPTY*/ 1844 /*EMPTY*/
1844 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB " 1845 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
1845 "failed (%d).\n", ha->host_no, rval)); 1846 "failed (%d).\n", vha->host_no, rval));
1846 } else if ((rval = qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 1847 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1847 "GPSC")) != QLA_SUCCESS) { 1848 "GPSC")) != QLA_SUCCESS) {
1848 /* FM command unsupported? */ 1849 /* FM command unsupported? */
1849 if (rval == QLA_INVALID_COMMAND && 1850 if (rval == QLA_INVALID_COMMAND &&
@@ -1853,7 +1854,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1853 CT_REASON_COMMAND_UNSUPPORTED)) { 1854 CT_REASON_COMMAND_UNSUPPORTED)) {
1854 DEBUG2(printk("scsi(%ld): GPSC command " 1855 DEBUG2(printk("scsi(%ld): GPSC command "
1855 "unsupported, disabling query...\n", 1856 "unsupported, disabling query...\n",
1856 ha->host_no)); 1857 vha->host_no));
1857 ha->flags.gpsc_supported = 0; 1858 ha->flags.gpsc_supported = 0;
1858 rval = QLA_FUNCTION_FAILED; 1859 rval = QLA_FUNCTION_FAILED;
1859 break; 1860 break;
@@ -1878,7 +1879,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1878 1879
1879 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - " 1880 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
1880 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " 1881 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
1881 "speed=%04x.\n", ha->host_no, 1882 "speed=%04x.\n", vha->host_no,
1882 list[i].fabric_port_name[0], 1883 list[i].fabric_port_name[0],
1883 list[i].fabric_port_name[1], 1884 list[i].fabric_port_name[1],
1884 list[i].fabric_port_name[2], 1885 list[i].fabric_port_name[2],
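
The qla_gs.c hunks above all apply one mechanical conversion: each entry point now takes the per-port scsi_qla_host_t as vha, and anything owned by the physical adapter (the MS IOCB and CT/SNS DMA buffers, isp_ops, feature flags) is reached through a struct qla_hw_data *ha = vha->hw local fetched at the top of the function. A minimal sketch of that split, for orientation only -- the member lists are trimmed to what the hunks actually reference, the helper at the end is illustrative rather than part of the driver, and the authoritative definitions live in qla_def.h, which this diff does not include:

/* sketch only -- not the real qla_def.h layout */
#include <stdint.h>

struct req_que;                     /* per-queue request state  */
struct rsp_que;                     /* per-queue response state */
struct isp_operations;              /* chip-specific callbacks  */

struct qla_hw_data {                /* one per physical HBA function */
        struct isp_operations *isp_ops;
        struct req_que **req_q_map; /* [0] is the base request queue  */
        struct rsp_que **rsp_q_map; /* [0] is the base response queue */
        /* ms_iocb, ct_sns buffers, fw options, hardware flags, ... */
};

typedef struct scsi_qla_host {      /* one per (virtual) port */
        unsigned long host_no;
        uint16_t vp_idx;
        struct qla_hw_data *hw;     /* shared hardware state */
        /* loop state, d_id, dpc_flags, port/node names, ... */
} scsi_qla_host_t;

/* The conversion pattern used throughout: take the port, derive the
 * shared hardware state once, then split field accesses accordingly. */
static inline struct qla_hw_data *qla_sketch_hw(scsi_qla_host_t *vha)
{
        return vha->hw;
}

The driver itself has no such helper; each converted function simply opens with struct qla_hw_data *ha = vha->hw, exactly as the hunks show, keeping per-port state separate from adapter state that several virtual ports (note the vp_idx references) can share.
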
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4218f20f5ed5..52ed56ecf195 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -21,7 +22,6 @@
21static int qla2x00_isp_firmware(scsi_qla_host_t *); 22static int qla2x00_isp_firmware(scsi_qla_host_t *);
22static void qla2x00_resize_request_q(scsi_qla_host_t *); 23static void qla2x00_resize_request_q(scsi_qla_host_t *);
23static int qla2x00_setup_chip(scsi_qla_host_t *); 24static int qla2x00_setup_chip(scsi_qla_host_t *);
24static void qla2x00_init_response_q_entries(scsi_qla_host_t *);
25static int qla2x00_init_rings(scsi_qla_host_t *); 25static int qla2x00_init_rings(scsi_qla_host_t *);
26static int qla2x00_fw_ready(scsi_qla_host_t *); 26static int qla2x00_fw_ready(scsi_qla_host_t *);
27static int qla2x00_configure_hba(scsi_qla_host_t *); 27static int qla2x00_configure_hba(scsi_qla_host_t *);
@@ -35,10 +35,11 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
35 35
36static int qla2x00_restart_isp(scsi_qla_host_t *); 36static int qla2x00_restart_isp(scsi_qla_host_t *);
37 37
38static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev); 38static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39 39
40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); 40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41static int qla84xx_init_chip(scsi_qla_host_t *); 41static int qla84xx_init_chip(scsi_qla_host_t *);
42static int qla25xx_init_queues(struct qla_hw_data *);
42 43
43/****************************************************************************/ 44/****************************************************************************/
44/* QLogic ISP2x00 Hardware Support Functions. */ 45/* QLogic ISP2x00 Hardware Support Functions. */
@@ -55,77 +56,81 @@ static int qla84xx_init_chip(scsi_qla_host_t *);
55* 0 = success 56* 0 = success
56*/ 57*/
57int 58int
58qla2x00_initialize_adapter(scsi_qla_host_t *ha) 59qla2x00_initialize_adapter(scsi_qla_host_t *vha)
59{ 60{
60 int rval; 61 int rval;
61 62 struct qla_hw_data *ha = vha->hw;
63 struct req_que *req = ha->req_q_map[0];
62 /* Clear adapter flags. */ 64 /* Clear adapter flags. */
63 ha->flags.online = 0; 65 vha->flags.online = 0;
64 ha->flags.reset_active = 0; 66 vha->flags.reset_active = 0;
65 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 67 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
66 atomic_set(&ha->loop_state, LOOP_DOWN); 68 atomic_set(&vha->loop_state, LOOP_DOWN);
67 ha->device_flags = DFLG_NO_CABLE; 69 vha->device_flags = DFLG_NO_CABLE;
68 ha->dpc_flags = 0; 70 vha->dpc_flags = 0;
69 ha->flags.management_server_logged_in = 0; 71 vha->flags.management_server_logged_in = 0;
70 ha->marker_needed = 0; 72 vha->marker_needed = 0;
71 ha->mbx_flags = 0; 73 ha->mbx_flags = 0;
72 ha->isp_abort_cnt = 0; 74 ha->isp_abort_cnt = 0;
73 ha->beacon_blink_led = 0; 75 ha->beacon_blink_led = 0;
74 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 76 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
77
78 set_bit(0, ha->req_qid_map);
79 set_bit(0, ha->rsp_qid_map);
75 80
76 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 81 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
77 rval = ha->isp_ops->pci_config(ha); 82 rval = ha->isp_ops->pci_config(vha);
78 if (rval) { 83 if (rval) {
79 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 84 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
80 ha->host_no)); 85 vha->host_no));
81 return (rval); 86 return (rval);
82 } 87 }
83 88
84 ha->isp_ops->reset_chip(ha); 89 ha->isp_ops->reset_chip(vha);
85 90
86 rval = qla2xxx_get_flash_info(ha); 91 rval = qla2xxx_get_flash_info(vha);
87 if (rval) { 92 if (rval) {
88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", 93 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
89 ha->host_no)); 94 vha->host_no));
90 return (rval); 95 return (rval);
91 } 96 }
92 97
93 ha->isp_ops->get_flash_version(ha, ha->request_ring); 98 ha->isp_ops->get_flash_version(vha, req->ring);
94 99
95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 100 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
96 101
97 ha->isp_ops->nvram_config(ha); 102 ha->isp_ops->nvram_config(vha);
98 103
99 if (ha->flags.disable_serdes) { 104 if (ha->flags.disable_serdes) {
100 /* Mask HBA via NVRAM settings? */ 105 /* Mask HBA via NVRAM settings? */
101 qla_printk(KERN_INFO, ha, "Masking HBA WWPN " 106 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
102 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 107 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
103 ha->port_name[0], ha->port_name[1], 108 vha->port_name[0], vha->port_name[1],
104 ha->port_name[2], ha->port_name[3], 109 vha->port_name[2], vha->port_name[3],
105 ha->port_name[4], ha->port_name[5], 110 vha->port_name[4], vha->port_name[5],
106 ha->port_name[6], ha->port_name[7]); 111 vha->port_name[6], vha->port_name[7]);
107 return QLA_FUNCTION_FAILED; 112 return QLA_FUNCTION_FAILED;
108 } 113 }
109 114
110 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 115 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
111 116
112 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) { 117 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
113 rval = ha->isp_ops->chip_diag(ha); 118 rval = ha->isp_ops->chip_diag(vha);
114 if (rval) 119 if (rval)
115 return (rval); 120 return (rval);
116 rval = qla2x00_setup_chip(ha); 121 rval = qla2x00_setup_chip(vha);
117 if (rval) 122 if (rval)
118 return (rval); 123 return (rval);
119 } 124 }
120 if (IS_QLA84XX(ha)) { 125 if (IS_QLA84XX(ha)) {
121 ha->cs84xx = qla84xx_get_chip(ha); 126 ha->cs84xx = qla84xx_get_chip(vha);
122 if (!ha->cs84xx) { 127 if (!ha->cs84xx) {
123 qla_printk(KERN_ERR, ha, 128 qla_printk(KERN_ERR, ha,
124 "Unable to configure ISP84XX.\n"); 129 "Unable to configure ISP84XX.\n");
125 return QLA_FUNCTION_FAILED; 130 return QLA_FUNCTION_FAILED;
126 } 131 }
127 } 132 }
128 rval = qla2x00_init_rings(ha); 133 rval = qla2x00_init_rings(vha);
129 134
130 return (rval); 135 return (rval);
131} 136}
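
From here on, the qla_init.c hunks replace the old per-host ring bookkeeping (request_ring, request_q_length, response_ring_ptr, ...) with the base queue pair req = ha->req_q_map[0] and rsp = ha->rsp_q_map[0]. The members those hunks touch are sketched below for reference; the layout is a stand-in (field order, widths and the dma_addr_t substitute are assumptions), and the real structures are defined in qla_def.h, outside this diff.

/* sketch only -- trimmed stand-ins for the new queue objects */
#include <stdint.h>

typedef struct request_s  request_t;    /* request ring IOCB (opaque here)  */
typedef struct response_s response_t;   /* response ring IOCB (opaque here) */
typedef uint64_t sketch_dma_addr_t;     /* stand-in for the kernel dma_addr_t */

struct req_que {
        request_t *ring;                /* ring base (was ha->request_ring)   */
        request_t *ring_ptr;            /* next entry to use                  */
        uint16_t ring_index;
        uint16_t length;                /* entries (was ha->request_q_length) */
        uint16_t cnt;                   /* free entries remaining             */
        sketch_dma_addr_t dma;          /* ring bus address                   */
        void **outstanding_cmds;        /* per-handle command pointers        */
        uint32_t current_outstanding_cmd;
};

struct rsp_que {
        response_t *ring;               /* ring base (was ha->response_ring)  */
        response_t *ring_ptr;
        uint16_t ring_index;
        uint16_t length;
        uint32_t options;               /* assigned from icb->firmware_options_2 */
};

The set_bit(0, ha->req_qid_map) and set_bit(0, ha->rsp_qid_map) calls in the hunk above mark slot 0 of those maps as in use, which is consistent with additional queues being registered behind ha->mqenable later in the series.
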
@@ -137,10 +142,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
137 * Returns 0 on success. 142 * Returns 0 on success.
138 */ 143 */
139int 144int
140qla2100_pci_config(scsi_qla_host_t *ha) 145qla2100_pci_config(scsi_qla_host_t *vha)
141{ 146{
142 uint16_t w; 147 uint16_t w;
143 unsigned long flags; 148 unsigned long flags;
149 struct qla_hw_data *ha = vha->hw;
144 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 150 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
145 151
146 pci_set_master(ha->pdev); 152 pci_set_master(ha->pdev);
@@ -167,11 +173,12 @@ qla2100_pci_config(scsi_qla_host_t *ha)
167 * Returns 0 on success. 173 * Returns 0 on success.
168 */ 174 */
169int 175int
170qla2300_pci_config(scsi_qla_host_t *ha) 176qla2300_pci_config(scsi_qla_host_t *vha)
171{ 177{
172 uint16_t w; 178 uint16_t w;
173 unsigned long flags = 0; 179 unsigned long flags = 0;
174 uint32_t cnt; 180 uint32_t cnt;
181 struct qla_hw_data *ha = vha->hw;
175 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 182 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
176 183
177 pci_set_master(ha->pdev); 184 pci_set_master(ha->pdev);
@@ -248,10 +255,11 @@ qla2300_pci_config(scsi_qla_host_t *ha)
248 * Returns 0 on success. 255 * Returns 0 on success.
249 */ 256 */
250int 257int
251qla24xx_pci_config(scsi_qla_host_t *ha) 258qla24xx_pci_config(scsi_qla_host_t *vha)
252{ 259{
253 uint16_t w; 260 uint16_t w;
254 unsigned long flags = 0; 261 unsigned long flags = 0;
262 struct qla_hw_data *ha = vha->hw;
255 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 263 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
256 264
257 pci_set_master(ha->pdev); 265 pci_set_master(ha->pdev);
@@ -291,9 +299,10 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
291 * Returns 0 on success. 299 * Returns 0 on success.
292 */ 300 */
293int 301int
294qla25xx_pci_config(scsi_qla_host_t *ha) 302qla25xx_pci_config(scsi_qla_host_t *vha)
295{ 303{
296 uint16_t w; 304 uint16_t w;
305 struct qla_hw_data *ha = vha->hw;
297 306
298 pci_set_master(ha->pdev); 307 pci_set_master(ha->pdev);
299 pci_try_set_mwi(ha->pdev); 308 pci_try_set_mwi(ha->pdev);
@@ -321,32 +330,33 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
321 * Returns 0 on success. 330 * Returns 0 on success.
322 */ 331 */
323static int 332static int
324qla2x00_isp_firmware(scsi_qla_host_t *ha) 333qla2x00_isp_firmware(scsi_qla_host_t *vha)
325{ 334{
326 int rval; 335 int rval;
327 uint16_t loop_id, topo, sw_cap; 336 uint16_t loop_id, topo, sw_cap;
328 uint8_t domain, area, al_pa; 337 uint8_t domain, area, al_pa;
338 struct qla_hw_data *ha = vha->hw;
329 339
330 /* Assume loading risc code */ 340 /* Assume loading risc code */
331 rval = QLA_FUNCTION_FAILED; 341 rval = QLA_FUNCTION_FAILED;
332 342
333 if (ha->flags.disable_risc_code_load) { 343 if (ha->flags.disable_risc_code_load) {
334 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", 344 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
335 ha->host_no)); 345 vha->host_no));
336 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n"); 346 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
337 347
338 /* Verify checksum of loaded RISC code. */ 348 /* Verify checksum of loaded RISC code. */
339 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address); 349 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
340 if (rval == QLA_SUCCESS) { 350 if (rval == QLA_SUCCESS) {
341 /* And, verify we are not in ROM code. */ 351 /* And, verify we are not in ROM code. */
342 rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa, 352 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
343 &area, &domain, &topo, &sw_cap); 353 &area, &domain, &topo, &sw_cap);
344 } 354 }
345 } 355 }
346 356
347 if (rval) { 357 if (rval) {
348 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", 358 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
349 ha->host_no)); 359 vha->host_no));
350 } 360 }
351 361
352 return (rval); 362 return (rval);
@@ -359,9 +369,10 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
359 * Returns 0 on success. 369 * Returns 0 on success.
360 */ 370 */
361void 371void
362qla2x00_reset_chip(scsi_qla_host_t *ha) 372qla2x00_reset_chip(scsi_qla_host_t *vha)
363{ 373{
364 unsigned long flags = 0; 374 unsigned long flags = 0;
375 struct qla_hw_data *ha = vha->hw;
365 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 376 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
366 uint32_t cnt; 377 uint32_t cnt;
367 uint16_t cmd; 378 uint16_t cmd;
@@ -499,10 +510,11 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
499 * Returns 0 on success. 510 * Returns 0 on success.
500 */ 511 */
501static inline void 512static inline void
502qla24xx_reset_risc(scsi_qla_host_t *ha) 513qla24xx_reset_risc(scsi_qla_host_t *vha)
503{ 514{
504 int hw_evt = 0; 515 int hw_evt = 0;
505 unsigned long flags = 0; 516 unsigned long flags = 0;
517 struct qla_hw_data *ha = vha->hw;
506 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 518 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
507 uint32_t cnt, d2; 519 uint32_t cnt, d2;
508 uint16_t wd; 520 uint16_t wd;
@@ -541,7 +553,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
541 barrier(); 553 barrier();
542 } 554 }
543 if (cnt == 0 || hw_evt) 555 if (cnt == 0 || hw_evt)
544 qla2xxx_hw_event_log(ha, HW_EVENT_RESET_ERR, 556 qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
545 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2), 557 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
546 RD_REG_WORD(&reg->mailbox3)); 558 RD_REG_WORD(&reg->mailbox3));
547 559
@@ -571,12 +583,13 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
571 * Returns 0 on success. 583 * Returns 0 on success.
572 */ 584 */
573void 585void
574qla24xx_reset_chip(scsi_qla_host_t *ha) 586qla24xx_reset_chip(scsi_qla_host_t *vha)
575{ 587{
588 struct qla_hw_data *ha = vha->hw;
576 ha->isp_ops->disable_intrs(ha); 589 ha->isp_ops->disable_intrs(ha);
577 590
578 /* Perform RISC reset. */ 591 /* Perform RISC reset. */
579 qla24xx_reset_risc(ha); 592 qla24xx_reset_risc(vha);
580} 593}
581 594
582/** 595/**
@@ -586,20 +599,22 @@ qla24xx_reset_chip(scsi_qla_host_t *ha)
586 * Returns 0 on success. 599 * Returns 0 on success.
587 */ 600 */
588int 601int
589qla2x00_chip_diag(scsi_qla_host_t *ha) 602qla2x00_chip_diag(scsi_qla_host_t *vha)
590{ 603{
591 int rval; 604 int rval;
605 struct qla_hw_data *ha = vha->hw;
592 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 606 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
593 unsigned long flags = 0; 607 unsigned long flags = 0;
594 uint16_t data; 608 uint16_t data;
595 uint32_t cnt; 609 uint32_t cnt;
596 uint16_t mb[5]; 610 uint16_t mb[5];
611 struct req_que *req = ha->req_q_map[0];
597 612
598 /* Assume a failed state */ 613 /* Assume a failed state */
599 rval = QLA_FUNCTION_FAILED; 614 rval = QLA_FUNCTION_FAILED;
600 615
601 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 616 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
602 ha->host_no, (u_long)&reg->flash_address)); 617 vha->host_no, (u_long)&reg->flash_address));
603 618
604 spin_lock_irqsave(&ha->hardware_lock, flags); 619 spin_lock_irqsave(&ha->hardware_lock, flags);
605 620
@@ -662,17 +677,17 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
662 ha->product_id[3] = mb[4]; 677 ha->product_id[3] = mb[4];
663 678
664 /* Adjust fw RISC transfer size */ 679 /* Adjust fw RISC transfer size */
665 if (ha->request_q_length > 1024) 680 if (req->length > 1024)
666 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 681 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
667 else 682 else
668 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 683 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
669 ha->request_q_length; 684 req->length;
670 685
671 if (IS_QLA2200(ha) && 686 if (IS_QLA2200(ha) &&
672 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 687 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
673 /* Limit firmware transfer size with a 2200A */ 688 /* Limit firmware transfer size with a 2200A */
674 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 689 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
675 ha->host_no)); 690 vha->host_no));
676 691
677 ha->device_type |= DT_ISP2200A; 692 ha->device_type |= DT_ISP2200A;
678 ha->fw_transfer_size = 128; 693 ha->fw_transfer_size = 128;
@@ -681,11 +696,11 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
681 /* Wrap Incoming Mailboxes Test. */ 696 /* Wrap Incoming Mailboxes Test. */
682 spin_unlock_irqrestore(&ha->hardware_lock, flags); 697 spin_unlock_irqrestore(&ha->hardware_lock, flags);
683 698
684 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", ha->host_no)); 699 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
685 rval = qla2x00_mbx_reg_test(ha); 700 rval = qla2x00_mbx_reg_test(vha);
686 if (rval) { 701 if (rval) {
687 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 702 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
688 ha->host_no)); 703 vha->host_no));
689 qla_printk(KERN_WARNING, ha, 704 qla_printk(KERN_WARNING, ha,
690 "Failed mailbox send register test\n"); 705 "Failed mailbox send register test\n");
691 } 706 }
@@ -698,7 +713,7 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
698chip_diag_failed: 713chip_diag_failed:
699 if (rval) 714 if (rval)
700 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 715 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
701 "****\n", ha->host_no)); 716 "****\n", vha->host_no));
702 717
703 spin_unlock_irqrestore(&ha->hardware_lock, flags); 718 spin_unlock_irqrestore(&ha->hardware_lock, flags);
704 719
@@ -712,19 +727,21 @@ chip_diag_failed:
712 * Returns 0 on success. 727 * Returns 0 on success.
713 */ 728 */
714int 729int
715qla24xx_chip_diag(scsi_qla_host_t *ha) 730qla24xx_chip_diag(scsi_qla_host_t *vha)
716{ 731{
717 int rval; 732 int rval;
733 struct qla_hw_data *ha = vha->hw;
734 struct req_que *req = ha->req_q_map[0];
718 735
719 /* Perform RISC reset. */ 736 /* Perform RISC reset. */
720 qla24xx_reset_risc(ha); 737 qla24xx_reset_risc(vha);
721 738
722 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length; 739 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
723 740
724 rval = qla2x00_mbx_reg_test(ha); 741 rval = qla2x00_mbx_reg_test(vha);
725 if (rval) { 742 if (rval) {
726 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 743 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
727 ha->host_no)); 744 vha->host_no));
728 qla_printk(KERN_WARNING, ha, 745 qla_printk(KERN_WARNING, ha,
729 "Failed mailbox send register test\n"); 746 "Failed mailbox send register test\n");
730 } else { 747 } else {
@@ -736,13 +753,16 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
736} 753}
737 754
738void 755void
739qla2x00_alloc_fw_dump(scsi_qla_host_t *ha) 756qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
740{ 757{
741 int rval; 758 int rval;
742 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 759 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
743 eft_size, fce_size; 760 eft_size, fce_size, mq_size;
744 dma_addr_t tc_dma; 761 dma_addr_t tc_dma;
745 void *tc; 762 void *tc;
763 struct qla_hw_data *ha = vha->hw;
764 struct req_que *req = ha->req_q_map[0];
765 struct rsp_que *rsp = ha->rsp_q_map[0];
746 766
747 if (ha->fw_dump) { 767 if (ha->fw_dump) {
748 qla_printk(KERN_WARNING, ha, 768 qla_printk(KERN_WARNING, ha,
@@ -751,7 +771,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
751 } 771 }
752 772
753 ha->fw_dumped = 0; 773 ha->fw_dumped = 0;
754 fixed_size = mem_size = eft_size = fce_size = 0; 774 fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
755 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 775 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
756 fixed_size = sizeof(struct qla2100_fw_dump); 776 fixed_size = sizeof(struct qla2100_fw_dump);
757 } else if (IS_QLA23XX(ha)) { 777 } else if (IS_QLA23XX(ha)) {
@@ -760,10 +780,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
760 sizeof(uint16_t); 780 sizeof(uint16_t);
761 } else if (IS_FWI2_CAPABLE(ha)) { 781 } else if (IS_FWI2_CAPABLE(ha)) {
762 fixed_size = IS_QLA25XX(ha) ? 782 fixed_size = IS_QLA25XX(ha) ?
763 offsetof(struct qla25xx_fw_dump, ext_mem): 783 offsetof(struct qla25xx_fw_dump, ext_mem) :
764 offsetof(struct qla24xx_fw_dump, ext_mem); 784 offsetof(struct qla24xx_fw_dump, ext_mem);
765 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 785 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
766 sizeof(uint32_t); 786 sizeof(uint32_t);
787 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain);
767 789
768 /* Allocate memory for Fibre Channel Event Buffer. */ 790 /* Allocate memory for Fibre Channel Event Buffer. */
769 if (!IS_QLA25XX(ha)) 791 if (!IS_QLA25XX(ha))
@@ -778,7 +800,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
778 } 800 }
779 801
780 memset(tc, 0, FCE_SIZE); 802 memset(tc, 0, FCE_SIZE);
781 rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS, 803 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
782 ha->fce_mb, &ha->fce_bufs); 804 ha->fce_mb, &ha->fce_bufs);
783 if (rval) { 805 if (rval) {
784 qla_printk(KERN_WARNING, ha, "Unable to initialize " 806 qla_printk(KERN_WARNING, ha, "Unable to initialize "
@@ -807,7 +829,7 @@ try_eft:
807 } 829 }
808 830
809 memset(tc, 0, EFT_SIZE); 831 memset(tc, 0, EFT_SIZE);
810 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS); 832 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
811 if (rval) { 833 if (rval) {
812 qla_printk(KERN_WARNING, ha, "Unable to initialize " 834 qla_printk(KERN_WARNING, ha, "Unable to initialize "
813 "EFT (%d).\n", rval); 835 "EFT (%d).\n", rval);
@@ -824,12 +846,12 @@ try_eft:
824 ha->eft = tc; 846 ha->eft = tc;
825 } 847 }
826cont_alloc: 848cont_alloc:
827 req_q_size = ha->request_q_length * sizeof(request_t); 849 req_q_size = req->length * sizeof(request_t);
828 rsp_q_size = ha->response_q_length * sizeof(response_t); 850 rsp_q_size = rsp->length * sizeof(response_t);
829 851
830 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 852 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
831 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
832 eft_size + fce_size; 854 mq_size + eft_size + fce_size;
833 855
834 ha->fw_dump = vmalloc(dump_size); 856 ha->fw_dump = vmalloc(dump_size);
835 if (!ha->fw_dump) { 857 if (!ha->fw_dump) {
@@ -844,7 +866,6 @@ cont_alloc:
844 } 866 }
845 return; 867 return;
846 } 868 }
847
848 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", 869 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
849 dump_size / 1024); 870 dump_size / 1024);
850 871
@@ -875,27 +896,29 @@ cont_alloc:
875 * Returns 0 on success. 896 * Returns 0 on success.
876 */ 897 */
877static void 898static void
878qla2x00_resize_request_q(scsi_qla_host_t *ha) 899qla2x00_resize_request_q(scsi_qla_host_t *vha)
879{ 900{
880 int rval; 901 int rval;
881 uint16_t fw_iocb_cnt = 0; 902 uint16_t fw_iocb_cnt = 0;
882 uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM; 903 uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM;
883 dma_addr_t request_dma; 904 dma_addr_t request_dma;
884 request_t *request_ring; 905 request_t *request_ring;
906 struct qla_hw_data *ha = vha->hw;
907 struct req_que *req = ha->req_q_map[0];
885 908
886 /* Valid only on recent ISPs. */ 909 /* Valid only on recent ISPs. */
887 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 910 if (IS_QLA2100(ha) || IS_QLA2200(ha))
888 return; 911 return;
889 912
890 /* Retrieve IOCB counts available to the firmware. */ 913 /* Retrieve IOCB counts available to the firmware. */
891 rval = qla2x00_get_resource_cnts(ha, NULL, NULL, NULL, &fw_iocb_cnt, 914 rval = qla2x00_get_resource_cnts(vha, NULL, NULL, NULL, &fw_iocb_cnt,
892 &ha->max_npiv_vports); 915 &ha->max_npiv_vports);
893 if (rval) 916 if (rval)
894 return; 917 return;
895 /* No point in continuing if current settings are sufficient. */ 918 /* No point in continuing if current settings are sufficient. */
896 if (fw_iocb_cnt < 1024) 919 if (fw_iocb_cnt < 1024)
897 return; 920 return;
898 if (ha->request_q_length >= request_q_length) 921 if (req->length >= request_q_length)
899 return; 922 return;
900 923
901 /* Attempt to claim larger area for request queue. */ 924 /* Attempt to claim larger area for request queue. */
@@ -909,17 +932,17 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
909 qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n", 932 qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n",
910 (ha->fw_memory_size + 1) / 1024); 933 (ha->fw_memory_size + 1) / 1024);
911 qla_printk(KERN_INFO, ha, "Resizing request queue depth " 934 qla_printk(KERN_INFO, ha, "Resizing request queue depth "
912 "(%d -> %d)...\n", ha->request_q_length, request_q_length); 935 "(%d -> %d)...\n", req->length, request_q_length);
913 936
914 /* Clear old allocations. */ 937 /* Clear old allocations. */
915 dma_free_coherent(&ha->pdev->dev, 938 dma_free_coherent(&ha->pdev->dev,
916 (ha->request_q_length + 1) * sizeof(request_t), ha->request_ring, 939 (req->length + 1) * sizeof(request_t), req->ring,
917 ha->request_dma); 940 req->dma);
918 941
919 /* Begin using larger queue. */ 942 /* Begin using larger queue. */
920 ha->request_q_length = request_q_length; 943 req->length = request_q_length;
921 ha->request_ring = request_ring; 944 req->ring = request_ring;
922 ha->request_dma = request_dma; 945 req->dma = request_dma;
923} 946}
924 947
925/** 948/**
@@ -929,10 +952,11 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
929 * Returns 0 on success. 952 * Returns 0 on success.
930 */ 953 */
931static int 954static int
932qla2x00_setup_chip(scsi_qla_host_t *ha) 955qla2x00_setup_chip(scsi_qla_host_t *vha)
933{ 956{
934 int rval; 957 int rval;
935 uint32_t srisc_address = 0; 958 uint32_t srisc_address = 0;
959 struct qla_hw_data *ha = vha->hw;
936 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 960 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
937 unsigned long flags; 961 unsigned long flags;
938 962
@@ -945,28 +969,27 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
945 } 969 }
946 970
947 /* Load firmware sequences */ 971 /* Load firmware sequences */
948 rval = ha->isp_ops->load_risc(ha, &srisc_address); 972 rval = ha->isp_ops->load_risc(vha, &srisc_address);
949 if (rval == QLA_SUCCESS) { 973 if (rval == QLA_SUCCESS) {
950 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 974 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
951 "code.\n", ha->host_no)); 975 "code.\n", vha->host_no));
952 976
953 rval = qla2x00_verify_checksum(ha, srisc_address); 977 rval = qla2x00_verify_checksum(vha, srisc_address);
954 if (rval == QLA_SUCCESS) { 978 if (rval == QLA_SUCCESS) {
955 /* Start firmware execution. */ 979 /* Start firmware execution. */
956 DEBUG(printk("scsi(%ld): Checksum OK, start " 980 DEBUG(printk("scsi(%ld): Checksum OK, start "
957 "firmware.\n", ha->host_no)); 981 "firmware.\n", vha->host_no));
958 982
959 rval = qla2x00_execute_fw(ha, srisc_address); 983 rval = qla2x00_execute_fw(vha, srisc_address);
960 /* Retrieve firmware information. */ 984 /* Retrieve firmware information. */
961 if (rval == QLA_SUCCESS && ha->fw_major_version == 0) { 985 if (rval == QLA_SUCCESS && ha->fw_major_version == 0) {
962 qla2x00_get_fw_version(ha, 986 qla2x00_get_fw_version(vha,
963 &ha->fw_major_version, 987 &ha->fw_major_version,
964 &ha->fw_minor_version, 988 &ha->fw_minor_version,
965 &ha->fw_subminor_version, 989 &ha->fw_subminor_version,
966 &ha->fw_attributes, &ha->fw_memory_size); 990 &ha->fw_attributes, &ha->fw_memory_size);
967 ha->flags.npiv_supported = 0; 991 ha->flags.npiv_supported = 0;
968 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) || 992 if (IS_QLA2XXX_MIDTYPE(ha) &&
969 IS_QLA84XX(ha)) &&
970 (ha->fw_attributes & BIT_2)) { 993 (ha->fw_attributes & BIT_2)) {
971 ha->flags.npiv_supported = 1; 994 ha->flags.npiv_supported = 1;
972 if ((!ha->max_npiv_vports) || 995 if ((!ha->max_npiv_vports) ||
@@ -975,15 +998,15 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
975 ha->max_npiv_vports = 998 ha->max_npiv_vports =
976 MIN_MULTI_ID_FABRIC - 1; 999 MIN_MULTI_ID_FABRIC - 1;
977 } 1000 }
978 qla2x00_resize_request_q(ha); 1001 qla2x00_resize_request_q(vha);
979 1002
980 if (ql2xallocfwdump) 1003 if (ql2xallocfwdump)
981 qla2x00_alloc_fw_dump(ha); 1004 qla2x00_alloc_fw_dump(vha);
982 } 1005 }
983 } else { 1006 } else {
984 DEBUG2(printk(KERN_INFO 1007 DEBUG2(printk(KERN_INFO
985 "scsi(%ld): ISP Firmware failed checksum.\n", 1008 "scsi(%ld): ISP Firmware failed checksum.\n",
986 ha->host_no)); 1009 vha->host_no));
987 } 1010 }
988 } 1011 }
989 1012
@@ -1002,7 +1025,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
1002 1025
1003 if (rval) { 1026 if (rval) {
1004 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1027 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1005 ha->host_no)); 1028 vha->host_no));
1006 } 1029 }
1007 1030
1008 return (rval); 1031 return (rval);
@@ -1017,14 +1040,14 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
1017 * 1040 *
1018 * Returns 0 on success. 1041 * Returns 0 on success.
1019 */ 1042 */
1020static void 1043void
1021qla2x00_init_response_q_entries(scsi_qla_host_t *ha) 1044qla2x00_init_response_q_entries(struct rsp_que *rsp)
1022{ 1045{
1023 uint16_t cnt; 1046 uint16_t cnt;
1024 response_t *pkt; 1047 response_t *pkt;
1025 1048
1026 pkt = ha->response_ring_ptr; 1049 pkt = rsp->ring_ptr;
1027 for (cnt = 0; cnt < ha->response_q_length; cnt++) { 1050 for (cnt = 0; cnt < rsp->length; cnt++) {
1028 pkt->signature = RESPONSE_PROCESSED; 1051 pkt->signature = RESPONSE_PROCESSED;
1029 pkt++; 1052 pkt++;
1030 } 1053 }
@@ -1038,19 +1061,20 @@ qla2x00_init_response_q_entries(scsi_qla_host_t *ha)
1038 * Returns 0 on success. 1061 * Returns 0 on success.
1039 */ 1062 */
1040void 1063void
1041qla2x00_update_fw_options(scsi_qla_host_t *ha) 1064qla2x00_update_fw_options(scsi_qla_host_t *vha)
1042{ 1065{
1043 uint16_t swing, emphasis, tx_sens, rx_sens; 1066 uint16_t swing, emphasis, tx_sens, rx_sens;
1067 struct qla_hw_data *ha = vha->hw;
1044 1068
1045 memset(ha->fw_options, 0, sizeof(ha->fw_options)); 1069 memset(ha->fw_options, 0, sizeof(ha->fw_options));
1046 qla2x00_get_fw_options(ha, ha->fw_options); 1070 qla2x00_get_fw_options(vha, ha->fw_options);
1047 1071
1048 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1072 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1049 return; 1073 return;
1050 1074
1051 /* Serial Link options. */ 1075 /* Serial Link options. */
1052 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1076 DEBUG3(printk("scsi(%ld): Serial link options:\n",
1053 ha->host_no)); 1077 vha->host_no));
1054 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1078 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1055 sizeof(ha->fw_seriallink_options))); 1079 sizeof(ha->fw_seriallink_options)));
1056 1080
@@ -1108,19 +1132,20 @@ qla2x00_update_fw_options(scsi_qla_host_t *ha)
1108 ha->fw_options[2] |= BIT_13; 1132 ha->fw_options[2] |= BIT_13;
1109 1133
1110 /* Update firmware options. */ 1134 /* Update firmware options. */
1111 qla2x00_set_fw_options(ha, ha->fw_options); 1135 qla2x00_set_fw_options(vha, ha->fw_options);
1112} 1136}
1113 1137
1114void 1138void
1115qla24xx_update_fw_options(scsi_qla_host_t *ha) 1139qla24xx_update_fw_options(scsi_qla_host_t *vha)
1116{ 1140{
1117 int rval; 1141 int rval;
1142 struct qla_hw_data *ha = vha->hw;
1118 1143
1119 /* Update Serial Link options. */ 1144 /* Update Serial Link options. */
1120 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) 1145 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1121 return; 1146 return;
1122 1147
1123 rval = qla2x00_set_serdes_params(ha, 1148 rval = qla2x00_set_serdes_params(vha,
1124 le16_to_cpu(ha->fw_seriallink_options24[1]), 1149 le16_to_cpu(ha->fw_seriallink_options24[1]),
1125 le16_to_cpu(ha->fw_seriallink_options24[2]), 1150 le16_to_cpu(ha->fw_seriallink_options24[2]),
1126 le16_to_cpu(ha->fw_seriallink_options24[3])); 1151 le16_to_cpu(ha->fw_seriallink_options24[3]));
@@ -1131,19 +1156,22 @@ qla24xx_update_fw_options(scsi_qla_host_t *ha)
1131} 1156}
1132 1157
1133void 1158void
1134qla2x00_config_rings(struct scsi_qla_host *ha) 1159qla2x00_config_rings(struct scsi_qla_host *vha)
1135{ 1160{
1161 struct qla_hw_data *ha = vha->hw;
1136 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1162 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1163 struct req_que *req = ha->req_q_map[0];
1164 struct rsp_que *rsp = ha->rsp_q_map[0];
1137 1165
1138 /* Setup ring parameters in initialization control block. */ 1166 /* Setup ring parameters in initialization control block. */
1139 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); 1167 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1140 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0); 1168 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1141 ha->init_cb->request_q_length = cpu_to_le16(ha->request_q_length); 1169 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1142 ha->init_cb->response_q_length = cpu_to_le16(ha->response_q_length); 1170 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1143 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma)); 1171 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1144 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma)); 1172 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1145 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma)); 1173 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1146 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma)); 1174 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1147 1175
1148 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); 1176 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1149 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); 1177 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -1153,27 +1181,62 @@ qla2x00_config_rings(struct scsi_qla_host *ha)
1153} 1181}
1154 1182
1155void 1183void
1156qla24xx_config_rings(struct scsi_qla_host *ha) 1184qla24xx_config_rings(struct scsi_qla_host *vha)
1157{ 1185{
1158 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1186 struct qla_hw_data *ha = vha->hw;
1187 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1188 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1189 struct qla_msix_entry *msix;
1159 struct init_cb_24xx *icb; 1190 struct init_cb_24xx *icb;
1191 uint16_t rid = 0;
1192 struct req_que *req = ha->req_q_map[0];
1193 struct rsp_que *rsp = ha->rsp_q_map[0];
1160 1194
1161 /* Setup ring parameters in initialization control block. */ 1195 /* Setup ring parameters in initialization control block. */
1162 icb = (struct init_cb_24xx *)ha->init_cb; 1196 icb = (struct init_cb_24xx *)ha->init_cb;
1163 icb->request_q_outpointer = __constant_cpu_to_le16(0); 1197 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1164 icb->response_q_inpointer = __constant_cpu_to_le16(0); 1198 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1165 icb->request_q_length = cpu_to_le16(ha->request_q_length); 1199 icb->request_q_length = cpu_to_le16(req->length);
1166 icb->response_q_length = cpu_to_le16(ha->response_q_length); 1200 icb->response_q_length = cpu_to_le16(rsp->length);
1167 icb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma)); 1201 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1168 icb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma)); 1202 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1169 icb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma)); 1203 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1170 icb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma)); 1204 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1171 1205
1172 WRT_REG_DWORD(&reg->req_q_in, 0); 1206 if (ha->mqenable) {
1173 WRT_REG_DWORD(&reg->req_q_out, 0); 1207 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1174 WRT_REG_DWORD(&reg->rsp_q_in, 0); 1208 icb->rid = __constant_cpu_to_le16(rid);
1175 WRT_REG_DWORD(&reg->rsp_q_out, 0); 1209 if (ha->flags.msix_enabled) {
1176 RD_REG_DWORD(&reg->rsp_q_out); 1210 msix = &ha->msix_entries[1];
1211 DEBUG2_17(printk(KERN_INFO
1212 "Reistering vector 0x%x for base que\n", msix->entry));
1213 icb->msix = cpu_to_le16(msix->entry);
1214 }
1215 /* Use alternate PCI bus number */
1216 if (MSB(rid))
1217 icb->firmware_options_2 |=
1218 __constant_cpu_to_le32(BIT_19);
1219 /* Use alternate PCI devfn */
1220 if (LSB(rid))
1221 icb->firmware_options_2 |=
1222 __constant_cpu_to_le32(BIT_18);
1223
1224 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
1225 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1226 ha->rsp_q_map[0]->options = icb->firmware_options_2;
1227
1228 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
1229 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
1230 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
1231 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
1232 } else {
1233 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
1234 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
1235 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1236 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1237 }
1238 /* PCI posting */
1239 RD_REG_DWORD(&ioreg->hccr);
1177} 1240}
1178 1241
1179/** 1242/**
@@ -1186,11 +1249,14 @@ qla24xx_config_rings(struct scsi_qla_host *ha)
1186 * Returns 0 on success. 1249 * Returns 0 on success.
1187 */ 1250 */
1188static int 1251static int
1189qla2x00_init_rings(scsi_qla_host_t *ha) 1252qla2x00_init_rings(scsi_qla_host_t *vha)
1190{ 1253{
1191 int rval; 1254 int rval;
1192 unsigned long flags = 0; 1255 unsigned long flags = 0;
1193 int cnt; 1256 int cnt;
1257 struct qla_hw_data *ha = vha->hw;
1258 struct req_que *req = ha->req_q_map[0];
1259 struct rsp_que *rsp = ha->rsp_q_map[0];
1194 struct mid_init_cb_24xx *mid_init_cb = 1260 struct mid_init_cb_24xx *mid_init_cb =
1195 (struct mid_init_cb_24xx *) ha->init_cb; 1261 (struct mid_init_cb_24xx *) ha->init_cb;
1196 1262
@@ -1198,45 +1264,45 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1198 1264
1199 /* Clear outstanding commands array. */ 1265 /* Clear outstanding commands array. */
1200 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1266 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1201 ha->outstanding_cmds[cnt] = NULL; 1267 req->outstanding_cmds[cnt] = NULL;
1202 1268
1203 ha->current_outstanding_cmd = 0; 1269 req->current_outstanding_cmd = 0;
1204 1270
1205 /* Clear RSCN queue. */ 1271 /* Clear RSCN queue. */
1206 ha->rscn_in_ptr = 0; 1272 vha->rscn_in_ptr = 0;
1207 ha->rscn_out_ptr = 0; 1273 vha->rscn_out_ptr = 0;
1208 1274
1209 /* Initialize firmware. */ 1275 /* Initialize firmware. */
1210 ha->request_ring_ptr = ha->request_ring; 1276 req->ring_ptr = req->ring;
1211 ha->req_ring_index = 0; 1277 req->ring_index = 0;
1212 ha->req_q_cnt = ha->request_q_length; 1278 req->cnt = req->length;
1213 ha->response_ring_ptr = ha->response_ring; 1279 rsp->ring_ptr = rsp->ring;
1214 ha->rsp_ring_index = 0; 1280 rsp->ring_index = 0;
1215 1281
1216 /* Initialize response queue entries */ 1282 /* Initialize response queue entries */
1217 qla2x00_init_response_q_entries(ha); 1283 qla2x00_init_response_q_entries(rsp);
1218 1284
1219 ha->isp_ops->config_rings(ha); 1285 ha->isp_ops->config_rings(vha);
1220 1286
1221 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1222 1288
1223 /* Update any ISP specific firmware options before initialization. */ 1289 /* Update any ISP specific firmware options before initialization. */
1224 ha->isp_ops->update_fw_options(ha); 1290 ha->isp_ops->update_fw_options(vha);
1225 1291
1226 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no)); 1292 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1227 1293
1228 if (ha->flags.npiv_supported) 1294 if (ha->flags.npiv_supported)
1229 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); 1295 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1230 1296
1231 mid_init_cb->options = __constant_cpu_to_le16(BIT_1); 1297 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1232 1298
1233 rval = qla2x00_init_firmware(ha, ha->init_cb_size); 1299 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1234 if (rval) { 1300 if (rval) {
1235 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1301 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1236 ha->host_no)); 1302 vha->host_no));
1237 } else { 1303 } else {
1238 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1304 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1239 ha->host_no)); 1305 vha->host_no));
1240 } 1306 }
1241 1307
1242 return (rval); 1308 return (rval);
@@ -1249,13 +1315,14 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1249 * Returns 0 on success. 1315 * Returns 0 on success.
1250 */ 1316 */
1251static int 1317static int
1252qla2x00_fw_ready(scsi_qla_host_t *ha) 1318qla2x00_fw_ready(scsi_qla_host_t *vha)
1253{ 1319{
1254 int rval; 1320 int rval;
1255 unsigned long wtime, mtime, cs84xx_time; 1321 unsigned long wtime, mtime, cs84xx_time;
1256 uint16_t min_wait; /* Minimum wait time if loop is down */ 1322 uint16_t min_wait; /* Minimum wait time if loop is down */
1257 uint16_t wait_time; /* Wait time if loop is coming ready */ 1323 uint16_t wait_time; /* Wait time if loop is coming ready */
1258 uint16_t state[3]; 1324 uint16_t state[3];
1325 struct qla_hw_data *ha = vha->hw;
1259 1326
1260 rval = QLA_SUCCESS; 1327 rval = QLA_SUCCESS;
1261 1328
@@ -1277,29 +1344,29 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1277 wtime = jiffies + (wait_time * HZ); 1344 wtime = jiffies + (wait_time * HZ);
1278 1345
1279 /* Wait for ISP to finish LIP */ 1346 /* Wait for ISP to finish LIP */
1280 if (!ha->flags.init_done) 1347 if (!vha->flags.init_done)
1281 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1348 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1282 1349
1283 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n", 1350 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1284 ha->host_no)); 1351 vha->host_no));
1285 1352
1286 do { 1353 do {
1287 rval = qla2x00_get_firmware_state(ha, state); 1354 rval = qla2x00_get_firmware_state(vha, state);
1288 if (rval == QLA_SUCCESS) { 1355 if (rval == QLA_SUCCESS) {
1289 if (state[0] < FSTATE_LOSS_OF_SYNC) { 1356 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1290 ha->device_flags &= ~DFLG_NO_CABLE; 1357 vha->device_flags &= ~DFLG_NO_CABLE;
1291 } 1358 }
1292 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1359 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1293 DEBUG16(printk("scsi(%ld): fw_state=%x " 1360 DEBUG16(printk("scsi(%ld): fw_state=%x "
1294 "84xx=%x.\n", ha->host_no, state[0], 1361 "84xx=%x.\n", vha->host_no, state[0],
1295 state[2])); 1362 state[2]));
1296 if ((state[2] & FSTATE_LOGGED_IN) && 1363 if ((state[2] & FSTATE_LOGGED_IN) &&
1297 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1364 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1298 DEBUG16(printk("scsi(%ld): Sending " 1365 DEBUG16(printk("scsi(%ld): Sending "
1299 "verify iocb.\n", ha->host_no)); 1366 "verify iocb.\n", vha->host_no));
1300 1367
1301 cs84xx_time = jiffies; 1368 cs84xx_time = jiffies;
1302 rval = qla84xx_init_chip(ha); 1369 rval = qla84xx_init_chip(vha);
1303 if (rval != QLA_SUCCESS) 1370 if (rval != QLA_SUCCESS)
1304 break; 1371 break;
1305 1372
@@ -1309,13 +1376,13 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1309 mtime += cs84xx_time; 1376 mtime += cs84xx_time;
1310 DEBUG16(printk("scsi(%ld): Increasing " 1377 DEBUG16(printk("scsi(%ld): Increasing "
1311 "wait time by %ld. New time %ld\n", 1378 "wait time by %ld. New time %ld\n",
1312 ha->host_no, cs84xx_time, wtime)); 1379 vha->host_no, cs84xx_time, wtime));
1313 } 1380 }
1314 } else if (state[0] == FSTATE_READY) { 1381 } else if (state[0] == FSTATE_READY) {
1315 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1382 DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1316 ha->host_no)); 1383 vha->host_no));
1317 1384
1318 qla2x00_get_retry_cnt(ha, &ha->retry_count, 1385 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1319 &ha->login_timeout, &ha->r_a_tov); 1386 &ha->login_timeout, &ha->r_a_tov);
1320 1387
1321 rval = QLA_SUCCESS; 1388 rval = QLA_SUCCESS;
@@ -1324,7 +1391,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1324 1391
1325 rval = QLA_FUNCTION_FAILED; 1392 rval = QLA_FUNCTION_FAILED;
1326 1393
1327 if (atomic_read(&ha->loop_down_timer) && 1394 if (atomic_read(&vha->loop_down_timer) &&
1328 state[0] != FSTATE_READY) { 1395 state[0] != FSTATE_READY) {
1329 /* Loop down. Timeout on min_wait for states 1396 /* Loop down. Timeout on min_wait for states
1330 * other than Wait for Login. 1397 * other than Wait for Login.
@@ -1333,7 +1400,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1333 qla_printk(KERN_INFO, ha, 1400 qla_printk(KERN_INFO, ha,
1334 "Cable is unplugged...\n"); 1401 "Cable is unplugged...\n");
1335 1402
1336 ha->device_flags |= DFLG_NO_CABLE; 1403 vha->device_flags |= DFLG_NO_CABLE;
1337 break; 1404 break;
1338 } 1405 }
1339 } 1406 }
@@ -1350,15 +1417,15 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1350 msleep(500); 1417 msleep(500);
1351 1418
1352 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1419 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1353 ha->host_no, state[0], jiffies)); 1420 vha->host_no, state[0], jiffies));
1354 } while (1); 1421 } while (1);
1355 1422
1356 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1423 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1357 ha->host_no, state[0], jiffies)); 1424 vha->host_no, state[0], jiffies));
1358 1425
1359 if (rval) { 1426 if (rval) {
1360 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1427 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1361 ha->host_no)); 1428 vha->host_no));
1362 } 1429 }
1363 1430
1364 return (rval); 1431 return (rval);
@@ -1378,7 +1445,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1378* Kernel context. 1445* Kernel context.
1379*/ 1446*/
1380static int 1447static int
1381qla2x00_configure_hba(scsi_qla_host_t *ha) 1448qla2x00_configure_hba(scsi_qla_host_t *vha)
1382{ 1449{
1383 int rval; 1450 int rval;
1384 uint16_t loop_id; 1451 uint16_t loop_id;
@@ -1388,19 +1455,20 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1388 uint8_t area; 1455 uint8_t area;
1389 uint8_t domain; 1456 uint8_t domain;
1390 char connect_type[22]; 1457 char connect_type[22];
1458 struct qla_hw_data *ha = vha->hw;
1391 1459
1392 /* Get host addresses. */ 1460 /* Get host addresses. */
1393 rval = qla2x00_get_adapter_id(ha, 1461 rval = qla2x00_get_adapter_id(vha,
1394 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); 1462 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1395 if (rval != QLA_SUCCESS) { 1463 if (rval != QLA_SUCCESS) {
1396 if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) || 1464 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1397 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 1465 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1398 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 1466 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1399 __func__, ha->host_no)); 1467 __func__, vha->host_no));
1400 } else { 1468 } else {
1401 qla_printk(KERN_WARNING, ha, 1469 qla_printk(KERN_WARNING, ha,
1402 "ERROR -- Unable to get host loop ID.\n"); 1470 "ERROR -- Unable to get host loop ID.\n");
1403 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1471 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1404 } 1472 }
1405 return (rval); 1473 return (rval);
1406 } 1474 }
@@ -1411,7 +1479,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1411 return (QLA_FUNCTION_FAILED); 1479 return (QLA_FUNCTION_FAILED);
1412 } 1480 }
1413 1481
1414 ha->loop_id = loop_id; 1482 vha->loop_id = loop_id;
1415 1483
1416 /* initialize */ 1484 /* initialize */
1417 ha->min_external_loopid = SNS_FIRST_LOOP_ID; 1485 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
@@ -1421,14 +1489,14 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1421 switch (topo) { 1489 switch (topo) {
1422 case 0: 1490 case 0:
1423 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 1491 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
1424 ha->host_no)); 1492 vha->host_no));
1425 ha->current_topology = ISP_CFG_NL; 1493 ha->current_topology = ISP_CFG_NL;
1426 strcpy(connect_type, "(Loop)"); 1494 strcpy(connect_type, "(Loop)");
1427 break; 1495 break;
1428 1496
1429 case 1: 1497 case 1:
1430 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 1498 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
1431 ha->host_no)); 1499 vha->host_no));
1432 ha->switch_cap = sw_cap; 1500 ha->switch_cap = sw_cap;
1433 ha->current_topology = ISP_CFG_FL; 1501 ha->current_topology = ISP_CFG_FL;
1434 strcpy(connect_type, "(FL_Port)"); 1502 strcpy(connect_type, "(FL_Port)");
@@ -1436,7 +1504,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1436 1504
1437 case 2: 1505 case 2:
1438 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 1506 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
1439 ha->host_no)); 1507 vha->host_no));
1440 ha->operating_mode = P2P; 1508 ha->operating_mode = P2P;
1441 ha->current_topology = ISP_CFG_N; 1509 ha->current_topology = ISP_CFG_N;
1442 strcpy(connect_type, "(N_Port-to-N_Port)"); 1510 strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -1444,7 +1512,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1444 1512
1445 case 3: 1513 case 3:
1446 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 1514 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
1447 ha->host_no)); 1515 vha->host_no));
1448 ha->switch_cap = sw_cap; 1516 ha->switch_cap = sw_cap;
1449 ha->operating_mode = P2P; 1517 ha->operating_mode = P2P;
1450 ha->current_topology = ISP_CFG_F; 1518 ha->current_topology = ISP_CFG_F;
@@ -1454,7 +1522,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1454 default: 1522 default:
1455 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 1523 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
1456 "Using NL.\n", 1524 "Using NL.\n",
1457 ha->host_no, topo)); 1525 vha->host_no, topo));
1458 ha->current_topology = ISP_CFG_NL; 1526 ha->current_topology = ISP_CFG_NL;
1459 strcpy(connect_type, "(Loop)"); 1527 strcpy(connect_type, "(Loop)");
1460 break; 1528 break;
@@ -1462,29 +1530,31 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1462 1530
1463 /* Save Host port and loop ID. */ 1531 /* Save Host port and loop ID. */
1464 /* byte order - Big Endian */ 1532 /* byte order - Big Endian */
1465 ha->d_id.b.domain = domain; 1533 vha->d_id.b.domain = domain;
1466 ha->d_id.b.area = area; 1534 vha->d_id.b.area = area;
1467 ha->d_id.b.al_pa = al_pa; 1535 vha->d_id.b.al_pa = al_pa;
1468 1536
1469 if (!ha->flags.init_done) 1537 if (!vha->flags.init_done)
1470 qla_printk(KERN_INFO, ha, 1538 qla_printk(KERN_INFO, ha,
1471 "Topology - %s, Host Loop address 0x%x\n", 1539 "Topology - %s, Host Loop address 0x%x\n",
1472 connect_type, ha->loop_id); 1540 connect_type, vha->loop_id);
1473 1541
1474 if (rval) { 1542 if (rval) {
1475 DEBUG2_3(printk("scsi(%ld): FAILED.\n", ha->host_no)); 1543 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
1476 } else { 1544 } else {
1477 DEBUG3(printk("scsi(%ld): exiting normally.\n", ha->host_no)); 1545 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
1478 } 1546 }
1479 1547
1480 return(rval); 1548 return(rval);
1481} 1549}
1482 1550
1483static inline void 1551static inline void
1484qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def) 1552qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1553 char *def)
1485{ 1554{
1486 char *st, *en; 1555 char *st, *en;
1487 uint16_t index; 1556 uint16_t index;
1557 struct qla_hw_data *ha = vha->hw;
1488 1558
1489 if (memcmp(model, BINZERO, len) != 0) { 1559 if (memcmp(model, BINZERO, len) != 0) {
1490 strncpy(ha->model_number, model, len); 1560 strncpy(ha->model_number, model, len);
@@ -1516,16 +1586,17 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1516 } 1586 }
1517 } 1587 }
1518 if (IS_FWI2_CAPABLE(ha)) 1588 if (IS_FWI2_CAPABLE(ha))
1519 qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc, 1589 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1520 sizeof(ha->model_desc)); 1590 sizeof(ha->model_desc));
1521} 1591}
1522 1592
1523/* On sparc systems, obtain port and node WWN from firmware 1593/* On sparc systems, obtain port and node WWN from firmware
1524 * properties. 1594 * properties.
1525 */ 1595 */
1526static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv) 1596static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
1527{ 1597{
1528#ifdef CONFIG_SPARC 1598#ifdef CONFIG_SPARC
1599 struct qla_hw_data *ha = vha->hw;
1529 struct pci_dev *pdev = ha->pdev; 1600 struct pci_dev *pdev = ha->pdev;
1530 struct device_node *dp = pci_device_to_OF_node(pdev); 1601 struct device_node *dp = pci_device_to_OF_node(pdev);
1531 const u8 *val; 1602 const u8 *val;
@@ -1555,12 +1626,13 @@ static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
1555* 0 = success. 1626* 0 = success.
1556*/ 1627*/
1557int 1628int
1558qla2x00_nvram_config(scsi_qla_host_t *ha) 1629qla2x00_nvram_config(scsi_qla_host_t *vha)
1559{ 1630{
1560 int rval; 1631 int rval;
1561 uint8_t chksum = 0; 1632 uint8_t chksum = 0;
1562 uint16_t cnt; 1633 uint16_t cnt;
1563 uint8_t *dptr1, *dptr2; 1634 uint8_t *dptr1, *dptr2;
1635 struct qla_hw_data *ha = vha->hw;
1564 init_cb_t *icb = ha->init_cb; 1636 init_cb_t *icb = ha->init_cb;
1565 nvram_t *nv = ha->nvram; 1637 nvram_t *nv = ha->nvram;
1566 uint8_t *ptr = ha->nvram; 1638 uint8_t *ptr = ha->nvram;
@@ -1576,11 +1648,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1576 ha->nvram_base = 0x80; 1648 ha->nvram_base = 0x80;
1577 1649
1578 /* Get NVRAM data and calculate checksum. */ 1650 /* Get NVRAM data and calculate checksum. */
1579 ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size); 1651 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
1580 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 1652 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
1581 chksum += *ptr++; 1653 chksum += *ptr++;
1582 1654
1583 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 1655 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
1584 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 1656 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
1585 1657
1586 /* Bad NVRAM data, set defaults parameters. */ 1658 /* Bad NVRAM data, set defaults parameters. */
@@ -1594,7 +1666,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1594 "invalid -- WWPN) defaults.\n"); 1666 "invalid -- WWPN) defaults.\n");
1595 1667
1596 if (chksum) 1668 if (chksum)
1597 qla2xxx_hw_event_log(ha, HW_EVENT_NVRAM_CHKSUM_ERR, 0, 1669 qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
1598 MSW(chksum), LSW(chksum)); 1670 MSW(chksum), LSW(chksum));
1599 1671
1600 /* 1672 /*
@@ -1631,7 +1703,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1631 nv->port_name[3] = 224; 1703 nv->port_name[3] = 224;
1632 nv->port_name[4] = 139; 1704 nv->port_name[4] = 139;
1633 1705
1634 qla2xxx_nvram_wwn_from_ofw(ha, nv); 1706 qla2xxx_nvram_wwn_from_ofw(vha, nv);
1635 1707
1636 nv->login_timeout = 4; 1708 nv->login_timeout = 4;
1637 1709
@@ -1684,7 +1756,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1684 strcpy(ha->model_number, "QLA2300"); 1756 strcpy(ha->model_number, "QLA2300");
1685 } 1757 }
1686 } else { 1758 } else {
1687 qla2x00_set_model_info(ha, nv->model_number, 1759 qla2x00_set_model_info(vha, nv->model_number,
1688 sizeof(nv->model_number), "QLA23xx"); 1760 sizeof(nv->model_number), "QLA23xx");
1689 } 1761 }
1690 } else if (IS_QLA2200(ha)) { 1762 } else if (IS_QLA2200(ha)) {
@@ -1760,8 +1832,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1760 ha->serial0 = icb->port_name[5]; 1832 ha->serial0 = icb->port_name[5];
1761 ha->serial1 = icb->port_name[6]; 1833 ha->serial1 = icb->port_name[6];
1762 ha->serial2 = icb->port_name[7]; 1834 ha->serial2 = icb->port_name[7];
1763 ha->node_name = icb->node_name; 1835 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
1764 ha->port_name = icb->port_name; 1836 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1765 1837
1766 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 1838 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
1767 1839
@@ -1829,10 +1901,10 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1829 icb->response_accumulation_timer = 3; 1901 icb->response_accumulation_timer = 3;
1830 icb->interrupt_delay_timer = 5; 1902 icb->interrupt_delay_timer = 5;
1831 1903
1832 ha->flags.process_response_queue = 1; 1904 vha->flags.process_response_queue = 1;
1833 } else { 1905 } else {
1834 /* Enable ZIO. */ 1906 /* Enable ZIO. */
1835 if (!ha->flags.init_done) { 1907 if (!vha->flags.init_done) {
1836 ha->zio_mode = icb->add_firmware_options[0] & 1908 ha->zio_mode = icb->add_firmware_options[0] &
1837 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 1909 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1838 ha->zio_timer = icb->interrupt_delay_timer ? 1910 ha->zio_timer = icb->interrupt_delay_timer ?
@@ -1840,12 +1912,12 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1840 } 1912 }
1841 icb->add_firmware_options[0] &= 1913 icb->add_firmware_options[0] &=
1842 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 1914 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
1843 ha->flags.process_response_queue = 0; 1915 vha->flags.process_response_queue = 0;
1844 if (ha->zio_mode != QLA_ZIO_DISABLED) { 1916 if (ha->zio_mode != QLA_ZIO_DISABLED) {
1845 ha->zio_mode = QLA_ZIO_MODE_6; 1917 ha->zio_mode = QLA_ZIO_MODE_6;
1846 1918
1847 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 1919 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
1848 "delay (%d us).\n", ha->host_no, ha->zio_mode, 1920 "delay (%d us).\n", vha->host_no, ha->zio_mode,
1849 ha->zio_timer * 100)); 1921 ha->zio_timer * 100));
1850 qla_printk(KERN_INFO, ha, 1922 qla_printk(KERN_INFO, ha,
1851 "ZIO mode %d enabled; timer delay (%d us).\n", 1923 "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -1853,13 +1925,13 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1853 1925
1854 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 1926 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
1855 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 1927 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
1856 ha->flags.process_response_queue = 1; 1928 vha->flags.process_response_queue = 1;
1857 } 1929 }
1858 } 1930 }
1859 1931
1860 if (rval) { 1932 if (rval) {
1861 DEBUG2_3(printk(KERN_WARNING 1933 DEBUG2_3(printk(KERN_WARNING
1862 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); 1934 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
1863 } 1935 }
1864 return (rval); 1936 return (rval);
1865} 1937}
@@ -1870,10 +1942,10 @@ qla2x00_rport_del(void *data)
1870 fc_port_t *fcport = data; 1942 fc_port_t *fcport = data;
1871 struct fc_rport *rport; 1943 struct fc_rport *rport;
1872 1944
1873 spin_lock_irq(fcport->ha->host->host_lock); 1945 spin_lock_irq(fcport->vha->host->host_lock);
1874 rport = fcport->drport; 1946 rport = fcport->drport;
1875 fcport->drport = NULL; 1947 fcport->drport = NULL;
1876 spin_unlock_irq(fcport->ha->host->host_lock); 1948 spin_unlock_irq(fcport->vha->host->host_lock);
1877 if (rport) 1949 if (rport)
1878 fc_remote_port_delete(rport); 1950 fc_remote_port_delete(rport);
1879} 1951}
@@ -1886,7 +1958,7 @@ qla2x00_rport_del(void *data)
1886 * Returns a pointer to the allocated fcport, or NULL, if none available. 1958 * Returns a pointer to the allocated fcport, or NULL, if none available.
1887 */ 1959 */
1888static fc_port_t * 1960static fc_port_t *
1889qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags) 1961qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1890{ 1962{
1891 fc_port_t *fcport; 1963 fc_port_t *fcport;
1892 1964
@@ -1895,8 +1967,8 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1895 return NULL; 1967 return NULL;
1896 1968
1897 /* Setup fcport template structure. */ 1969 /* Setup fcport template structure. */
1898 fcport->ha = ha; 1970 fcport->vha = vha;
1899 fcport->vp_idx = ha->vp_idx; 1971 fcport->vp_idx = vha->vp_idx;
1900 fcport->port_type = FCT_UNKNOWN; 1972 fcport->port_type = FCT_UNKNOWN;
1901 fcport->loop_id = FC_NO_LOOP_ID; 1973 fcport->loop_id = FC_NO_LOOP_ID;
1902 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1974 atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1919,101 +1991,97 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1919 * 2 = database was full and device was not configured. 1991 * 2 = database was full and device was not configured.
1920 */ 1992 */
1921static int 1993static int
1922qla2x00_configure_loop(scsi_qla_host_t *ha) 1994qla2x00_configure_loop(scsi_qla_host_t *vha)
1923{ 1995{
1924 int rval; 1996 int rval;
1925 unsigned long flags, save_flags; 1997 unsigned long flags, save_flags;
1926 1998 struct qla_hw_data *ha = vha->hw;
1927 rval = QLA_SUCCESS; 1999 rval = QLA_SUCCESS;
1928 2000
1929 /* Get Initiator ID */ 2001 /* Get Initiator ID */
1930 if (test_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags)) { 2002 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
1931 rval = qla2x00_configure_hba(ha); 2003 rval = qla2x00_configure_hba(vha);
1932 if (rval != QLA_SUCCESS) { 2004 if (rval != QLA_SUCCESS) {
1933 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2005 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
1934 ha->host_no)); 2006 vha->host_no));
1935 return (rval); 2007 return (rval);
1936 } 2008 }
1937 } 2009 }
1938 2010
1939 save_flags = flags = ha->dpc_flags; 2011 save_flags = flags = vha->dpc_flags;
1940 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2012 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
1941 ha->host_no, flags)); 2013 vha->host_no, flags));
1942 2014
1943 /* 2015 /*
1944 * If we have both an RSCN and PORT UPDATE pending then handle them 2016 * If we have both an RSCN and PORT UPDATE pending then handle them
1945 * both at the same time. 2017 * both at the same time.
1946 */ 2018 */
1947 clear_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2019 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1948 clear_bit(RSCN_UPDATE, &ha->dpc_flags); 2020 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1949 2021
1950 /* Determine what we need to do */ 2022 /* Determine what we need to do */
1951 if (ha->current_topology == ISP_CFG_FL && 2023 if (ha->current_topology == ISP_CFG_FL &&
1952 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2024 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
1953 2025
1954 ha->flags.rscn_queue_overflow = 1; 2026 vha->flags.rscn_queue_overflow = 1;
1955 set_bit(RSCN_UPDATE, &flags); 2027 set_bit(RSCN_UPDATE, &flags);
1956 2028
1957 } else if (ha->current_topology == ISP_CFG_F && 2029 } else if (ha->current_topology == ISP_CFG_F &&
1958 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2030 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
1959 2031
1960 ha->flags.rscn_queue_overflow = 1; 2032 vha->flags.rscn_queue_overflow = 1;
1961 set_bit(RSCN_UPDATE, &flags); 2033 set_bit(RSCN_UPDATE, &flags);
1962 clear_bit(LOCAL_LOOP_UPDATE, &flags); 2034 clear_bit(LOCAL_LOOP_UPDATE, &flags);
1963 2035
1964 } else if (ha->current_topology == ISP_CFG_N) { 2036 } else if (ha->current_topology == ISP_CFG_N) {
1965 clear_bit(RSCN_UPDATE, &flags); 2037 clear_bit(RSCN_UPDATE, &flags);
1966 2038
1967 } else if (!ha->flags.online || 2039 } else if (!vha->flags.online ||
1968 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 2040 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1969 2041
1970 ha->flags.rscn_queue_overflow = 1; 2042 vha->flags.rscn_queue_overflow = 1;
1971 set_bit(RSCN_UPDATE, &flags); 2043 set_bit(RSCN_UPDATE, &flags);
1972 set_bit(LOCAL_LOOP_UPDATE, &flags); 2044 set_bit(LOCAL_LOOP_UPDATE, &flags);
1973 } 2045 }
1974 2046
1975 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2047 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
1976 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2048 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1977 rval = QLA_FUNCTION_FAILED; 2049 rval = QLA_FUNCTION_FAILED;
1978 } else { 2050 else
1979 rval = qla2x00_configure_local_loop(ha); 2051 rval = qla2x00_configure_local_loop(vha);
1980 }
1981 } 2052 }
1982 2053
1983 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2054 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
1984 if (LOOP_TRANSITION(ha)) { 2055 if (LOOP_TRANSITION(vha))
1985 rval = QLA_FUNCTION_FAILED; 2056 rval = QLA_FUNCTION_FAILED;
1986 } else { 2057 else
1987 rval = qla2x00_configure_fabric(ha); 2058 rval = qla2x00_configure_fabric(vha);
1988 }
1989 } 2059 }
1990 2060
1991 if (rval == QLA_SUCCESS) { 2061 if (rval == QLA_SUCCESS) {
1992 if (atomic_read(&ha->loop_down_timer) || 2062 if (atomic_read(&vha->loop_down_timer) ||
1993 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2063 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1994 rval = QLA_FUNCTION_FAILED; 2064 rval = QLA_FUNCTION_FAILED;
1995 } else { 2065 } else {
1996 atomic_set(&ha->loop_state, LOOP_READY); 2066 atomic_set(&vha->loop_state, LOOP_READY);
1997 2067
1998 DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no)); 2068 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
1999 } 2069 }
2000 } 2070 }
2001 2071
2002 if (rval) { 2072 if (rval) {
2003 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2073 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2004 __func__, ha->host_no)); 2074 __func__, vha->host_no));
2005 } else { 2075 } else {
2006 DEBUG3(printk("%s: exiting normally\n", __func__)); 2076 DEBUG3(printk("%s: exiting normally\n", __func__));
2007 } 2077 }
2008 2078
2009 /* Restore state if a resync event occurred during processing */ 2079 /* Restore state if a resync event occurred during processing */
2010 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2080 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2011 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2081 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2012 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2082 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2013 if (test_bit(RSCN_UPDATE, &save_flags)) { 2083 if (test_bit(RSCN_UPDATE, &save_flags))
2014 ha->flags.rscn_queue_overflow = 1; 2084 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2015 set_bit(RSCN_UPDATE, &ha->dpc_flags);
2016 }
2017 } 2085 }
2018 2086
2019 return (rval); 2087 return (rval);
@@ -2032,7 +2100,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2032 * 0 = success. 2100 * 0 = success.
2033 */ 2101 */
2034static int 2102static int
2035qla2x00_configure_local_loop(scsi_qla_host_t *ha) 2103qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2036{ 2104{
2037 int rval, rval2; 2105 int rval, rval2;
2038 int found_devs; 2106 int found_devs;
@@ -2044,18 +2112,18 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2044 char *id_iter; 2112 char *id_iter;
2045 uint16_t loop_id; 2113 uint16_t loop_id;
2046 uint8_t domain, area, al_pa; 2114 uint8_t domain, area, al_pa;
2047 scsi_qla_host_t *pha = to_qla_parent(ha); 2115 struct qla_hw_data *ha = vha->hw;
2048 2116
2049 found_devs = 0; 2117 found_devs = 0;
2050 new_fcport = NULL; 2118 new_fcport = NULL;
2051 entries = MAX_FIBRE_DEVICES; 2119 entries = MAX_FIBRE_DEVICES;
2052 2120
2053 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", ha->host_no)); 2121 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2054 DEBUG3(qla2x00_get_fcal_position_map(ha, NULL)); 2122 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2055 2123
2056 /* Get list of logged in devices. */ 2124 /* Get list of logged in devices. */
2057 memset(ha->gid_list, 0, GID_LIST_SIZE); 2125 memset(ha->gid_list, 0, GID_LIST_SIZE);
2058 rval = qla2x00_get_id_list(ha, ha->gid_list, ha->gid_list_dma, 2126 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2059 &entries); 2127 &entries);
2060 if (rval != QLA_SUCCESS) 2128 if (rval != QLA_SUCCESS)
2061 goto cleanup_allocation; 2129 goto cleanup_allocation;
@@ -2066,7 +2134,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2066 entries * sizeof(struct gid_list_info))); 2134 entries * sizeof(struct gid_list_info)));
2067 2135
2068 /* Allocate temporary fcport for any new fcports discovered. */ 2136 /* Allocate temporary fcport for any new fcports discovered. */
2069 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2137 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2070 if (new_fcport == NULL) { 2138 if (new_fcport == NULL) {
2071 rval = QLA_MEMORY_ALLOC_FAILED; 2139 rval = QLA_MEMORY_ALLOC_FAILED;
2072 goto cleanup_allocation; 2140 goto cleanup_allocation;
@@ -2076,17 +2144,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2076 /* 2144 /*
2077 * Mark local devices that were present with FCF_DEVICE_LOST for now. 2145 * Mark local devices that were present with FCF_DEVICE_LOST for now.
2078 */ 2146 */
2079 list_for_each_entry(fcport, &pha->fcports, list) { 2147 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2080 if (fcport->vp_idx != ha->vp_idx)
2081 continue;
2082
2083 if (atomic_read(&fcport->state) == FCS_ONLINE && 2148 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2084 fcport->port_type != FCT_BROADCAST && 2149 fcport->port_type != FCT_BROADCAST &&
2085 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2150 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2086 2151
2087 DEBUG(printk("scsi(%ld): Marking port lost, " 2152 DEBUG(printk("scsi(%ld): Marking port lost, "
2088 "loop_id=0x%04x\n", 2153 "loop_id=0x%04x\n",
2089 ha->host_no, fcport->loop_id)); 2154 vha->host_no, fcport->loop_id));
2090 2155
2091 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2156 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2092 fcport->flags &= ~FCF_FARP_DONE; 2157 fcport->flags &= ~FCF_FARP_DONE;
@@ -2113,7 +2178,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2113 2178
2114 /* Bypass if not same domain and area of adapter. */ 2179 /* Bypass if not same domain and area of adapter. */
2115 if (area && domain && 2180 if (area && domain &&
2116 (area != ha->d_id.b.area || domain != ha->d_id.b.domain)) 2181 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2117 continue; 2182 continue;
2118 2183
2119 /* Bypass invalid local loop ID. */ 2184 /* Bypass invalid local loop ID. */
@@ -2125,26 +2190,23 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2125 new_fcport->d_id.b.area = area; 2190 new_fcport->d_id.b.area = area;
2126 new_fcport->d_id.b.al_pa = al_pa; 2191 new_fcport->d_id.b.al_pa = al_pa;
2127 new_fcport->loop_id = loop_id; 2192 new_fcport->loop_id = loop_id;
2128 new_fcport->vp_idx = ha->vp_idx; 2193 new_fcport->vp_idx = vha->vp_idx;
2129 rval2 = qla2x00_get_port_database(ha, new_fcport, 0); 2194 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2130 if (rval2 != QLA_SUCCESS) { 2195 if (rval2 != QLA_SUCCESS) {
2131 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2196 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2132 "information -- get_port_database=%x, " 2197 "information -- get_port_database=%x, "
2133 "loop_id=0x%04x\n", 2198 "loop_id=0x%04x\n",
2134 ha->host_no, rval2, new_fcport->loop_id)); 2199 vha->host_no, rval2, new_fcport->loop_id));
2135 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2200 DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2136 ha->host_no)); 2201 vha->host_no));
2137 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 2202 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2138 continue; 2203 continue;
2139 } 2204 }
2140 2205
2141 /* Check for matching device in port list. */ 2206 /* Check for matching device in port list. */
2142 found = 0; 2207 found = 0;
2143 fcport = NULL; 2208 fcport = NULL;
2144 list_for_each_entry(fcport, &pha->fcports, list) { 2209 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2145 if (fcport->vp_idx != ha->vp_idx)
2146 continue;
2147
2148 if (memcmp(new_fcport->port_name, fcport->port_name, 2210 if (memcmp(new_fcport->port_name, fcport->port_name,
2149 WWN_SIZE)) 2211 WWN_SIZE))
2150 continue; 2212 continue;
@@ -2164,17 +2226,15 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2164 if (!found) { 2226 if (!found) {
2165 /* New device, add to fcports list. */ 2227 /* New device, add to fcports list. */
2166 new_fcport->flags &= ~FCF_PERSISTENT_BOUND; 2228 new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
2167 if (ha->parent) { 2229 if (vha->vp_idx) {
2168 new_fcport->ha = ha; 2230 new_fcport->vha = vha;
2169 new_fcport->vp_idx = ha->vp_idx; 2231 new_fcport->vp_idx = vha->vp_idx;
2170 list_add_tail(&new_fcport->vp_fcport,
2171 &ha->vp_fcports);
2172 } 2232 }
2173 list_add_tail(&new_fcport->list, &pha->fcports); 2233 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2174 2234
2175 /* Allocate a new replacement fcport. */ 2235 /* Allocate a new replacement fcport. */
2176 fcport = new_fcport; 2236 fcport = new_fcport;
2177 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2237 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2178 if (new_fcport == NULL) { 2238 if (new_fcport == NULL) {
2179 rval = QLA_MEMORY_ALLOC_FAILED; 2239 rval = QLA_MEMORY_ALLOC_FAILED;
2180 goto cleanup_allocation; 2240 goto cleanup_allocation;
@@ -2185,7 +2245,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2185 /* Base iIDMA settings on HBA port speed. */ 2245 /* Base iIDMA settings on HBA port speed. */
2186 fcport->fp_speed = ha->link_data_rate; 2246 fcport->fp_speed = ha->link_data_rate;
2187 2247
2188 qla2x00_update_fcport(ha, fcport); 2248 qla2x00_update_fcport(vha, fcport);
2189 2249
2190 found_devs++; 2250 found_devs++;
2191 } 2251 }
@@ -2195,24 +2255,25 @@ cleanup_allocation:
2195 2255
2196 if (rval != QLA_SUCCESS) { 2256 if (rval != QLA_SUCCESS) {
2197 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2257 DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2198 "rval=%x\n", ha->host_no, rval)); 2258 "rval=%x\n", vha->host_no, rval));
2199 } 2259 }
2200 2260
2201 if (found_devs) { 2261 if (found_devs) {
2202 ha->device_flags |= DFLG_LOCAL_DEVICES; 2262 vha->device_flags |= DFLG_LOCAL_DEVICES;
2203 ha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES; 2263 vha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
2204 } 2264 }
2205 2265
2206 return (rval); 2266 return (rval);
2207} 2267}
2208 2268
2209static void 2269static void
2210qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2270qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2211{ 2271{
2212#define LS_UNKNOWN 2 2272#define LS_UNKNOWN 2
2213 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2273 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
2214 int rval; 2274 int rval;
2215 uint16_t mb[6]; 2275 uint16_t mb[6];
2276 struct qla_hw_data *ha = vha->hw;
2216 2277
2217 if (!IS_IIDMA_CAPABLE(ha)) 2278 if (!IS_IIDMA_CAPABLE(ha))
2218 return; 2279 return;
@@ -2221,12 +2282,12 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2221 fcport->fp_speed > ha->link_data_rate) 2282 fcport->fp_speed > ha->link_data_rate)
2222 return; 2283 return;
2223 2284
2224 rval = qla2x00_set_idma_speed(ha, fcport->loop_id, fcport->fp_speed, 2285 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2225 mb); 2286 mb);
2226 if (rval != QLA_SUCCESS) { 2287 if (rval != QLA_SUCCESS) {
2227 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2288 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2228 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2289 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2229 ha->host_no, fcport->port_name[0], fcport->port_name[1], 2290 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2230 fcport->port_name[2], fcport->port_name[3], 2291 fcport->port_name[2], fcport->port_name[3],
2231 fcport->port_name[4], fcport->port_name[5], 2292 fcport->port_name[4], fcport->port_name[5],
2232 fcport->port_name[6], fcport->port_name[7], rval, 2293 fcport->port_name[6], fcport->port_name[7], rval,
@@ -2244,10 +2305,11 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2244} 2305}
2245 2306
2246static void 2307static void
2247qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport) 2308qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2248{ 2309{
2249 struct fc_rport_identifiers rport_ids; 2310 struct fc_rport_identifiers rport_ids;
2250 struct fc_rport *rport; 2311 struct fc_rport *rport;
2312 struct qla_hw_data *ha = vha->hw;
2251 2313
2252 if (fcport->drport) 2314 if (fcport->drport)
2253 qla2x00_rport_del(fcport); 2315 qla2x00_rport_del(fcport);
@@ -2257,15 +2319,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2257 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2319 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2258 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2320 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2259 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2321 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2260 fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids); 2322 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2261 if (!rport) { 2323 if (!rport) {
2262 qla_printk(KERN_WARNING, ha, 2324 qla_printk(KERN_WARNING, ha,
2263 "Unable to allocate fc remote port!\n"); 2325 "Unable to allocate fc remote port!\n");
2264 return; 2326 return;
2265 } 2327 }
2266 spin_lock_irq(fcport->ha->host->host_lock); 2328 spin_lock_irq(fcport->vha->host->host_lock);
2267 *((fc_port_t **)rport->dd_data) = fcport; 2329 *((fc_port_t **)rport->dd_data) = fcport;
2268 spin_unlock_irq(fcport->ha->host->host_lock); 2330 spin_unlock_irq(fcport->vha->host->host_lock);
2269 2331
2270 rport->supported_classes = fcport->supported_classes; 2332 rport->supported_classes = fcport->supported_classes;
2271 2333
@@ -2293,23 +2355,23 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2293 * Kernel context. 2355 * Kernel context.
2294 */ 2356 */
2295void 2357void
2296qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2358qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2297{ 2359{
2298 scsi_qla_host_t *pha = to_qla_parent(ha); 2360 struct qla_hw_data *ha = vha->hw;
2299 2361
2300 fcport->ha = ha; 2362 fcport->vha = vha;
2301 fcport->login_retry = 0; 2363 fcport->login_retry = 0;
2302 fcport->port_login_retry_count = pha->port_down_retry_count * 2364 fcport->port_login_retry_count = ha->port_down_retry_count *
2303 PORT_RETRY_TIME; 2365 PORT_RETRY_TIME;
2304 atomic_set(&fcport->port_down_timer, pha->port_down_retry_count * 2366 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2305 PORT_RETRY_TIME); 2367 PORT_RETRY_TIME);
2306 fcport->flags &= ~FCF_LOGIN_NEEDED; 2368 fcport->flags &= ~FCF_LOGIN_NEEDED;
2307 2369
2308 qla2x00_iidma_fcport(ha, fcport); 2370 qla2x00_iidma_fcport(vha, fcport);
2309 2371
2310 atomic_set(&fcport->state, FCS_ONLINE); 2372 atomic_set(&fcport->state, FCS_ONLINE);
2311 2373
2312 qla2x00_reg_remote_port(ha, fcport); 2374 qla2x00_reg_remote_port(vha, fcport);
2313} 2375}
2314 2376
2315/* 2377/*
@@ -2324,7 +2386,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2324 * BIT_0 = error 2386 * BIT_0 = error
2325 */ 2387 */
2326static int 2388static int
2327qla2x00_configure_fabric(scsi_qla_host_t *ha) 2389qla2x00_configure_fabric(scsi_qla_host_t *vha)
2328{ 2390{
2329 int rval, rval2; 2391 int rval, rval2;
2330 fc_port_t *fcport, *fcptemp; 2392 fc_port_t *fcport, *fcptemp;
@@ -2332,25 +2394,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2332 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2394 uint16_t mb[MAILBOX_REGISTER_COUNT];
2333 uint16_t loop_id; 2395 uint16_t loop_id;
2334 LIST_HEAD(new_fcports); 2396 LIST_HEAD(new_fcports);
2335 scsi_qla_host_t *pha = to_qla_parent(ha); 2397 struct qla_hw_data *ha = vha->hw;
2398 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2336 2399
2337 /* If FL port exists, then SNS is present */ 2400 /* If FL port exists, then SNS is present */
2338 if (IS_FWI2_CAPABLE(ha)) 2401 if (IS_FWI2_CAPABLE(ha))
2339 loop_id = NPH_F_PORT; 2402 loop_id = NPH_F_PORT;
2340 else 2403 else
2341 loop_id = SNS_FL_PORT; 2404 loop_id = SNS_FL_PORT;
2342 rval = qla2x00_get_port_name(ha, loop_id, ha->fabric_node_name, 1); 2405 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2343 if (rval != QLA_SUCCESS) { 2406 if (rval != QLA_SUCCESS) {
2344 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2407 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2345 "Port\n", ha->host_no)); 2408 "Port\n", vha->host_no));
2346 2409
2347 ha->device_flags &= ~SWITCH_FOUND; 2410 vha->device_flags &= ~SWITCH_FOUND;
2348 return (QLA_SUCCESS); 2411 return (QLA_SUCCESS);
2349 } 2412 }
2350 ha->device_flags |= SWITCH_FOUND; 2413 vha->device_flags |= SWITCH_FOUND;
2351 2414
2352 /* Mark devices that need re-synchronization. */ 2415 /* Mark devices that need re-synchronization. */
2353 rval2 = qla2x00_device_resync(ha); 2416 rval2 = qla2x00_device_resync(vha);
2354 if (rval2 == QLA_RSCNS_HANDLED) { 2417 if (rval2 == QLA_RSCNS_HANDLED) {
2355 /* No point doing the scan, just continue. */ 2418 /* No point doing the scan, just continue. */
2356 return (QLA_SUCCESS); 2419 return (QLA_SUCCESS);
@@ -2358,15 +2421,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2358 do { 2421 do {
2359 /* FDMI support. */ 2422 /* FDMI support. */
2360 if (ql2xfdmienable && 2423 if (ql2xfdmienable &&
2361 test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags)) 2424 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2362 qla2x00_fdmi_register(ha); 2425 qla2x00_fdmi_register(vha);
2363 2426
2364 /* Ensure we are logged into the SNS. */ 2427 /* Ensure we are logged into the SNS. */
2365 if (IS_FWI2_CAPABLE(ha)) 2428 if (IS_FWI2_CAPABLE(ha))
2366 loop_id = NPH_SNS; 2429 loop_id = NPH_SNS;
2367 else 2430 else
2368 loop_id = SIMPLE_NAME_SERVER; 2431 loop_id = SIMPLE_NAME_SERVER;
2369 ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff, 2432 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2370 0xfc, mb, BIT_1 | BIT_0); 2433 0xfc, mb, BIT_1 | BIT_0);
2371 if (mb[0] != MBS_COMMAND_COMPLETE) { 2434 if (mb[0] != MBS_COMMAND_COMPLETE) {
2372 DEBUG2(qla_printk(KERN_INFO, ha, 2435 DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2376,29 +2439,29 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2376 return (QLA_SUCCESS); 2439 return (QLA_SUCCESS);
2377 } 2440 }
2378 2441
2379 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags)) { 2442 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
2380 if (qla2x00_rft_id(ha)) { 2443 if (qla2x00_rft_id(vha)) {
2381 /* EMPTY */ 2444 /* EMPTY */
2382 DEBUG2(printk("scsi(%ld): Register FC-4 " 2445 DEBUG2(printk("scsi(%ld): Register FC-4 "
2383 "TYPE failed.\n", ha->host_no)); 2446 "TYPE failed.\n", vha->host_no));
2384 } 2447 }
2385 if (qla2x00_rff_id(ha)) { 2448 if (qla2x00_rff_id(vha)) {
2386 /* EMPTY */ 2449 /* EMPTY */
2387 DEBUG2(printk("scsi(%ld): Register FC-4 " 2450 DEBUG2(printk("scsi(%ld): Register FC-4 "
2388 "Features failed.\n", ha->host_no)); 2451 "Features failed.\n", vha->host_no));
2389 } 2452 }
2390 if (qla2x00_rnn_id(ha)) { 2453 if (qla2x00_rnn_id(vha)) {
2391 /* EMPTY */ 2454 /* EMPTY */
2392 DEBUG2(printk("scsi(%ld): Register Node Name " 2455 DEBUG2(printk("scsi(%ld): Register Node Name "
2393 "failed.\n", ha->host_no)); 2456 "failed.\n", vha->host_no));
2394 } else if (qla2x00_rsnn_nn(ha)) { 2457 } else if (qla2x00_rsnn_nn(vha)) {
2395 /* EMPTY */ 2458 /* EMPTY */
2396 DEBUG2(printk("scsi(%ld): Register Symbolic " 2459 DEBUG2(printk("scsi(%ld): Register Symbolic "
2397 "Node Name failed.\n", ha->host_no)); 2460 "Node Name failed.\n", vha->host_no));
2398 } 2461 }
2399 } 2462 }
2400 2463
2401 rval = qla2x00_find_all_fabric_devs(ha, &new_fcports); 2464 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2402 if (rval != QLA_SUCCESS) 2465 if (rval != QLA_SUCCESS)
2403 break; 2466 break;
2404 2467
@@ -2406,24 +2469,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2406 * Logout all previous fabric devices marked lost, except 2469 * Logout all previous fabric devices marked lost, except
2407 * tape devices. 2470 * tape devices.
2408 */ 2471 */
2409 list_for_each_entry(fcport, &pha->fcports, list) { 2472 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2410 if (fcport->vp_idx !=ha->vp_idx) 2473 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2411 continue;
2412
2413 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2414 break; 2474 break;
2415 2475
2416 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 2476 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
2417 continue; 2477 continue;
2418 2478
2419 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2479 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2420 qla2x00_mark_device_lost(ha, fcport, 2480 qla2x00_mark_device_lost(vha, fcport,
2421 ql2xplogiabsentdevice, 0); 2481 ql2xplogiabsentdevice, 0);
2422 if (fcport->loop_id != FC_NO_LOOP_ID && 2482 if (fcport->loop_id != FC_NO_LOOP_ID &&
2423 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2483 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2424 fcport->port_type != FCT_INITIATOR && 2484 fcport->port_type != FCT_INITIATOR &&
2425 fcport->port_type != FCT_BROADCAST) { 2485 fcport->port_type != FCT_BROADCAST) {
2426 ha->isp_ops->fabric_logout(ha, 2486 ha->isp_ops->fabric_logout(vha,
2427 fcport->loop_id, 2487 fcport->loop_id,
2428 fcport->d_id.b.domain, 2488 fcport->d_id.b.domain,
2429 fcport->d_id.b.area, 2489 fcport->d_id.b.area,
@@ -2434,18 +2494,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2434 } 2494 }
2435 2495
2436 /* Starting free loop ID. */ 2496 /* Starting free loop ID. */
2437 next_loopid = pha->min_external_loopid; 2497 next_loopid = ha->min_external_loopid;
2438 2498
2439 /* 2499 /*
2440 * Scan through our port list and login entries that need to be 2500 * Scan through our port list and login entries that need to be
2441 * logged in. 2501 * logged in.
2442 */ 2502 */
2443 list_for_each_entry(fcport, &pha->fcports, list) { 2503 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2444 if (fcport->vp_idx != ha->vp_idx) 2504 if (atomic_read(&vha->loop_down_timer) ||
2445 continue; 2505 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2446
2447 if (atomic_read(&ha->loop_down_timer) ||
2448 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2449 break; 2506 break;
2450 2507
2451 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2508 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
@@ -2455,14 +2512,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2455 if (fcport->loop_id == FC_NO_LOOP_ID) { 2512 if (fcport->loop_id == FC_NO_LOOP_ID) {
2456 fcport->loop_id = next_loopid; 2513 fcport->loop_id = next_loopid;
2457 rval = qla2x00_find_new_loop_id( 2514 rval = qla2x00_find_new_loop_id(
2458 to_qla_parent(ha), fcport); 2515 base_vha, fcport);
2459 if (rval != QLA_SUCCESS) { 2516 if (rval != QLA_SUCCESS) {
2460 /* Ran out of IDs to use */ 2517 /* Ran out of IDs to use */
2461 break; 2518 break;
2462 } 2519 }
2463 } 2520 }
2464 /* Login and update database */ 2521 /* Login and update database */
2465 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2522 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2466 } 2523 }
2467 2524
2468 /* Exit if out of loop IDs. */ 2525 /* Exit if out of loop IDs. */
@@ -2474,31 +2531,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2474 * Login and add the new devices to our port list. 2531 * Login and add the new devices to our port list.
2475 */ 2532 */
2476 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2533 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
2477 if (atomic_read(&ha->loop_down_timer) || 2534 if (atomic_read(&vha->loop_down_timer) ||
2478 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2535 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2479 break; 2536 break;
2480 2537
2481 /* Find a new loop ID to use. */ 2538 /* Find a new loop ID to use. */
2482 fcport->loop_id = next_loopid; 2539 fcport->loop_id = next_loopid;
2483 rval = qla2x00_find_new_loop_id(to_qla_parent(ha), 2540 rval = qla2x00_find_new_loop_id(base_vha, fcport);
2484 fcport);
2485 if (rval != QLA_SUCCESS) { 2541 if (rval != QLA_SUCCESS) {
2486 /* Ran out of IDs to use */ 2542 /* Ran out of IDs to use */
2487 break; 2543 break;
2488 } 2544 }
2489 2545
2490 /* Login and update database */ 2546 /* Login and update database */
2491 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2547 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2492 2548
2493 if (ha->parent) { 2549 if (vha->vp_idx) {
2494 fcport->ha = ha; 2550 fcport->vha = vha;
2495 fcport->vp_idx = ha->vp_idx; 2551 fcport->vp_idx = vha->vp_idx;
2496 list_add_tail(&fcport->vp_fcport, 2552 }
2497 &ha->vp_fcports); 2553 list_move_tail(&fcport->list, &vha->vp_fcports);
2498 list_move_tail(&fcport->list,
2499 &ha->parent->fcports);
2500 } else
2501 list_move_tail(&fcport->list, &ha->fcports);
2502 } 2554 }
2503 } while (0); 2555 } while (0);
2504 2556
@@ -2510,7 +2562,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2510 2562
2511 if (rval) { 2563 if (rval) {
2512 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 2564 DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
2513 "rval=%d\n", ha->host_no, rval)); 2565 "rval=%d\n", vha->host_no, rval));
2514 } 2566 }
2515 2567
2516 return (rval); 2568 return (rval);
@@ -2531,7 +2583,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2531 * Kernel context. 2583 * Kernel context.
2532 */ 2584 */
2533static int 2585static int
2534qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports) 2586qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2587 struct list_head *new_fcports)
2535{ 2588{
2536 int rval; 2589 int rval;
2537 uint16_t loop_id; 2590 uint16_t loop_id;
@@ -2542,11 +2595,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2542 int swl_idx; 2595 int swl_idx;
2543 int first_dev, last_dev; 2596 int first_dev, last_dev;
2544 port_id_t wrap, nxt_d_id; 2597 port_id_t wrap, nxt_d_id;
2545 int vp_index; 2598 struct qla_hw_data *ha = vha->hw;
2546 int empty_vp_index; 2599 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
2547 int found_vp;
2548 scsi_qla_host_t *vha;
2549 scsi_qla_host_t *pha = to_qla_parent(ha);
2550 2600
2551 rval = QLA_SUCCESS; 2601 rval = QLA_SUCCESS;
2552 2602
@@ -2555,43 +2605,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2555 if (!swl) { 2605 if (!swl) {
2556 /*EMPTY*/ 2606 /*EMPTY*/
2557 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2607 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
2558 "on GA_NXT\n", ha->host_no)); 2608 "on GA_NXT\n", vha->host_no));
2559 } else { 2609 } else {
2560 if (qla2x00_gid_pt(ha, swl) != QLA_SUCCESS) { 2610 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
2561 kfree(swl); 2611 kfree(swl);
2562 swl = NULL; 2612 swl = NULL;
2563 } else if (qla2x00_gpn_id(ha, swl) != QLA_SUCCESS) { 2613 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
2564 kfree(swl); 2614 kfree(swl);
2565 swl = NULL; 2615 swl = NULL;
2566 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) { 2616 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
2567 kfree(swl); 2617 kfree(swl);
2568 swl = NULL; 2618 swl = NULL;
2569 } else if (ql2xiidmaenable && 2619 } else if (ql2xiidmaenable &&
2570 qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) { 2620 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
2571 qla2x00_gpsc(ha, swl); 2621 qla2x00_gpsc(vha, swl);
2572 } 2622 }
2573 } 2623 }
2574 swl_idx = 0; 2624 swl_idx = 0;
2575 2625
2576 /* Allocate temporary fcport for any new fcports discovered. */ 2626 /* Allocate temporary fcport for any new fcports discovered. */
2577 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2627 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2578 if (new_fcport == NULL) { 2628 if (new_fcport == NULL) {
2579 kfree(swl); 2629 kfree(swl);
2580 return (QLA_MEMORY_ALLOC_FAILED); 2630 return (QLA_MEMORY_ALLOC_FAILED);
2581 } 2631 }
2582 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2632 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2583 new_fcport->vp_idx = ha->vp_idx;
2584 /* Set start port ID scan at adapter ID. */ 2633 /* Set start port ID scan at adapter ID. */
2585 first_dev = 1; 2634 first_dev = 1;
2586 last_dev = 0; 2635 last_dev = 0;
2587 2636
2588 /* Starting free loop ID. */ 2637 /* Starting free loop ID. */
2589 loop_id = pha->min_external_loopid; 2638 loop_id = ha->min_external_loopid;
2590 for (; loop_id <= ha->last_loop_id; loop_id++) { 2639 for (; loop_id <= ha->max_loop_id; loop_id++) {
2591 if (qla2x00_is_reserved_id(ha, loop_id)) 2640 if (qla2x00_is_reserved_id(vha, loop_id))
2592 continue; 2641 continue;
2593 2642
2594 if (atomic_read(&ha->loop_down_timer) || LOOP_TRANSITION(ha)) 2643 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
2595 break; 2644 break;
2596 2645
2597 if (swl != NULL) { 2646 if (swl != NULL) {
@@ -2614,7 +2663,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2614 } 2663 }
2615 } else { 2664 } else {
2616 /* Send GA_NXT to the switch */ 2665 /* Send GA_NXT to the switch */
2617 rval = qla2x00_ga_nxt(ha, new_fcport); 2666 rval = qla2x00_ga_nxt(vha, new_fcport);
2618 if (rval != QLA_SUCCESS) { 2667 if (rval != QLA_SUCCESS) {
2619 qla_printk(KERN_WARNING, ha, 2668 qla_printk(KERN_WARNING, ha,
2620 "SNS scan failed -- assuming zero-entry " 2669 "SNS scan failed -- assuming zero-entry "
@@ -2635,44 +2684,31 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2635 first_dev = 0; 2684 first_dev = 0;
2636 } else if (new_fcport->d_id.b24 == wrap.b24) { 2685 } else if (new_fcport->d_id.b24 == wrap.b24) {
2637 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 2686 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
2638 ha->host_no, new_fcport->d_id.b.domain, 2687 vha->host_no, new_fcport->d_id.b.domain,
2639 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 2688 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
2640 break; 2689 break;
2641 } 2690 }
2642 2691
2643 /* Bypass if same physical adapter. */ 2692 /* Bypass if same physical adapter. */
2644 if (new_fcport->d_id.b24 == pha->d_id.b24) 2693 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
2645 continue; 2694 continue;
2646 2695
2647 /* Bypass virtual ports of the same host. */ 2696 /* Bypass virtual ports of the same host. */
2648 if (pha->num_vhosts) { 2697 found = 0;
2649 for_each_mapped_vp_idx(pha, vp_index) { 2698 if (ha->num_vhosts) {
2650 empty_vp_index = 1; 2699 list_for_each_entry(vp, &ha->vp_list, list) {
2651 found_vp = 0; 2700 if (new_fcport->d_id.b24 == vp->d_id.b24) {
2652 list_for_each_entry(vha, &pha->vp_list, 2701 found = 1;
2653 vp_list) {
2654 if (vp_index == vha->vp_idx) {
2655 empty_vp_index = 0;
2656 found_vp = 1;
2657 break;
2658 }
2659 }
2660
2661 if (empty_vp_index)
2662 continue;
2663
2664 if (found_vp &&
2665 new_fcport->d_id.b24 == vha->d_id.b24)
2666 break; 2702 break;
2703 }
2667 } 2704 }
2668 2705 if (found)
2669 if (vp_index <= pha->max_npiv_vports)
2670 continue; 2706 continue;
2671 } 2707 }
2672 2708
2673 /* Bypass if same domain and area of adapter. */ 2709 /* Bypass if same domain and area of adapter. */
2674 if (((new_fcport->d_id.b24 & 0xffff00) == 2710 if (((new_fcport->d_id.b24 & 0xffff00) ==
2675 (ha->d_id.b24 & 0xffff00)) && ha->current_topology == 2711 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
2676 ISP_CFG_FL) 2712 ISP_CFG_FL)
2677 continue; 2713 continue;
2678 2714
@@ -2682,9 +2718,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2682 2718
2683 /* Locate matching device in database. */ 2719 /* Locate matching device in database. */
2684 found = 0; 2720 found = 0;
2685 list_for_each_entry(fcport, &pha->fcports, list) { 2721 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2686 if (new_fcport->vp_idx != fcport->vp_idx)
2687 continue;
2688 if (memcmp(new_fcport->port_name, fcport->port_name, 2722 if (memcmp(new_fcport->port_name, fcport->port_name,
2689 WWN_SIZE)) 2723 WWN_SIZE))
2690 continue; 2724 continue;
@@ -2728,7 +2762,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2728 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2762 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2729 fcport->port_type != FCT_INITIATOR && 2763 fcport->port_type != FCT_INITIATOR &&
2730 fcport->port_type != FCT_BROADCAST) { 2764 fcport->port_type != FCT_BROADCAST) {
2731 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 2765 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2732 fcport->d_id.b.domain, fcport->d_id.b.area, 2766 fcport->d_id.b.domain, fcport->d_id.b.area,
2733 fcport->d_id.b.al_pa); 2767 fcport->d_id.b.al_pa);
2734 fcport->loop_id = FC_NO_LOOP_ID; 2768 fcport->loop_id = FC_NO_LOOP_ID;
@@ -2739,27 +2773,25 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2739 2773
2740 if (found) 2774 if (found)
2741 continue; 2775 continue;
2742
2743 /* If device was not in our fcports list, then add it. */ 2776 /* If device was not in our fcports list, then add it. */
2744 list_add_tail(&new_fcport->list, new_fcports); 2777 list_add_tail(&new_fcport->list, new_fcports);
2745 2778
2746 /* Allocate a new replacement fcport. */ 2779 /* Allocate a new replacement fcport. */
2747 nxt_d_id.b24 = new_fcport->d_id.b24; 2780 nxt_d_id.b24 = new_fcport->d_id.b24;
2748 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2781 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2749 if (new_fcport == NULL) { 2782 if (new_fcport == NULL) {
2750 kfree(swl); 2783 kfree(swl);
2751 return (QLA_MEMORY_ALLOC_FAILED); 2784 return (QLA_MEMORY_ALLOC_FAILED);
2752 } 2785 }
2753 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2786 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2754 new_fcport->d_id.b24 = nxt_d_id.b24; 2787 new_fcport->d_id.b24 = nxt_d_id.b24;
2755 new_fcport->vp_idx = ha->vp_idx;
2756 } 2788 }
2757 2789
2758 kfree(swl); 2790 kfree(swl);
2759 kfree(new_fcport); 2791 kfree(new_fcport);
2760 2792
2761 if (!list_empty(new_fcports)) 2793 if (!list_empty(new_fcports))
2762 ha->device_flags |= DFLG_FABRIC_DEVICES; 2794 vha->device_flags |= DFLG_FABRIC_DEVICES;
2763 2795
2764 return (rval); 2796 return (rval);
2765} 2797}
@@ -2779,13 +2811,14 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2779 * Kernel context. 2811 * Kernel context.
2780 */ 2812 */
2781static int 2813static int
2782qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev) 2814qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
2783{ 2815{
2784 int rval; 2816 int rval;
2785 int found; 2817 int found;
2786 fc_port_t *fcport; 2818 fc_port_t *fcport;
2787 uint16_t first_loop_id; 2819 uint16_t first_loop_id;
2788 scsi_qla_host_t *pha = to_qla_parent(ha); 2820 struct qla_hw_data *ha = vha->hw;
2821 struct scsi_qla_host *vp;
2789 2822
2790 rval = QLA_SUCCESS; 2823 rval = QLA_SUCCESS;
2791 2824
@@ -2794,17 +2827,15 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2794 2827
2795 for (;;) { 2828 for (;;) {
2796 /* Skip loop ID if already used by adapter. */ 2829 /* Skip loop ID if already used by adapter. */
2797 if (dev->loop_id == ha->loop_id) { 2830 if (dev->loop_id == vha->loop_id)
2798 dev->loop_id++; 2831 dev->loop_id++;
2799 }
2800 2832
2801 /* Skip reserved loop IDs. */ 2833 /* Skip reserved loop IDs. */
2802 while (qla2x00_is_reserved_id(ha, dev->loop_id)) { 2834 while (qla2x00_is_reserved_id(vha, dev->loop_id))
2803 dev->loop_id++; 2835 dev->loop_id++;
2804 }
2805 2836
2806 /* Reset loop ID if passed the end. */ 2837 /* Reset loop ID if passed the end. */
2807 if (dev->loop_id > ha->last_loop_id) { 2838 if (dev->loop_id > ha->max_loop_id) {
2808 /* first loop ID. */ 2839 /* first loop ID. */
2809 dev->loop_id = ha->min_external_loopid; 2840 dev->loop_id = ha->min_external_loopid;
2810 } 2841 }
@@ -2812,12 +2843,17 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2812 /* Check for loop ID being already in use. */ 2843 /* Check for loop ID being already in use. */
2813 found = 0; 2844 found = 0;
2814 fcport = NULL; 2845 fcport = NULL;
2815 list_for_each_entry(fcport, &pha->fcports, list) { 2846 list_for_each_entry(vp, &ha->vp_list, list) {
2816 if (fcport->loop_id == dev->loop_id && fcport != dev) { 2847 list_for_each_entry(fcport, &vp->vp_fcports, list) {
2817 /* ID possibly in use */ 2848 if (fcport->loop_id == dev->loop_id &&
2818 found++; 2849 fcport != dev) {
2819 break; 2850 /* ID possibly in use */
2851 found++;
2852 break;
2853 }
2820 } 2854 }
2855 if (found)
2856 break;
2821 } 2857 }
2822 2858
2823 /* If not in use then it is free to use. */ 2859 /* If not in use then it is free to use. */
@@ -2850,7 +2886,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2850 * Kernel context. 2886 * Kernel context.
2851 */ 2887 */
2852static int 2888static int
2853qla2x00_device_resync(scsi_qla_host_t *ha) 2889qla2x00_device_resync(scsi_qla_host_t *vha)
2854{ 2890{
2855 int rval; 2891 int rval;
2856 uint32_t mask; 2892 uint32_t mask;
@@ -2859,14 +2895,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2859 uint8_t rscn_out_iter; 2895 uint8_t rscn_out_iter;
2860 uint8_t format; 2896 uint8_t format;
2861 port_id_t d_id; 2897 port_id_t d_id;
2862 scsi_qla_host_t *pha = to_qla_parent(ha);
2863 2898
2864 rval = QLA_RSCNS_HANDLED; 2899 rval = QLA_RSCNS_HANDLED;
2865 2900
2866 while (ha->rscn_out_ptr != ha->rscn_in_ptr || 2901 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
2867 ha->flags.rscn_queue_overflow) { 2902 vha->flags.rscn_queue_overflow) {
2868 2903
2869 rscn_entry = ha->rscn_queue[ha->rscn_out_ptr]; 2904 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
2870 format = MSB(MSW(rscn_entry)); 2905 format = MSB(MSW(rscn_entry));
2871 d_id.b.domain = LSB(MSW(rscn_entry)); 2906 d_id.b.domain = LSB(MSW(rscn_entry));
2872 d_id.b.area = MSB(LSW(rscn_entry)); 2907 d_id.b.area = MSB(LSW(rscn_entry));
@@ -2874,37 +2909,37 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2874 2909
2875 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 2910 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
2876 "[%02x/%02x%02x%02x].\n", 2911 "[%02x/%02x%02x%02x].\n",
2877 ha->host_no, ha->rscn_out_ptr, format, d_id.b.domain, 2912 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
2878 d_id.b.area, d_id.b.al_pa)); 2913 d_id.b.area, d_id.b.al_pa));
2879 2914
2880 ha->rscn_out_ptr++; 2915 vha->rscn_out_ptr++;
2881 if (ha->rscn_out_ptr == MAX_RSCN_COUNT) 2916 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
2882 ha->rscn_out_ptr = 0; 2917 vha->rscn_out_ptr = 0;
2883 2918
2884 /* Skip duplicate entries. */ 2919 /* Skip duplicate entries. */
2885 for (rscn_out_iter = ha->rscn_out_ptr; 2920 for (rscn_out_iter = vha->rscn_out_ptr;
2886 !ha->flags.rscn_queue_overflow && 2921 !vha->flags.rscn_queue_overflow &&
2887 rscn_out_iter != ha->rscn_in_ptr; 2922 rscn_out_iter != vha->rscn_in_ptr;
2888 rscn_out_iter = (rscn_out_iter == 2923 rscn_out_iter = (rscn_out_iter ==
2889 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) { 2924 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
2890 2925
2891 if (rscn_entry != ha->rscn_queue[rscn_out_iter]) 2926 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
2892 break; 2927 break;
2893 2928
2894 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 2929 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
2895 "entry found at [%d].\n", ha->host_no, 2930 "entry found at [%d].\n", vha->host_no,
2896 rscn_out_iter)); 2931 rscn_out_iter));
2897 2932
2898 ha->rscn_out_ptr = rscn_out_iter; 2933 vha->rscn_out_ptr = rscn_out_iter;
2899 } 2934 }
2900 2935
2901 /* Queue overflow, set switch default case. */ 2936 /* Queue overflow, set switch default case. */
2902 if (ha->flags.rscn_queue_overflow) { 2937 if (vha->flags.rscn_queue_overflow) {
2903 DEBUG(printk("scsi(%ld): device_resync: rscn " 2938 DEBUG(printk("scsi(%ld): device_resync: rscn "
2904 "overflow.\n", ha->host_no)); 2939 "overflow.\n", vha->host_no));
2905 2940
2906 format = 3; 2941 format = 3;
2907 ha->flags.rscn_queue_overflow = 0; 2942 vha->flags.rscn_queue_overflow = 0;
2908 } 2943 }
2909 2944
2910 switch (format) { 2945 switch (format) {
@@ -2920,16 +2955,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2920 default: 2955 default:
2921 mask = 0x0; 2956 mask = 0x0;
2922 d_id.b24 = 0; 2957 d_id.b24 = 0;
2923 ha->rscn_out_ptr = ha->rscn_in_ptr; 2958 vha->rscn_out_ptr = vha->rscn_in_ptr;
2924 break; 2959 break;
2925 } 2960 }
2926 2961
2927 rval = QLA_SUCCESS; 2962 rval = QLA_SUCCESS;
2928 2963
2929 list_for_each_entry(fcport, &pha->fcports, list) { 2964 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2930 if (fcport->vp_idx != ha->vp_idx)
2931 continue;
2932
2933 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2965 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
2934 (fcport->d_id.b24 & mask) != d_id.b24 || 2966 (fcport->d_id.b24 & mask) != d_id.b24 ||
2935 fcport->port_type == FCT_BROADCAST) 2967 fcport->port_type == FCT_BROADCAST)
@@ -2938,7 +2970,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2938 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2970 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2939 if (format != 3 || 2971 if (format != 3 ||
2940 fcport->port_type != FCT_INITIATOR) { 2972 fcport->port_type != FCT_INITIATOR) {
2941 qla2x00_mark_device_lost(ha, fcport, 2973 qla2x00_mark_device_lost(vha, fcport,
2942 0, 0); 2974 0, 0);
2943 } 2975 }
2944 } 2976 }
@@ -2965,30 +2997,31 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2965 * Kernel context. 2997 * Kernel context.
2966 */ 2998 */
2967static int 2999static int
2968qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport, 3000qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
2969 uint16_t *next_loopid) 3001 uint16_t *next_loopid)
2970{ 3002{
2971 int rval; 3003 int rval;
2972 int retry; 3004 int retry;
2973 uint8_t opts; 3005 uint8_t opts;
3006 struct qla_hw_data *ha = vha->hw;
2974 3007
2975 rval = QLA_SUCCESS; 3008 rval = QLA_SUCCESS;
2976 retry = 0; 3009 retry = 0;
2977 3010
2978 rval = qla2x00_fabric_login(ha, fcport, next_loopid); 3011 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
2979 if (rval == QLA_SUCCESS) { 3012 if (rval == QLA_SUCCESS) {
2980 /* Send an ADISC to tape devices.*/ 3013 /* Send an ADISC to tape devices.*/
2981 opts = 0; 3014 opts = 0;
2982 if (fcport->flags & FCF_TAPE_PRESENT) 3015 if (fcport->flags & FCF_TAPE_PRESENT)
2983 opts |= BIT_1; 3016 opts |= BIT_1;
2984 rval = qla2x00_get_port_database(ha, fcport, opts); 3017 rval = qla2x00_get_port_database(vha, fcport, opts);
2985 if (rval != QLA_SUCCESS) { 3018 if (rval != QLA_SUCCESS) {
2986 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 3019 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2987 fcport->d_id.b.domain, fcport->d_id.b.area, 3020 fcport->d_id.b.domain, fcport->d_id.b.area,
2988 fcport->d_id.b.al_pa); 3021 fcport->d_id.b.al_pa);
2989 qla2x00_mark_device_lost(ha, fcport, 1, 0); 3022 qla2x00_mark_device_lost(vha, fcport, 1, 0);
2990 } else { 3023 } else {
2991 qla2x00_update_fcport(ha, fcport); 3024 qla2x00_update_fcport(vha, fcport);
2992 } 3025 }
2993 } 3026 }
2994 3027
@@ -3010,13 +3043,14 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3010 * 3 - Fatal error 3043 * 3 - Fatal error
3011 */ 3044 */
3012int 3045int
3013qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport, 3046qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3014 uint16_t *next_loopid) 3047 uint16_t *next_loopid)
3015{ 3048{
3016 int rval; 3049 int rval;
3017 int retry; 3050 int retry;
3018 uint16_t tmp_loopid; 3051 uint16_t tmp_loopid;
3019 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3052 uint16_t mb[MAILBOX_REGISTER_COUNT];
3053 struct qla_hw_data *ha = vha->hw;
3020 3054
3021 retry = 0; 3055 retry = 0;
3022 tmp_loopid = 0; 3056 tmp_loopid = 0;
@@ -3024,11 +3058,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3024 for (;;) { 3058 for (;;) {
3025 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3059 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3026 "for port %02x%02x%02x.\n", 3060 "for port %02x%02x%02x.\n",
3027 ha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3061 vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3028 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3062 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3029 3063
3030 /* Login fcport on switch. */ 3064 /* Login fcport on switch. */
3031 ha->isp_ops->fabric_login(ha, fcport->loop_id, 3065 ha->isp_ops->fabric_login(vha, fcport->loop_id,
3032 fcport->d_id.b.domain, fcport->d_id.b.area, 3066 fcport->d_id.b.domain, fcport->d_id.b.area,
3033 fcport->d_id.b.al_pa, mb, BIT_0); 3067 fcport->d_id.b.al_pa, mb, BIT_0);
3034 if (mb[0] == MBS_PORT_ID_USED) { 3068 if (mb[0] == MBS_PORT_ID_USED) {
@@ -3084,7 +3118,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3084 * Loop ID already used, try next loop ID. 3118 * Loop ID already used, try next loop ID.
3085 */ 3119 */
3086 fcport->loop_id++; 3120 fcport->loop_id++;
3087 rval = qla2x00_find_new_loop_id(ha, fcport); 3121 rval = qla2x00_find_new_loop_id(vha, fcport);
3088 if (rval != QLA_SUCCESS) { 3122 if (rval != QLA_SUCCESS) {
3089 /* Ran out of loop IDs to use */ 3123 /* Ran out of loop IDs to use */
3090 break; 3124 break;
@@ -3096,10 +3130,10 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3096 * dead. 3130 * dead.
3097 */ 3131 */
3098 *next_loopid = fcport->loop_id; 3132 *next_loopid = fcport->loop_id;
3099 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 3133 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3100 fcport->d_id.b.domain, fcport->d_id.b.area, 3134 fcport->d_id.b.domain, fcport->d_id.b.area,
3101 fcport->d_id.b.al_pa); 3135 fcport->d_id.b.al_pa);
3102 qla2x00_mark_device_lost(ha, fcport, 1, 0); 3136 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3103 3137
3104 rval = 1; 3138 rval = 1;
3105 break; 3139 break;
@@ -3109,12 +3143,12 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3109 */ 3143 */
3110 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3144 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3111 "loop_id=%x jiffies=%lx.\n", 3145 "loop_id=%x jiffies=%lx.\n",
3112 __func__, ha->host_no, mb[0], 3146 __func__, vha->host_no, mb[0],
3113 fcport->d_id.b.domain, fcport->d_id.b.area, 3147 fcport->d_id.b.domain, fcport->d_id.b.area,
3114 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3148 fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3115 3149
3116 *next_loopid = fcport->loop_id; 3150 *next_loopid = fcport->loop_id;
3117 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 3151 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3118 fcport->d_id.b.domain, fcport->d_id.b.area, 3152 fcport->d_id.b.domain, fcport->d_id.b.area,
3119 fcport->d_id.b.al_pa); 3153 fcport->d_id.b.al_pa);
3120 fcport->loop_id = FC_NO_LOOP_ID; 3154 fcport->loop_id = FC_NO_LOOP_ID;
@@ -3142,13 +3176,13 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3142 * 3 - Fatal error 3176 * 3 - Fatal error
3143 */ 3177 */
3144int 3178int
3145qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport) 3179qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3146{ 3180{
3147 int rval; 3181 int rval;
3148 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3182 uint16_t mb[MAILBOX_REGISTER_COUNT];
3149 3183
3150 memset(mb, 0, sizeof(mb)); 3184 memset(mb, 0, sizeof(mb));
3151 rval = qla2x00_login_local_device(ha, fcport, mb, BIT_0); 3185 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3152 if (rval == QLA_SUCCESS) { 3186 if (rval == QLA_SUCCESS) {
3153 /* Interrogate mailbox registers for any errors */ 3187 /* Interrogate mailbox registers for any errors */
3154 if (mb[0] == MBS_COMMAND_ERROR) 3188 if (mb[0] == MBS_COMMAND_ERROR)
@@ -3172,57 +3206,57 @@ qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
3172 * 0 = success 3206 * 0 = success
3173 */ 3207 */
3174int 3208int
3175qla2x00_loop_resync(scsi_qla_host_t *ha) 3209qla2x00_loop_resync(scsi_qla_host_t *vha)
3176{ 3210{
3177 int rval; 3211 int rval = QLA_SUCCESS;
3178 uint32_t wait_time; 3212 uint32_t wait_time;
3179 3213 struct qla_hw_data *ha = vha->hw;
3180 rval = QLA_SUCCESS; 3214 struct req_que *req = ha->req_q_map[0];
3181 3215 struct rsp_que *rsp = ha->rsp_q_map[0];
3182 atomic_set(&ha->loop_state, LOOP_UPDATE); 3216
3183 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3217 atomic_set(&vha->loop_state, LOOP_UPDATE);
3184 if (ha->flags.online) { 3218 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3185 if (!(rval = qla2x00_fw_ready(ha))) { 3219 if (vha->flags.online) {
3220 if (!(rval = qla2x00_fw_ready(vha))) {
3186 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3221 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3187 wait_time = 256; 3222 wait_time = 256;
3188 do { 3223 do {
3189 atomic_set(&ha->loop_state, LOOP_UPDATE); 3224 atomic_set(&vha->loop_state, LOOP_UPDATE);
3190 3225
3191 /* Issue a marker after FW becomes ready. */ 3226 /* Issue a marker after FW becomes ready. */
3192 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 3227 qla2x00_marker(vha, req, rsp, 0, 0,
3193 ha->marker_needed = 0; 3228 MK_SYNC_ALL);
3229 vha->marker_needed = 0;
3194 3230
3195 /* Remap devices on Loop. */ 3231 /* Remap devices on Loop. */
3196 clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 3232 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3197 3233
3198 qla2x00_configure_loop(ha); 3234 qla2x00_configure_loop(vha);
3199 wait_time--; 3235 wait_time--;
3200 } while (!atomic_read(&ha->loop_down_timer) && 3236 } while (!atomic_read(&vha->loop_down_timer) &&
3201 !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) && 3237 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3202 wait_time && 3238 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3203 (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))); 3239 &vha->dpc_flags)));
3204 } 3240 }
3205 } 3241 }
3206 3242
3207 if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { 3243 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3208 return (QLA_FUNCTION_FAILED); 3244 return (QLA_FUNCTION_FAILED);
3209 }
3210 3245
3211 if (rval) { 3246 if (rval)
3212 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3247 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
3213 }
3214 3248
3215 return (rval); 3249 return (rval);
3216} 3250}
3217 3251
3218void 3252void
3219qla2x00_update_fcports(scsi_qla_host_t *ha) 3253qla2x00_update_fcports(scsi_qla_host_t *vha)
3220{ 3254{
3221 fc_port_t *fcport; 3255 fc_port_t *fcport;
3222 3256
3223 /* Go with deferred removal of rport references. */ 3257 /* Go with deferred removal of rport references. */
3224 list_for_each_entry(fcport, &ha->fcports, list) 3258 list_for_each_entry(fcport, &vha->vp_fcports, list)
3225 if (fcport->drport && 3259 if (fcport && fcport->drport &&
3226 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3260 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3227 qla2x00_rport_del(fcport); 3261 qla2x00_rport_del(fcport);
3228} 3262}
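
The device-resync and update-fcports hunks replace "walk the physical host's global fcports list and skip entries whose vp_idx does not match" with a direct walk of the port's own vp_fcports list, which is why the vp_idx filter disappears. A small sketch of that shape, using a plain singly linked list rather than the kernel's list.h, with invented names:

#include <stdio.h>
#include <stddef.h>

struct fc_port {
	int loop_id;
	struct fc_port *next;		/* simplified stand-in for the kernel list */
};

struct vport {
	int vp_idx;
	struct fc_port *vp_fcports;	/* each port now owns its own list */
};

/* New shape: walk the per-port list directly; no vp_idx filtering needed. */
static void mark_all_lost(struct vport *vha)
{
	struct fc_port *fcport;

	for (fcport = vha->vp_fcports; fcport; fcport = fcport->next)
		printf("vp%d: marking loop_id 0x%x lost\n",
		       vha->vp_idx, fcport->loop_id);
}

int main(void)
{
	struct fc_port a = { .loop_id = 0x81, .next = NULL };
	struct fc_port b = { .loop_id = 0x82, .next = &a };
	struct vport vp = { .vp_idx = 1, .vp_fcports = &b };

	mark_all_lost(&vp);
	return 0;
}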
@@ -3238,63 +3272,65 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
3238* 0 = success 3272* 0 = success
3239*/ 3273*/
3240int 3274int
3241qla2x00_abort_isp(scsi_qla_host_t *ha) 3275qla2x00_abort_isp(scsi_qla_host_t *vha)
3242{ 3276{
3243 int rval; 3277 int rval;
3244 uint8_t status = 0; 3278 uint8_t status = 0;
3245 scsi_qla_host_t *vha; 3279 struct qla_hw_data *ha = vha->hw;
3280 struct scsi_qla_host *vp;
3281 struct req_que *req = ha->req_q_map[0];
3246 3282
3247 if (ha->flags.online) { 3283 if (vha->flags.online) {
3248 ha->flags.online = 0; 3284 vha->flags.online = 0;
3249 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 3285 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3250 ha->qla_stats.total_isp_aborts++; 3286 ha->qla_stats.total_isp_aborts++;
3251 3287
3252 qla_printk(KERN_INFO, ha, 3288 qla_printk(KERN_INFO, ha,
3253 "Performing ISP error recovery - ha= %p.\n", ha); 3289 "Performing ISP error recovery - ha= %p.\n", ha);
3254 ha->isp_ops->reset_chip(ha); 3290 ha->isp_ops->reset_chip(vha);
3255 3291
3256 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3292 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3257 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 3293 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3258 atomic_set(&ha->loop_state, LOOP_DOWN); 3294 atomic_set(&vha->loop_state, LOOP_DOWN);
3259 qla2x00_mark_all_devices_lost(ha, 0); 3295 qla2x00_mark_all_devices_lost(vha, 0);
3260 list_for_each_entry(vha, &ha->vp_list, vp_list) 3296 list_for_each_entry(vp, &ha->vp_list, list)
3261 qla2x00_mark_all_devices_lost(vha, 0); 3297 qla2x00_mark_all_devices_lost(vp, 0);
3262 } else { 3298 } else {
3263 if (!atomic_read(&ha->loop_down_timer)) 3299 if (!atomic_read(&vha->loop_down_timer))
3264 atomic_set(&ha->loop_down_timer, 3300 atomic_set(&vha->loop_down_timer,
3265 LOOP_DOWN_TIME); 3301 LOOP_DOWN_TIME);
3266 } 3302 }
3267 3303
3268 /* Requeue all commands in outstanding command list. */ 3304 /* Requeue all commands in outstanding command list. */
3269 qla2x00_abort_all_cmds(ha, DID_RESET << 16); 3305 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3270 3306
3271 ha->isp_ops->get_flash_version(ha, ha->request_ring); 3307 ha->isp_ops->get_flash_version(vha, req->ring);
3272 3308
3273 ha->isp_ops->nvram_config(ha); 3309 ha->isp_ops->nvram_config(vha);
3274 3310
3275 if (!qla2x00_restart_isp(ha)) { 3311 if (!qla2x00_restart_isp(vha)) {
3276 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3312 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3277 3313
3278 if (!atomic_read(&ha->loop_down_timer)) { 3314 if (!atomic_read(&vha->loop_down_timer)) {
3279 /* 3315 /*
3280 * Issue marker command only when we are going 3316 * Issue marker command only when we are going
3281 * to start the I/O . 3317 * to start the I/O .
3282 */ 3318 */
3283 ha->marker_needed = 1; 3319 vha->marker_needed = 1;
3284 } 3320 }
3285 3321
3286 ha->flags.online = 1; 3322 vha->flags.online = 1;
3287 3323
3288 ha->isp_ops->enable_intrs(ha); 3324 ha->isp_ops->enable_intrs(ha);
3289 3325
3290 ha->isp_abort_cnt = 0; 3326 ha->isp_abort_cnt = 0;
3291 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3327 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3292 3328
3293 if (ha->fce) { 3329 if (ha->fce) {
3294 ha->flags.fce_enabled = 1; 3330 ha->flags.fce_enabled = 1;
3295 memset(ha->fce, 0, 3331 memset(ha->fce, 0,
3296 fce_calc_size(ha->fce_bufs)); 3332 fce_calc_size(ha->fce_bufs));
3297 rval = qla2x00_enable_fce_trace(ha, 3333 rval = qla2x00_enable_fce_trace(vha,
3298 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 3334 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3299 &ha->fce_bufs); 3335 &ha->fce_bufs);
3300 if (rval) { 3336 if (rval) {
@@ -3307,7 +3343,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3307 3343
3308 if (ha->eft) { 3344 if (ha->eft) {
3309 memset(ha->eft, 0, EFT_SIZE); 3345 memset(ha->eft, 0, EFT_SIZE);
3310 rval = qla2x00_enable_eft_trace(ha, 3346 rval = qla2x00_enable_eft_trace(vha,
3311 ha->eft_dma, EFT_NUM_BUFFERS); 3347 ha->eft_dma, EFT_NUM_BUFFERS);
3312 if (rval) { 3348 if (rval) {
3313 qla_printk(KERN_WARNING, ha, 3349 qla_printk(KERN_WARNING, ha,
@@ -3316,8 +3352,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3316 } 3352 }
3317 } 3353 }
3318 } else { /* failed the ISP abort */ 3354 } else { /* failed the ISP abort */
3319 ha->flags.online = 1; 3355 vha->flags.online = 1;
3320 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3356 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3321 if (ha->isp_abort_cnt == 0) { 3357 if (ha->isp_abort_cnt == 0) {
3322 qla_printk(KERN_WARNING, ha, 3358 qla_printk(KERN_WARNING, ha,
3323 "ISP error recovery failed - " 3359 "ISP error recovery failed - "
@@ -3326,37 +3362,41 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3326 * The next call disables the board 3362 * The next call disables the board
3327 * completely. 3363 * completely.
3328 */ 3364 */
3329 ha->isp_ops->reset_adapter(ha); 3365 ha->isp_ops->reset_adapter(vha);
3330 ha->flags.online = 0; 3366 vha->flags.online = 0;
3331 clear_bit(ISP_ABORT_RETRY, 3367 clear_bit(ISP_ABORT_RETRY,
3332 &ha->dpc_flags); 3368 &vha->dpc_flags);
3333 status = 0; 3369 status = 0;
3334 } else { /* schedule another ISP abort */ 3370 } else { /* schedule another ISP abort */
3335 ha->isp_abort_cnt--; 3371 ha->isp_abort_cnt--;
3336 DEBUG(printk("qla%ld: ISP abort - " 3372 DEBUG(printk("qla%ld: ISP abort - "
3337 "retry remaining %d\n", 3373 "retry remaining %d\n",
3338 ha->host_no, ha->isp_abort_cnt)); 3374 vha->host_no, ha->isp_abort_cnt));
3339 status = 1; 3375 status = 1;
3340 } 3376 }
3341 } else { 3377 } else {
3342 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3378 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3343 DEBUG(printk("qla2x00(%ld): ISP error recovery " 3379 DEBUG(printk("qla2x00(%ld): ISP error recovery "
3344 "- retrying (%d) more times\n", 3380 "- retrying (%d) more times\n",
3345 ha->host_no, ha->isp_abort_cnt)); 3381 vha->host_no, ha->isp_abort_cnt));
3346 set_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3382 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3347 status = 1; 3383 status = 1;
3348 } 3384 }
3349 } 3385 }
3350 3386
3351 } 3387 }
3352 3388
3353 if (status) { 3389 if (!status) {
3390 DEBUG(printk(KERN_INFO
3391 "qla2x00_abort_isp(%ld): succeeded.\n",
3392 vha->host_no));
3393 list_for_each_entry(vp, &ha->vp_list, list) {
3394 if (vp->vp_idx)
3395 qla2x00_vp_abort_isp(vp);
3396 }
3397 } else {
3354 qla_printk(KERN_INFO, ha, 3398 qla_printk(KERN_INFO, ha,
3355 "qla2x00_abort_isp: **** FAILED ****\n"); 3399 "qla2x00_abort_isp: **** FAILED ****\n");
3356 } else {
3357 DEBUG(printk(KERN_INFO
3358 "qla2x00_abort_isp(%ld): exiting.\n",
3359 ha->host_no));
3360 } 3400 }
3361 3401
3362 return(status); 3402 return(status);
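
The tail of qla2x00_abort_isp() is rearranged in the hunk above: on success the recovery is now propagated to every virtual port except the base one (vp_idx 0), and on failure only the warning is printed. A compressed model of that control flow; the array layout and the vp_abort stand-in are illustrative only.

#include <stdio.h>

#define MAX_VPORTS 4

struct vport {
	int vp_idx;
};

/* Stand-in for qla2x00_vp_abort_isp(): per-vport recovery. */
static void vp_abort(struct vport *vp)
{
	printf("vp%d: re-running port-level recovery\n", vp->vp_idx);
}

static void finish_abort(struct vport vports[], int n, int status)
{
	int i;

	if (!status) {
		printf("abort_isp: succeeded\n");
		for (i = 0; i < n; i++)
			if (vports[i].vp_idx)	/* skip the base port */
				vp_abort(&vports[i]);
	} else {
		printf("abort_isp: **** FAILED ****\n");
	}
}

int main(void)
{
	struct vport vports[MAX_VPORTS] = {
		{ .vp_idx = 0 }, { .vp_idx = 1 }, { .vp_idx = 2 },
	};

	finish_abort(vports, 3, 0);
	return 0;
}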
@@ -3373,42 +3413,50 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3373* 0 = success 3413* 0 = success
3374*/ 3414*/
3375static int 3415static int
3376qla2x00_restart_isp(scsi_qla_host_t *ha) 3416qla2x00_restart_isp(scsi_qla_host_t *vha)
3377{ 3417{
3378 uint8_t status = 0; 3418 uint8_t status = 0;
3379 uint32_t wait_time; 3419 uint32_t wait_time;
3420 struct qla_hw_data *ha = vha->hw;
3421 struct req_que *req = ha->req_q_map[0];
3422 struct rsp_que *rsp = ha->rsp_q_map[0];
3380 3423
3381 /* If firmware needs to be loaded */ 3424 /* If firmware needs to be loaded */
3382 if (qla2x00_isp_firmware(ha)) { 3425 if (qla2x00_isp_firmware(vha)) {
3383 ha->flags.online = 0; 3426 vha->flags.online = 0;
3384 if (!(status = ha->isp_ops->chip_diag(ha))) 3427 status = ha->isp_ops->chip_diag(vha);
3385 status = qla2x00_setup_chip(ha); 3428 if (!status)
3429 status = qla2x00_setup_chip(vha);
3386 } 3430 }
3387 3431
3388 if (!status && !(status = qla2x00_init_rings(ha))) { 3432 if (!status && !(status = qla2x00_init_rings(vha))) {
3389 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3433 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3390 if (!(status = qla2x00_fw_ready(ha))) { 3434 /* Initialize the queues in use */
3435 qla25xx_init_queues(ha);
3436
3437 status = qla2x00_fw_ready(vha);
3438 if (!status) {
3391 DEBUG(printk("%s(): Start configure loop, " 3439 DEBUG(printk("%s(): Start configure loop, "
3392 "status = %d\n", __func__, status)); 3440 "status = %d\n", __func__, status));
3393 3441
3394 /* Issue a marker after FW becomes ready. */ 3442 /* Issue a marker after FW becomes ready. */
3395 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 3443 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
3396 3444
3397 ha->flags.online = 1; 3445 vha->flags.online = 1;
3398 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3446 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3399 wait_time = 256; 3447 wait_time = 256;
3400 do { 3448 do {
3401 clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 3449 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3402 qla2x00_configure_loop(ha); 3450 qla2x00_configure_loop(vha);
3403 wait_time--; 3451 wait_time--;
3404 } while (!atomic_read(&ha->loop_down_timer) && 3452 } while (!atomic_read(&vha->loop_down_timer) &&
3405 !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) && 3453 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3406 wait_time && 3454 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3407 (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))); 3455 &vha->dpc_flags)));
3408 } 3456 }
3409 3457
3410 /* if no cable then assume it's good */ 3458 /* if no cable then assume it's good */
3411 if ((ha->device_flags & DFLG_NO_CABLE)) 3459 if ((vha->device_flags & DFLG_NO_CABLE))
3412 status = 0; 3460 status = 0;
3413 3461
3414 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 3462 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
@@ -3418,6 +3466,46 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
3418 return (status); 3466 return (status);
3419} 3467}
3420 3468
3469static int
3470qla25xx_init_queues(struct qla_hw_data *ha)
3471{
3472 struct rsp_que *rsp = NULL;
3473 struct req_que *req = NULL;
3474 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3475 int ret = -1;
3476 int i;
3477
3478 for (i = 1; i < ha->max_queues; i++) {
3479 rsp = ha->rsp_q_map[i];
3480 if (rsp) {
3481 rsp->options &= ~BIT_0;
3482 ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options);
3483 if (ret != QLA_SUCCESS)
3484 DEBUG2_17(printk(KERN_WARNING
3485 "%s Rsp que:%d init failed\n", __func__,
3486 rsp->id));
3487 else
3488 DEBUG2_17(printk(KERN_INFO
3489 "%s Rsp que:%d inited\n", __func__,
3490 rsp->id));
3491 }
3492 req = ha->req_q_map[i];
3493 if (req) {
3494 req->options &= ~BIT_0;
3495 ret = qla25xx_init_req_que(base_vha, req, req->options);
3496 if (ret != QLA_SUCCESS)
3497 DEBUG2_17(printk(KERN_WARNING
3498 "%s Req que:%d init failed\n", __func__,
3499 req->id));
3500 else
3501 DEBUG2_17(printk(KERN_WARNING
3502 "%s Rsp que:%d inited\n", __func__,
3503 req->id));
3504 }
3505 }
3506 return ret;
3507}
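
The newly added qla25xx_init_queues() walks every non-base queue pair after the chip reset, clears the disable bit in each queue's options and re-issues the init mailbox command, logging the outcome per queue. A compressed userspace model of that loop (the queue structures and init routine are stand-ins; the sketch prints distinct "Req"/"Rsp" success strings, whereas the success message in the req branch of the hunk above reuses the "Rsp que" wording):

#include <stdio.h>

#define MAX_QUEUES 4
#define BIT_0      0x1

struct que {
	int id;
	unsigned int options;
};

/* Pretend hardware init: clear the "disabled" bit and report success. */
static int init_que(struct que *q)
{
	q->options &= ~BIT_0;
	return 0;	/* QLA_SUCCESS */
}

int main(void)
{
	/* Queue 0 is the base queue and is skipped, as in the function above. */
	struct que *req_map[MAX_QUEUES] = { NULL };
	struct que *rsp_map[MAX_QUEUES] = { NULL };
	struct que req1 = { .id = 1, .options = BIT_0 };
	struct que rsp1 = { .id = 1, .options = BIT_0 };
	int i;

	req_map[1] = &req1;
	rsp_map[1] = &rsp1;

	for (i = 1; i < MAX_QUEUES; i++) {
		if (rsp_map[i])
			printf("Rsp que:%d %s\n", rsp_map[i]->id,
			       init_que(rsp_map[i]) ? "init failed" : "inited");
		if (req_map[i])
			printf("Req que:%d %s\n", req_map[i]->id,
			       init_que(req_map[i]) ? "init failed" : "inited");
	}
	return 0;
}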
3508
3421/* 3509/*
3422* qla2x00_reset_adapter 3510* qla2x00_reset_adapter
3423* Reset adapter. 3511* Reset adapter.
@@ -3426,12 +3514,13 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
3426* ha = adapter block pointer. 3514* ha = adapter block pointer.
3427*/ 3515*/
3428void 3516void
3429qla2x00_reset_adapter(scsi_qla_host_t *ha) 3517qla2x00_reset_adapter(scsi_qla_host_t *vha)
3430{ 3518{
3431 unsigned long flags = 0; 3519 unsigned long flags = 0;
3520 struct qla_hw_data *ha = vha->hw;
3432 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3521 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3433 3522
3434 ha->flags.online = 0; 3523 vha->flags.online = 0;
3435 ha->isp_ops->disable_intrs(ha); 3524 ha->isp_ops->disable_intrs(ha);
3436 3525
3437 spin_lock_irqsave(&ha->hardware_lock, flags); 3526 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3443,12 +3532,13 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
3443} 3532}
3444 3533
3445void 3534void
3446qla24xx_reset_adapter(scsi_qla_host_t *ha) 3535qla24xx_reset_adapter(scsi_qla_host_t *vha)
3447{ 3536{
3448 unsigned long flags = 0; 3537 unsigned long flags = 0;
3538 struct qla_hw_data *ha = vha->hw;
3449 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3539 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3450 3540
3451 ha->flags.online = 0; 3541 vha->flags.online = 0;
3452 ha->isp_ops->disable_intrs(ha); 3542 ha->isp_ops->disable_intrs(ha);
3453 3543
3454 spin_lock_irqsave(&ha->hardware_lock, flags); 3544 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3462,9 +3552,11 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
3462/* On sparc systems, obtain port and node WWN from firmware 3552/* On sparc systems, obtain port and node WWN from firmware
3463 * properties. 3553 * properties.
3464 */ 3554 */
3465static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv) 3555static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
3556 struct nvram_24xx *nv)
3466{ 3557{
3467#ifdef CONFIG_SPARC 3558#ifdef CONFIG_SPARC
3559 struct qla_hw_data *ha = vha->hw;
3468 struct pci_dev *pdev = ha->pdev; 3560 struct pci_dev *pdev = ha->pdev;
3469 struct device_node *dp = pci_device_to_OF_node(pdev); 3561 struct device_node *dp = pci_device_to_OF_node(pdev);
3470 const u8 *val; 3562 const u8 *val;
@@ -3481,7 +3573,7 @@ static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *n
3481} 3573}
3482 3574
3483int 3575int
3484qla24xx_nvram_config(scsi_qla_host_t *ha) 3576qla24xx_nvram_config(scsi_qla_host_t *vha)
3485{ 3577{
3486 int rval; 3578 int rval;
3487 struct init_cb_24xx *icb; 3579 struct init_cb_24xx *icb;
@@ -3490,6 +3582,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3490 uint8_t *dptr1, *dptr2; 3582 uint8_t *dptr1, *dptr2;
3491 uint32_t chksum; 3583 uint32_t chksum;
3492 uint16_t cnt; 3584 uint16_t cnt;
3585 struct qla_hw_data *ha = vha->hw;
3493 3586
3494 rval = QLA_SUCCESS; 3587 rval = QLA_SUCCESS;
3495 icb = (struct init_cb_24xx *)ha->init_cb; 3588 icb = (struct init_cb_24xx *)ha->init_cb;
@@ -3507,12 +3600,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3507 3600
3508 /* Get VPD data into cache */ 3601 /* Get VPD data into cache */
3509 ha->vpd = ha->nvram + VPD_OFFSET; 3602 ha->vpd = ha->nvram + VPD_OFFSET;
3510 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, 3603 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
3511 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 3604 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
3512 3605
3513 /* Get NVRAM data into cache and calculate checksum. */ 3606 /* Get NVRAM data into cache and calculate checksum. */
3514 dptr = (uint32_t *)nv; 3607 dptr = (uint32_t *)nv;
3515 ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base, 3608 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
3516 ha->nvram_size); 3609 ha->nvram_size);
3517 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3610 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3518 chksum += le32_to_cpu(*dptr++); 3611 chksum += le32_to_cpu(*dptr++);
@@ -3557,7 +3650,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3557 nv->node_name[5] = 0x1c; 3650 nv->node_name[5] = 0x1c;
3558 nv->node_name[6] = 0x55; 3651 nv->node_name[6] = 0x55;
3559 nv->node_name[7] = 0x86; 3652 nv->node_name[7] = 0x86;
3560 qla24xx_nvram_wwn_from_ofw(ha, nv); 3653 qla24xx_nvram_wwn_from_ofw(vha, nv);
3561 nv->login_retry_count = __constant_cpu_to_le16(8); 3654 nv->login_retry_count = __constant_cpu_to_le16(8);
3562 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 3655 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3563 nv->login_timeout = __constant_cpu_to_le16(0); 3656 nv->login_timeout = __constant_cpu_to_le16(0);
@@ -3577,7 +3670,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3577 } 3670 }
3578 3671
3579 /* Reset Initialization control block */ 3672 /* Reset Initialization control block */
3580 memset(icb, 0, sizeof(struct init_cb_24xx)); 3673 memset(icb, 0, ha->init_cb_size);
3581 3674
3582 /* Copy 1st segment. */ 3675 /* Copy 1st segment. */
3583 dptr1 = (uint8_t *)icb; 3676 dptr1 = (uint8_t *)icb;
@@ -3600,7 +3693,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3600 /* 3693 /*
3601 * Setup driver NVRAM options. 3694 * Setup driver NVRAM options.
3602 */ 3695 */
3603 qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name), 3696 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
3604 "QLA2462"); 3697 "QLA2462");
3605 3698
3606 /* Use alternate WWN? */ 3699 /* Use alternate WWN? */
@@ -3639,8 +3732,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3639 ha->serial0 = icb->port_name[5]; 3732 ha->serial0 = icb->port_name[5];
3640 ha->serial1 = icb->port_name[6]; 3733 ha->serial1 = icb->port_name[6];
3641 ha->serial2 = icb->port_name[7]; 3734 ha->serial2 = icb->port_name[7];
3642 ha->node_name = icb->node_name; 3735 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3643 ha->port_name = icb->port_name; 3736 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
3644 3737
3645 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 3738 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3646 3739
@@ -3695,7 +3788,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3695 ha->login_retry_count = ql2xloginretrycount; 3788 ha->login_retry_count = ql2xloginretrycount;
3696 3789
3697 /* Enable ZIO. */ 3790 /* Enable ZIO. */
3698 if (!ha->flags.init_done) { 3791 if (!vha->flags.init_done) {
3699 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 3792 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
3700 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 3793 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3701 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 3794 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
@@ -3703,12 +3796,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3703 } 3796 }
3704 icb->firmware_options_2 &= __constant_cpu_to_le32( 3797 icb->firmware_options_2 &= __constant_cpu_to_le32(
3705 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 3798 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
3706 ha->flags.process_response_queue = 0; 3799 vha->flags.process_response_queue = 0;
3707 if (ha->zio_mode != QLA_ZIO_DISABLED) { 3800 if (ha->zio_mode != QLA_ZIO_DISABLED) {
3708 ha->zio_mode = QLA_ZIO_MODE_6; 3801 ha->zio_mode = QLA_ZIO_MODE_6;
3709 3802
3710 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 3803 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
3711 "(%d us).\n", ha->host_no, ha->zio_mode, 3804 "(%d us).\n", vha->host_no, ha->zio_mode,
3712 ha->zio_timer * 100)); 3805 ha->zio_timer * 100));
3713 qla_printk(KERN_INFO, ha, 3806 qla_printk(KERN_INFO, ha,
3714 "ZIO mode %d enabled; timer delay (%d us).\n", 3807 "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -3717,36 +3810,37 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3717 icb->firmware_options_2 |= cpu_to_le32( 3810 icb->firmware_options_2 |= cpu_to_le32(
3718 (uint32_t)ha->zio_mode); 3811 (uint32_t)ha->zio_mode);
3719 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 3812 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
3720 ha->flags.process_response_queue = 1; 3813 vha->flags.process_response_queue = 1;
3721 } 3814 }
3722 3815
3723 if (rval) { 3816 if (rval) {
3724 DEBUG2_3(printk(KERN_WARNING 3817 DEBUG2_3(printk(KERN_WARNING
3725 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); 3818 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
3726 } 3819 }
3727 return (rval); 3820 return (rval);
3728} 3821}
3729 3822
3730static int 3823static int
3731qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr) 3824qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3732{ 3825{
3733 int rval; 3826 int rval = QLA_SUCCESS;
3734 int segments, fragment; 3827 int segments, fragment;
3735 uint32_t faddr; 3828 uint32_t faddr;
3736 uint32_t *dcode, dlen; 3829 uint32_t *dcode, dlen;
3737 uint32_t risc_addr; 3830 uint32_t risc_addr;
3738 uint32_t risc_size; 3831 uint32_t risc_size;
3739 uint32_t i; 3832 uint32_t i;
3740 3833 struct qla_hw_data *ha = vha->hw;
3834 struct req_que *req = ha->req_q_map[0];
3741 rval = QLA_SUCCESS; 3835 rval = QLA_SUCCESS;
3742 3836
3743 segments = FA_RISC_CODE_SEGMENTS; 3837 segments = FA_RISC_CODE_SEGMENTS;
3744 faddr = ha->flt_region_fw; 3838 faddr = ha->flt_region_fw;
3745 dcode = (uint32_t *)ha->request_ring; 3839 dcode = (uint32_t *)req->ring;
3746 *srisc_addr = 0; 3840 *srisc_addr = 0;
3747 3841
3748 /* Validate firmware image by checking version. */ 3842 /* Validate firmware image by checking version. */
3749 qla24xx_read_flash_data(ha, dcode, faddr + 4, 4); 3843 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
3750 for (i = 0; i < 4; i++) 3844 for (i = 0; i < 4; i++)
3751 dcode[i] = be32_to_cpu(dcode[i]); 3845 dcode[i] = be32_to_cpu(dcode[i]);
3752 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 3846 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
@@ -3764,7 +3858,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3764 3858
3765 while (segments && rval == QLA_SUCCESS) { 3859 while (segments && rval == QLA_SUCCESS) {
3766 /* Read segment's load information. */ 3860 /* Read segment's load information. */
3767 qla24xx_read_flash_data(ha, dcode, faddr, 4); 3861 qla24xx_read_flash_data(vha, dcode, faddr, 4);
3768 3862
3769 risc_addr = be32_to_cpu(dcode[2]); 3863 risc_addr = be32_to_cpu(dcode[2]);
3770 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 3864 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
@@ -3778,17 +3872,17 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3778 3872
3779 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3873 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3780 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 3874 "addr %x, number of dwords 0x%x, offset 0x%x.\n",
3781 ha->host_no, risc_addr, dlen, faddr)); 3875 vha->host_no, risc_addr, dlen, faddr));
3782 3876
3783 qla24xx_read_flash_data(ha, dcode, faddr, dlen); 3877 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
3784 for (i = 0; i < dlen; i++) 3878 for (i = 0; i < dlen; i++)
3785 dcode[i] = swab32(dcode[i]); 3879 dcode[i] = swab32(dcode[i]);
3786 3880
3787 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 3881 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
3788 dlen); 3882 dlen);
3789 if (rval) { 3883 if (rval) {
3790 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3884 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
3791 "segment %d of firmware\n", ha->host_no, 3885 "segment %d of firmware\n", vha->host_no,
3792 fragment)); 3886 fragment));
3793 qla_printk(KERN_WARNING, ha, 3887 qla_printk(KERN_WARNING, ha,
3794 "[ERROR] Failed to load segment %d of " 3888 "[ERROR] Failed to load segment %d of "
@@ -3812,16 +3906,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3812#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/" 3906#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
3813 3907
3814int 3908int
3815qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr) 3909qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3816{ 3910{
3817 int rval; 3911 int rval;
3818 int i, fragment; 3912 int i, fragment;
3819 uint16_t *wcode, *fwcode; 3913 uint16_t *wcode, *fwcode;
3820 uint32_t risc_addr, risc_size, fwclen, wlen, *seg; 3914 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
3821 struct fw_blob *blob; 3915 struct fw_blob *blob;
3916 struct qla_hw_data *ha = vha->hw;
3917 struct req_que *req = ha->req_q_map[0];
3822 3918
3823 /* Load firmware blob. */ 3919 /* Load firmware blob. */
3824 blob = qla2x00_request_firmware(ha); 3920 blob = qla2x00_request_firmware(vha);
3825 if (!blob) { 3921 if (!blob) {
3826 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 3922 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
3827 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 3923 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3831,7 +3927,7 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3831 3927
3832 rval = QLA_SUCCESS; 3928 rval = QLA_SUCCESS;
3833 3929
3834 wcode = (uint16_t *)ha->request_ring; 3930 wcode = (uint16_t *)req->ring;
3835 *srisc_addr = 0; 3931 *srisc_addr = 0;
3836 fwcode = (uint16_t *)blob->fw->data; 3932 fwcode = (uint16_t *)blob->fw->data;
3837 fwclen = 0; 3933 fwclen = 0;
@@ -3878,17 +3974,17 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3878 wlen = risc_size; 3974 wlen = risc_size;
3879 3975
3880 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3976 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3881 "addr %x, number of words 0x%x.\n", ha->host_no, 3977 "addr %x, number of words 0x%x.\n", vha->host_no,
3882 risc_addr, wlen)); 3978 risc_addr, wlen));
3883 3979
3884 for (i = 0; i < wlen; i++) 3980 for (i = 0; i < wlen; i++)
3885 wcode[i] = swab16(fwcode[i]); 3981 wcode[i] = swab16(fwcode[i]);
3886 3982
3887 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 3983 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
3888 wlen); 3984 wlen);
3889 if (rval) { 3985 if (rval) {
3890 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3986 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
3891 "segment %d of firmware\n", ha->host_no, 3987 "segment %d of firmware\n", vha->host_no,
3892 fragment)); 3988 fragment));
3893 qla_printk(KERN_WARNING, ha, 3989 qla_printk(KERN_WARNING, ha,
3894 "[ERROR] Failed to load segment %d of " 3990 "[ERROR] Failed to load segment %d of "
@@ -3912,7 +4008,7 @@ fail_fw_integrity:
3912} 4008}
3913 4009
3914int 4010int
3915qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr) 4011qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3916{ 4012{
3917 int rval; 4013 int rval;
3918 int segments, fragment; 4014 int segments, fragment;
@@ -3922,9 +4018,11 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3922 uint32_t i; 4018 uint32_t i;
3923 struct fw_blob *blob; 4019 struct fw_blob *blob;
3924 uint32_t *fwcode, fwclen; 4020 uint32_t *fwcode, fwclen;
4021 struct qla_hw_data *ha = vha->hw;
4022 struct req_que *req = ha->req_q_map[0];
3925 4023
3926 /* Load firmware blob. */ 4024 /* Load firmware blob. */
3927 blob = qla2x00_request_firmware(ha); 4025 blob = qla2x00_request_firmware(vha);
3928 if (!blob) { 4026 if (!blob) {
3929 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4027 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
3930 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4028 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3933,13 +4031,13 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3933 /* Try to load RISC code from flash. */ 4031 /* Try to load RISC code from flash. */
3934 qla_printk(KERN_ERR, ha, "Attempting to load (potentially " 4032 qla_printk(KERN_ERR, ha, "Attempting to load (potentially "
3935 "outdated) firmware from flash.\n"); 4033 "outdated) firmware from flash.\n");
3936 return qla24xx_load_risc_flash(ha, srisc_addr); 4034 return qla24xx_load_risc_flash(vha, srisc_addr);
3937 } 4035 }
3938 4036
3939 rval = QLA_SUCCESS; 4037 rval = QLA_SUCCESS;
3940 4038
3941 segments = FA_RISC_CODE_SEGMENTS; 4039 segments = FA_RISC_CODE_SEGMENTS;
3942 dcode = (uint32_t *)ha->request_ring; 4040 dcode = (uint32_t *)req->ring;
3943 *srisc_addr = 0; 4041 *srisc_addr = 0;
3944 fwcode = (uint32_t *)blob->fw->data; 4042 fwcode = (uint32_t *)blob->fw->data;
3945 fwclen = 0; 4043 fwclen = 0;
@@ -3987,17 +4085,17 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3987 dlen = risc_size; 4085 dlen = risc_size;
3988 4086
3989 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4087 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3990 "addr %x, number of dwords 0x%x.\n", ha->host_no, 4088 "addr %x, number of dwords 0x%x.\n", vha->host_no,
3991 risc_addr, dlen)); 4089 risc_addr, dlen));
3992 4090
3993 for (i = 0; i < dlen; i++) 4091 for (i = 0; i < dlen; i++)
3994 dcode[i] = swab32(fwcode[i]); 4092 dcode[i] = swab32(fwcode[i]);
3995 4093
3996 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 4094 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
3997 dlen); 4095 dlen);
3998 if (rval) { 4096 if (rval) {
3999 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4097 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4000 "segment %d of firmware\n", ha->host_no, 4098 "segment %d of firmware\n", vha->host_no,
4001 fragment)); 4099 fragment));
4002 qla_printk(KERN_WARNING, ha, 4100 qla_printk(KERN_WARNING, ha,
4003 "[ERROR] Failed to load segment %d of " 4101 "[ERROR] Failed to load segment %d of "
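
All three firmware-load paths touched above (flash load plus the two request_firmware blob loaders) keep the same shape while switching their scratch space to req->ring / req->dma: each segment is split into fragments that fit the staging buffer, every word is byte-swapped, and the fragment is handed to qla2x00_load_ram(). A much-reduced userspace sketch of one segment; the buffer size, image contents and the load_ram stand-in are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define STAGE_DWORDS 4	/* stand-in for the request-ring staging buffer */

static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

/* Stand-in for qla2x00_load_ram(): pretend to DMA one fragment to the RISC. */
static int load_ram(uint32_t risc_addr, const uint32_t *buf, uint32_t dwords)
{
	printf("loading %u dwords at risc addr 0x%x (first word 0x%08x)\n",
	       (unsigned)dwords, (unsigned)risc_addr, (unsigned)buf[0]);
	return 0;
}

int main(void)
{
	/* One fake segment: a load address followed by a big-endian payload. */
	uint32_t risc_addr = 0x100000;
	uint32_t image[] = { 0x01020304, 0x05060708, 0x090a0b0c,
			     0x0d0e0f10, 0x11121314 };
	uint32_t remaining = sizeof(image) / sizeof(image[0]);
	uint32_t offset = 0, stage[STAGE_DWORDS];

	while (remaining) {
		uint32_t dlen = remaining > STAGE_DWORDS ? STAGE_DWORDS
							 : remaining;
		uint32_t i;

		for (i = 0; i < dlen; i++)	/* byte-swap into the stage */
			stage[i] = swab32(image[offset + i]);
		if (load_ram(risc_addr, stage, dlen))
			return 1;

		risc_addr += dlen;
		offset += dlen;
		remaining -= dlen;
	}
	return 0;
}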
@@ -4021,49 +4119,53 @@ fail_fw_integrity:
4021} 4119}
4022 4120
4023void 4121void
4024qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha) 4122qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4025{ 4123{
4026 int ret, retries; 4124 int ret, retries;
4125 struct qla_hw_data *ha = vha->hw;
4027 4126
4028 if (!IS_FWI2_CAPABLE(ha)) 4127 if (!IS_FWI2_CAPABLE(ha))
4029 return; 4128 return;
4030 if (!ha->fw_major_version) 4129 if (!ha->fw_major_version)
4031 return; 4130 return;
4032 4131
4033 ret = qla2x00_stop_firmware(ha); 4132 ret = qla2x00_stop_firmware(vha);
4034 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4133 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4035 retries ; retries--) { 4134 retries ; retries--) {
4036 ha->isp_ops->reset_chip(ha); 4135 ha->isp_ops->reset_chip(vha);
4037 if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS) 4136 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4038 continue; 4137 continue;
4039 if (qla2x00_setup_chip(ha) != QLA_SUCCESS) 4138 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4040 continue; 4139 continue;
4041 qla_printk(KERN_INFO, ha, 4140 qla_printk(KERN_INFO, ha,
4042 "Attempting retry of stop-firmware command...\n"); 4141 "Attempting retry of stop-firmware command...\n");
4043 ret = qla2x00_stop_firmware(ha); 4142 ret = qla2x00_stop_firmware(vha);
4044 } 4143 }
4045} 4144}
4046 4145
4047int 4146int
4048qla24xx_configure_vhba(scsi_qla_host_t *ha) 4147qla24xx_configure_vhba(scsi_qla_host_t *vha)
4049{ 4148{
4050 int rval = QLA_SUCCESS; 4149 int rval = QLA_SUCCESS;
4051 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4150 uint16_t mb[MAILBOX_REGISTER_COUNT];
4151 struct qla_hw_data *ha = vha->hw;
4152 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4153 struct req_que *req = ha->req_q_map[0];
4154 struct rsp_que *rsp = ha->rsp_q_map[0];
4052 4155
4053 if (!ha->parent) 4156 if (!vha->vp_idx)
4054 return -EINVAL; 4157 return -EINVAL;
4055 4158
4056 rval = qla2x00_fw_ready(ha->parent); 4159 rval = qla2x00_fw_ready(base_vha);
4057 if (rval == QLA_SUCCESS) { 4160 if (rval == QLA_SUCCESS) {
4058 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 4161 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4059 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 4162 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4060 } 4163 }
4061 4164
4062 ha->flags.management_server_logged_in = 0; 4165 vha->flags.management_server_logged_in = 0;
4063 4166
4064 /* Login to SNS first */ 4167 /* Login to SNS first */
4065 qla24xx_login_fabric(ha->parent, NPH_SNS, 0xff, 0xff, 0xfc, 4168 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
4066 mb, BIT_1);
4067 if (mb[0] != MBS_COMMAND_COMPLETE) { 4169 if (mb[0] != MBS_COMMAND_COMPLETE) {
4068 DEBUG15(qla_printk(KERN_INFO, ha, 4170 DEBUG15(qla_printk(KERN_INFO, ha,
4069 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 4171 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
@@ -4072,11 +4174,11 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
4072 return (QLA_FUNCTION_FAILED); 4174 return (QLA_FUNCTION_FAILED);
4073 } 4175 }
4074 4176
4075 atomic_set(&ha->loop_down_timer, 0); 4177 atomic_set(&vha->loop_down_timer, 0);
4076 atomic_set(&ha->loop_state, LOOP_UP); 4178 atomic_set(&vha->loop_state, LOOP_UP);
4077 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 4179 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4078 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 4180 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4079 rval = qla2x00_loop_resync(ha->parent); 4181 rval = qla2x00_loop_resync(base_vha);
4080 4182
4081 return rval; 4183 return rval;
4082} 4184}
@@ -4087,9 +4189,10 @@ static LIST_HEAD(qla_cs84xx_list);
4087static DEFINE_MUTEX(qla_cs84xx_mutex); 4189static DEFINE_MUTEX(qla_cs84xx_mutex);
4088 4190
4089static struct qla_chip_state_84xx * 4191static struct qla_chip_state_84xx *
4090qla84xx_get_chip(struct scsi_qla_host *ha) 4192qla84xx_get_chip(struct scsi_qla_host *vha)
4091{ 4193{
4092 struct qla_chip_state_84xx *cs84xx; 4194 struct qla_chip_state_84xx *cs84xx;
4195 struct qla_hw_data *ha = vha->hw;
4093 4196
4094 mutex_lock(&qla_cs84xx_mutex); 4197 mutex_lock(&qla_cs84xx_mutex);
4095 4198
@@ -4129,21 +4232,23 @@ __qla84xx_chip_release(struct kref *kref)
4129} 4232}
4130 4233
4131void 4234void
4132qla84xx_put_chip(struct scsi_qla_host *ha) 4235qla84xx_put_chip(struct scsi_qla_host *vha)
4133{ 4236{
4237 struct qla_hw_data *ha = vha->hw;
4134 if (ha->cs84xx) 4238 if (ha->cs84xx)
4135 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); 4239 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
4136} 4240}
4137 4241
4138static int 4242static int
4139qla84xx_init_chip(scsi_qla_host_t *ha) 4243qla84xx_init_chip(scsi_qla_host_t *vha)
4140{ 4244{
4141 int rval; 4245 int rval;
4142 uint16_t status[2]; 4246 uint16_t status[2];
4247 struct qla_hw_data *ha = vha->hw;
4143 4248
4144 mutex_lock(&ha->cs84xx->fw_update_mutex); 4249 mutex_lock(&ha->cs84xx->fw_update_mutex);
4145 4250
4146 rval = qla84xx_verify_chip(ha, status); 4251 rval = qla84xx_verify_chip(vha, status);
4147 4252
4148 mutex_unlock(&ha->cs84xx->fw_update_mutex); 4253 mutex_unlock(&ha->cs84xx->fw_update_mutex);
4149 4254
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index e90afad120ee..5e0a7095c9f2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -32,47 +32,15 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
32} 32}
33 33
34static inline void 34static inline void
35qla2x00_poll(scsi_qla_host_t *ha) 35qla2x00_poll(struct rsp_que *rsp)
36{ 36{
37 unsigned long flags; 37 unsigned long flags;
38 38 struct qla_hw_data *ha = rsp->hw;
39 local_irq_save(flags); 39 local_irq_save(flags);
40 ha->isp_ops->intr_handler(0, ha); 40 ha->isp_ops->intr_handler(0, rsp);
41 local_irq_restore(flags); 41 local_irq_restore(flags);
42} 42}
43 43
44static __inline__ scsi_qla_host_t *
45to_qla_parent(scsi_qla_host_t *ha)
46{
47 return ha->parent ? ha->parent : ha;
48}
49
50/**
51 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
52 * @ha: HA context
53 * @ha_locked: is function called with the hardware lock
54 *
55 * Returns non-zero if a failure occurred, else zero.
56 */
57static inline int
58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
59{
60 /* Send marker if required */
61 if (ha->marker_needed != 0) {
62 if (ha_locked) {
63 if (__qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
64 QLA_SUCCESS)
65 return (QLA_FUNCTION_FAILED);
66 } else {
67 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
68 QLA_SUCCESS)
69 return (QLA_FUNCTION_FAILED);
70 }
71 ha->marker_needed = 0;
72 }
73 return (QLA_SUCCESS);
74}
75
76static inline uint8_t * 44static inline uint8_t *
77host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) 45host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
78{ 46{
@@ -87,11 +55,12 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
87} 55}
88 56
89static inline int 57static inline int
90qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id) 58qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
91{ 59{
60 struct qla_hw_data *ha = vha->hw;
92 if (IS_FWI2_CAPABLE(ha)) 61 if (IS_FWI2_CAPABLE(ha))
93 return (loop_id > NPH_LAST_HANDLE); 62 return (loop_id > NPH_LAST_HANDLE);
94 63
95 return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) || 64 return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
96 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST); 65 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
97}; 66}
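
Besides rerouting qla2x00_poll() through the response queue (ha is now reached via rsp->hw) and dropping the to_qla_parent()/qla2x00_issue_marker() helpers, the qla_inline.h hunk switches the reserved-loop-ID test from ha->last_loop_id to ha->max_loop_id and drops the stray semicolon after the function body. The predicate is simple enough to exercise standalone; the constants below are placeholders, not the driver's real values, which come from firmware/NVRAM at run time.

#include <stdio.h>
#include <stdbool.h>

/* Placeholder limits for illustration only. */
#define MAX_LOOP_ID        0x7d
#define SNS_FIRST_LOOP_ID  0x81
#define MANAGEMENT_SERVER  0xfe
#define BROADCAST          0xff

static bool is_reserved_id(unsigned int loop_id)
{
	return (loop_id > MAX_LOOP_ID && loop_id < SNS_FIRST_LOOP_ID) ||
	       loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
}

int main(void)
{
	unsigned int ids[] = { 0x10, 0x7e, 0x81, 0xff };
	int i;

	for (i = 0; i < 4; i++)
		printf("loop_id 0x%02x reserved: %d\n", ids[i],
		       is_reserved_id(ids[i]));
	return 0;
}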
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 85bc0a48598b..5bedc9d05942 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,9 @@
11 11
12#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
13 13
14static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha); 14static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15static void qla2x00_isp_cmd(scsi_qla_host_t *ha); 15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
16 17
17/** 18/**
18 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 19 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -30,11 +31,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
30 /* Set transfer direction */ 31 /* Set transfer direction */
31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) { 32 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE; 33 cflags = CF_WRITE;
33 sp->fcport->ha->qla_stats.output_bytes += 34 sp->fcport->vha->hw->qla_stats.output_bytes +=
34 scsi_bufflen(sp->cmd); 35 scsi_bufflen(sp->cmd);
35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) { 36 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ; 37 cflags = CF_READ;
37 sp->fcport->ha->qla_stats.input_bytes += 38 sp->fcport->vha->hw->qla_stats.input_bytes +=
38 scsi_bufflen(sp->cmd); 39 scsi_bufflen(sp->cmd);
39 } 40 }
40 return (cflags); 41 return (cflags);
@@ -91,20 +92,19 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
91 * Returns a pointer to the Continuation Type 0 IOCB packet. 92 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 */ 93 */
93static inline cont_entry_t * 94static inline cont_entry_t *
94qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha) 95qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
95{ 96{
96 cont_entry_t *cont_pkt; 97 cont_entry_t *cont_pkt;
97
98 /* Adjust ring index. */ 98 /* Adjust ring index. */
99 ha->req_ring_index++; 99 req->ring_index++;
100 if (ha->req_ring_index == ha->request_q_length) { 100 if (req->ring_index == req->length) {
101 ha->req_ring_index = 0; 101 req->ring_index = 0;
102 ha->request_ring_ptr = ha->request_ring; 102 req->ring_ptr = req->ring;
103 } else { 103 } else {
104 ha->request_ring_ptr++; 104 req->ring_ptr++;
105 } 105 }
106 106
107 cont_pkt = (cont_entry_t *)ha->request_ring_ptr; 107 cont_pkt = (cont_entry_t *)req->ring_ptr;
108 108
109 /* Load packet defaults. */ 109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt->entry_type)) = 110 *((uint32_t *)(&cont_pkt->entry_type)) =
@@ -120,20 +120,20 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 120 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 121 */
122static inline cont_a64_entry_t * 122static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha) 123qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
124{ 124{
125 cont_a64_entry_t *cont_pkt; 125 cont_a64_entry_t *cont_pkt;
126 126
127 /* Adjust ring index. */ 127 /* Adjust ring index. */
128 ha->req_ring_index++; 128 req->ring_index++;
129 if (ha->req_ring_index == ha->request_q_length) { 129 if (req->ring_index == req->length) {
130 ha->req_ring_index = 0; 130 req->ring_index = 0;
131 ha->request_ring_ptr = ha->request_ring; 131 req->ring_ptr = req->ring;
132 } else { 132 } else {
133 ha->request_ring_ptr++; 133 req->ring_ptr++;
134 } 134 }
135 135
136 cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr; 136 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137 137
138 /* Load packet defaults. */ 138 /* Load packet defaults. */
139 *((uint32_t *)(&cont_pkt->entry_type)) = 139 *((uint32_t *)(&cont_pkt->entry_type)) =
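
Both continuation-IOCB helpers now advance req->ring_index and req->ring_ptr instead of the per-host fields, but the wrap logic is unchanged: bump the index, and when it reaches the ring length reset both the index and the pointer to the start of the ring. A standalone sketch of that advance; the ring length and entry layout are arbitrary here.

#include <stdio.h>

#define RING_LEN 4

struct entry { unsigned int payload; };

struct req_que {
	struct entry ring[RING_LEN];
	struct entry *ring_ptr;
	unsigned int ring_index;
	unsigned int length;
};

static struct entry *next_entry(struct req_que *req)
{
	req->ring_index++;
	if (req->ring_index == req->length) {	/* wrap to the start */
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return req->ring_ptr;
}

int main(void)
{
	struct req_que req = { .ring_ptr = req.ring, .length = RING_LEN };
	int i;

	for (i = 0; i < 6; i++) {
		struct entry *e = next_entry(&req);
		printf("advance %d -> index %u (offset %ld)\n",
		       i, req.ring_index, (long)(e - req.ring));
	}
	return 0;
}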
@@ -155,10 +155,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
155{ 155{
156 uint16_t avail_dsds; 156 uint16_t avail_dsds;
157 uint32_t *cur_dsd; 157 uint32_t *cur_dsd;
158 scsi_qla_host_t *ha; 158 scsi_qla_host_t *vha;
159 struct scsi_cmnd *cmd; 159 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 160 struct scatterlist *sg;
161 int i; 161 int i;
162 struct req_que *req;
162 163
163 cmd = sp->cmd; 164 cmd = sp->cmd;
164 165
@@ -172,7 +173,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
172 return; 173 return;
173 } 174 }
174 175
175 ha = sp->ha; 176 vha = sp->vha;
177 req = sp->que;
176 178
177 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
178 180
@@ -190,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
190 * Seven DSDs are available in the Continuation 192 * Seven DSDs are available in the Continuation
191 * Type 0 IOCB. 193 * Type 0 IOCB.
192 */ 194 */
193 cont_pkt = qla2x00_prep_cont_type0_iocb(ha); 195 cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
194 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
195 avail_dsds = 7; 197 avail_dsds = 7;
196 } 198 }
@@ -214,10 +216,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
214{ 216{
215 uint16_t avail_dsds; 217 uint16_t avail_dsds;
216 uint32_t *cur_dsd; 218 uint32_t *cur_dsd;
217 scsi_qla_host_t *ha; 219 scsi_qla_host_t *vha;
218 struct scsi_cmnd *cmd; 220 struct scsi_cmnd *cmd;
219 struct scatterlist *sg; 221 struct scatterlist *sg;
220 int i; 222 int i;
223 struct req_que *req;
221 224
222 cmd = sp->cmd; 225 cmd = sp->cmd;
223 226
@@ -231,7 +234,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
231 return; 234 return;
232 } 235 }
233 236
234 ha = sp->ha; 237 vha = sp->vha;
238 req = sp->que;
235 239
236 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
237 241
@@ -250,7 +254,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 * Five DSDs are available in the Continuation 254 * Five DSDs are available in the Continuation
251 * Type 1 IOCB. 255 * Type 1 IOCB.
252 */ 256 */
253 cont_pkt = qla2x00_prep_cont_type1_iocb(ha); 257 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
254 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 258 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
255 avail_dsds = 5; 259 avail_dsds = 5;
256 } 260 }
@@ -274,7 +278,7 @@ qla2x00_start_scsi(srb_t *sp)
274{ 278{
275 int ret, nseg; 279 int ret, nseg;
276 unsigned long flags; 280 unsigned long flags;
277 scsi_qla_host_t *ha; 281 scsi_qla_host_t *vha;
278 struct scsi_cmnd *cmd; 282 struct scsi_cmnd *cmd;
279 uint32_t *clr_ptr; 283 uint32_t *clr_ptr;
280 uint32_t index; 284 uint32_t index;
@@ -284,33 +288,39 @@ qla2x00_start_scsi(srb_t *sp)
284 uint16_t req_cnt; 288 uint16_t req_cnt;
285 uint16_t tot_dsds; 289 uint16_t tot_dsds;
286 struct device_reg_2xxx __iomem *reg; 290 struct device_reg_2xxx __iomem *reg;
291 struct qla_hw_data *ha;
292 struct req_que *req;
293 struct rsp_que *rsp;
287 294
288 /* Setup device pointers. */ 295 /* Setup device pointers. */
289 ret = 0; 296 ret = 0;
290 ha = sp->ha; 297 vha = sp->vha;
298 ha = vha->hw;
291 reg = &ha->iobase->isp; 299 reg = &ha->iobase->isp;
292 cmd = sp->cmd; 300 cmd = sp->cmd;
301 req = ha->req_q_map[0];
302 rsp = ha->rsp_q_map[0];
293 /* So we know we haven't pci_map'ed anything yet */ 303 /* So we know we haven't pci_map'ed anything yet */
294 tot_dsds = 0; 304 tot_dsds = 0;
295 305
296 /* Send marker if required */ 306 /* Send marker if required */
297 if (ha->marker_needed != 0) { 307 if (vha->marker_needed != 0) {
298 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 308 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
309 != QLA_SUCCESS)
299 return (QLA_FUNCTION_FAILED); 310 return (QLA_FUNCTION_FAILED);
300 } 311 vha->marker_needed = 0;
301 ha->marker_needed = 0;
302 } 312 }
303 313
304 /* Acquire ring specific lock */ 314 /* Acquire ring specific lock */
305 spin_lock_irqsave(&ha->hardware_lock, flags); 315 spin_lock_irqsave(&ha->hardware_lock, flags);
306 316
307 /* Check for room in outstanding command list. */ 317 /* Check for room in outstanding command list. */
308 handle = ha->current_outstanding_cmd; 318 handle = req->current_outstanding_cmd;
309 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 319 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
310 handle++; 320 handle++;
311 if (handle == MAX_OUTSTANDING_COMMANDS) 321 if (handle == MAX_OUTSTANDING_COMMANDS)
312 handle = 1; 322 handle = 1;
313 if (!ha->outstanding_cmds[handle]) 323 if (!req->outstanding_cmds[handle])
314 break; 324 break;
315 } 325 }
316 if (index == MAX_OUTSTANDING_COMMANDS) 326 if (index == MAX_OUTSTANDING_COMMANDS)
@@ -329,25 +339,26 @@ qla2x00_start_scsi(srb_t *sp)
329 339
330 /* Calculate the number of request entries needed. */ 340 /* Calculate the number of request entries needed. */
331 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 341 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
332 if (ha->req_q_cnt < (req_cnt + 2)) { 342 if (req->cnt < (req_cnt + 2)) {
333 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); 343 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
334 if (ha->req_ring_index < cnt) 344 if (req->ring_index < cnt)
335 ha->req_q_cnt = cnt - ha->req_ring_index; 345 req->cnt = cnt - req->ring_index;
336 else 346 else
337 ha->req_q_cnt = ha->request_q_length - 347 req->cnt = req->length -
338 (ha->req_ring_index - cnt); 348 (req->ring_index - cnt);
339 } 349 }
340 if (ha->req_q_cnt < (req_cnt + 2)) 350 if (req->cnt < (req_cnt + 2))
341 goto queuing_error; 351 goto queuing_error;
342 352
343 /* Build command packet */ 353 /* Build command packet */
344 ha->current_outstanding_cmd = handle; 354 req->current_outstanding_cmd = handle;
345 ha->outstanding_cmds[handle] = sp; 355 req->outstanding_cmds[handle] = sp;
346 sp->ha = ha; 356 sp->vha = vha;
357 sp->que = req;
347 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 358 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
348 ha->req_q_cnt -= req_cnt; 359 req->cnt -= req_cnt;
349 360
350 cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr; 361 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
351 cmd_pkt->handle = handle; 362 cmd_pkt->handle = handle;
352 /* Zero out remaining portion of packet. */ 363 /* Zero out remaining portion of packet. */
353 clr_ptr = (uint32_t *)cmd_pkt + 2; 364 clr_ptr = (uint32_t *)cmd_pkt + 2;
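
qla2x00_start_scsi() keeps the same free-space arithmetic, only applied to the req structure: read the hardware out-pointer, then the free count is either out minus in, or length minus (in minus out), depending on whether the consumer index has wrapped relative to the producer. In isolation, mirroring the hunk above with the ring length as a plain parameter:

#include <stdio.h>

/* Free request-ring entries, given the producer index (in) and the value just
 * read from the hardware consumer register (out). */
static unsigned int ring_space(unsigned int in, unsigned int out,
			       unsigned int length)
{
	if (in < out)
		return out - in;
	return length - (in - out);
}

int main(void)
{
	printf("in=3  out=10 len=64 -> %u free\n", ring_space(3, 10, 64));
	printf("in=60 out=5  len=64 -> %u free\n", ring_space(60, 5, 64));
	return 0;
}

The driver then refuses to queue unless at least req_cnt + 2 entries are free, keeping a small guard band between producer and consumer.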
@@ -373,23 +384,23 @@ qla2x00_start_scsi(srb_t *sp)
373 wmb(); 384 wmb();
374 385
375 /* Adjust ring index. */ 386 /* Adjust ring index. */
376 ha->req_ring_index++; 387 req->ring_index++;
377 if (ha->req_ring_index == ha->request_q_length) { 388 if (req->ring_index == req->length) {
378 ha->req_ring_index = 0; 389 req->ring_index = 0;
379 ha->request_ring_ptr = ha->request_ring; 390 req->ring_ptr = req->ring;
380 } else 391 } else
381 ha->request_ring_ptr++; 392 req->ring_ptr++;
382 393
383 sp->flags |= SRB_DMA_VALID; 394 sp->flags |= SRB_DMA_VALID;
384 395
385 /* Set chip new ring index. */ 396 /* Set chip new ring index. */
386 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index); 397 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
387 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 398 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
388 399
389 /* Manage unprocessed RIO/ZIO commands in response queue. */ 400 /* Manage unprocessed RIO/ZIO commands in response queue. */
390 if (ha->flags.process_response_queue && 401 if (vha->flags.process_response_queue &&
391 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 402 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
392 qla2x00_process_response_queue(ha); 403 qla2x00_process_response_queue(rsp);
393 404
394 spin_unlock_irqrestore(&ha->hardware_lock, flags); 405 spin_unlock_irqrestore(&ha->hardware_lock, flags);
395 return (QLA_SUCCESS); 406 return (QLA_SUCCESS);
@@ -415,18 +426,20 @@ queuing_error:
415 * Returns non-zero if a failure occurred, else zero. 426 * Returns non-zero if a failure occurred, else zero.
416 */ 427 */
417int 428int
418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 429__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
419 uint8_t type) 430 struct rsp_que *rsp, uint16_t loop_id,
431 uint16_t lun, uint8_t type)
420{ 432{
421 mrk_entry_t *mrk; 433 mrk_entry_t *mrk;
422 struct mrk_entry_24xx *mrk24; 434 struct mrk_entry_24xx *mrk24;
423 scsi_qla_host_t *pha = to_qla_parent(ha); 435 struct qla_hw_data *ha = vha->hw;
436 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
424 437
425 mrk24 = NULL; 438 mrk24 = NULL;
426 mrk = (mrk_entry_t *)qla2x00_req_pkt(pha); 439 mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
427 if (mrk == NULL) { 440 if (mrk == NULL) {
428 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 441 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
429 __func__, ha->host_no)); 442 __func__, base_vha->host_no));
430 443
431 return (QLA_FUNCTION_FAILED); 444 return (QLA_FUNCTION_FAILED);
432 } 445 }
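This is the first hunk in the excerpt where the split between the per-port scsi_qla_host ("vha") and the shared qla_hw_data ("ha") is spelled out: the marker helper takes the vha, reaches adapter-wide state through vha->hw, and recovers the base port from the PCI drvdata instead of walking to_qla_parent(). An abridged sketch of the assumed relationship, limited to members this diff touches (real definitions in qla_def.h):

#include <linux/types.h>
#include <linux/spinlock.h>

struct pci_dev;

struct qla_hw_data_sketch {
	spinlock_t hardware_lock;	/* still one lock per adapter */
	struct pci_dev *pdev;		/* drvdata points at the base vha */
};

struct scsi_qla_host_sketch {
	struct qla_hw_data_sketch *hw;	/* vha->hw replaces to_qla_parent() */
	uint16_t vp_idx;		/* 0 on the physical port */
	unsigned long host_no;
};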
@@ -440,7 +453,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
440 mrk24->lun[1] = LSB(lun); 453 mrk24->lun[1] = LSB(lun);
441 mrk24->lun[2] = MSB(lun); 454 mrk24->lun[2] = MSB(lun);
442 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 455 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
443 mrk24->vp_index = ha->vp_idx; 456 mrk24->vp_index = vha->vp_idx;
444 } else { 457 } else {
445 SET_TARGET_ID(ha, mrk->target, loop_id); 458 SET_TARGET_ID(ha, mrk->target, loop_id);
446 mrk->lun = cpu_to_le16(lun); 459 mrk->lun = cpu_to_le16(lun);
@@ -448,22 +461,22 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
448 } 461 }
449 wmb(); 462 wmb();
450 463
451 qla2x00_isp_cmd(pha); 464 qla2x00_isp_cmd(vha, req);
452 465
453 return (QLA_SUCCESS); 466 return (QLA_SUCCESS);
454} 467}
455 468
456int 469int
457qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 470qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
458 uint8_t type) 471 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
472 uint8_t type)
459{ 473{
460 int ret; 474 int ret;
461 unsigned long flags = 0; 475 unsigned long flags = 0;
462 scsi_qla_host_t *pha = to_qla_parent(ha);
463 476
464 spin_lock_irqsave(&pha->hardware_lock, flags); 477 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
465 ret = __qla2x00_marker(ha, loop_id, lun, type); 478 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
466 spin_unlock_irqrestore(&pha->hardware_lock, flags); 479 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
467 480
468 return (ret); 481 return (ret);
469} 482}
@@ -477,9 +490,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
477 * Returns NULL if function failed, else, a pointer to the request packet. 490 * Returns NULL if function failed, else, a pointer to the request packet.
478 */ 491 */
479static request_t * 492static request_t *
480qla2x00_req_pkt(scsi_qla_host_t *ha) 493qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
494 struct rsp_que *rsp)
481{ 495{
482 device_reg_t __iomem *reg = ha->iobase; 496 struct qla_hw_data *ha = vha->hw;
497 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
483 request_t *pkt = NULL; 498 request_t *pkt = NULL;
484 uint16_t cnt; 499 uint16_t cnt;
485 uint32_t *dword_ptr; 500 uint32_t *dword_ptr;
@@ -488,24 +503,29 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
488 503
489 /* Wait 1 second for slot. */ 504 /* Wait 1 second for slot. */
490 for (timer = HZ; timer; timer--) { 505 for (timer = HZ; timer; timer--) {
491 if ((req_cnt + 2) >= ha->req_q_cnt) { 506 if ((req_cnt + 2) >= req->cnt) {
492 /* Calculate number of free request entries. */ 507 /* Calculate number of free request entries. */
493 if (IS_FWI2_CAPABLE(ha)) 508 if (ha->mqenable)
494 cnt = (uint16_t)RD_REG_DWORD( 509 cnt = (uint16_t)
495 &reg->isp24.req_q_out); 510 RD_REG_DWORD(&reg->isp25mq.req_q_out);
496 else 511 else {
497 cnt = qla2x00_debounce_register( 512 if (IS_FWI2_CAPABLE(ha))
498 ISP_REQ_Q_OUT(ha, &reg->isp)); 513 cnt = (uint16_t)RD_REG_DWORD(
499 if (ha->req_ring_index < cnt) 514 &reg->isp24.req_q_out);
500 ha->req_q_cnt = cnt - ha->req_ring_index; 515 else
516 cnt = qla2x00_debounce_register(
517 ISP_REQ_Q_OUT(ha, &reg->isp));
518 }
519 if (req->ring_index < cnt)
520 req->cnt = cnt - req->ring_index;
501 else 521 else
502 ha->req_q_cnt = ha->request_q_length - 522 req->cnt = req->length -
503 (ha->req_ring_index - cnt); 523 (req->ring_index - cnt);
504 } 524 }
505 /* If room for request in request ring. */ 525 /* If room for request in request ring. */
506 if ((req_cnt + 2) < ha->req_q_cnt) { 526 if ((req_cnt + 2) < req->cnt) {
507 ha->req_q_cnt--; 527 req->cnt--;
508 pkt = ha->request_ring_ptr; 528 pkt = req->ring_ptr;
509 529
510 /* Zero out packet. */ 530 /* Zero out packet. */
511 dword_ptr = (uint32_t *)pkt; 531 dword_ptr = (uint32_t *)pkt;
@@ -513,7 +533,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
513 *dword_ptr++ = 0; 533 *dword_ptr++ = 0;
514 534
515 /* Set system defined field. */ 535 /* Set system defined field. */
516 pkt->sys_define = (uint8_t)ha->req_ring_index; 536 pkt->sys_define = (uint8_t)req->ring_index;
517 537
518 /* Set entry count. */ 538 /* Set entry count. */
519 pkt->entry_count = 1; 539 pkt->entry_count = 1;
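The free-slot arithmetic a few lines up (compare the driver's producer index against the consumer index the firmware last reported, wrapping at the end of the ring) is identical for every queue type; only where the out-pointer is read from differs (isp25mq window, isp24 register, or the legacy word register). A self-contained sketch of that computation:

#include <stdint.h>

/*
 * ring_index: producer index (driver); out: consumer index last reported
 * by the firmware; length: entries in the ring.  Mirrors the if/else in
 * qla2x00_req_pkt(); the driver additionally keeps two entries in reserve.
 */
static uint16_t req_free_entries(uint16_t ring_index, uint16_t out,
				 uint16_t length)
{
	if (ring_index < out)
		return out - ring_index;
	return length - (ring_index - out);
}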
@@ -522,15 +542,14 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
522 } 542 }
523 543
524 /* Release ring specific lock */ 544 /* Release ring specific lock */
525 spin_unlock(&ha->hardware_lock); 545 spin_unlock_irq(&ha->hardware_lock);
526 546
527 udelay(2); /* 2 us */ 547 udelay(2); /* 2 us */
528 548
529 /* Check for pending interrupts. */ 549 /* Check for pending interrupts. */
530 /* During init we issue marker directly */ 550 /* During init we issue marker directly */
531 if (!ha->marker_needed && !ha->flags.init_done) 551 if (!vha->marker_needed && !vha->flags.init_done)
532 qla2x00_poll(ha); 552 qla2x00_poll(rsp);
533
534 spin_lock_irq(&ha->hardware_lock); 553 spin_lock_irq(&ha->hardware_lock);
535 } 554 }
536 if (!pkt) { 555 if (!pkt) {
@@ -547,29 +566,38 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
547 * Note: The caller must hold the hardware lock before calling this routine. 566 * Note: The caller must hold the hardware lock before calling this routine.
548 */ 567 */
549static void 568static void
550qla2x00_isp_cmd(scsi_qla_host_t *ha) 569qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
551{ 570{
552 device_reg_t __iomem *reg = ha->iobase; 571 struct qla_hw_data *ha = vha->hw;
572 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
573 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
553 574
554 DEBUG5(printk("%s(): IOCB data:\n", __func__)); 575 DEBUG5(printk("%s(): IOCB data:\n", __func__));
555 DEBUG5(qla2x00_dump_buffer( 576 DEBUG5(qla2x00_dump_buffer(
556 (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE)); 577 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
557 578
558 /* Adjust ring index. */ 579 /* Adjust ring index. */
559 ha->req_ring_index++; 580 req->ring_index++;
560 if (ha->req_ring_index == ha->request_q_length) { 581 if (req->ring_index == req->length) {
561 ha->req_ring_index = 0; 582 req->ring_index = 0;
562 ha->request_ring_ptr = ha->request_ring; 583 req->ring_ptr = req->ring;
563 } else 584 } else
564 ha->request_ring_ptr++; 585 req->ring_ptr++;
565 586
566 /* Set chip new ring index. */ 587 /* Set chip new ring index. */
567 if (IS_FWI2_CAPABLE(ha)) { 588 if (ha->mqenable) {
568 WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index); 589 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
569 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); 590 RD_REG_DWORD(&ioreg->hccr);
570 } else { 591 }
571 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index); 592 else {
572 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp)); 593 if (IS_FWI2_CAPABLE(ha)) {
594 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
595 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
596 } else {
597 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
598 req->ring_index);
599 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
600 }
573 } 601 }
574 602
575} 603}
@@ -610,10 +638,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
610{ 638{
611 uint16_t avail_dsds; 639 uint16_t avail_dsds;
612 uint32_t *cur_dsd; 640 uint32_t *cur_dsd;
613 scsi_qla_host_t *ha; 641 scsi_qla_host_t *vha;
614 struct scsi_cmnd *cmd; 642 struct scsi_cmnd *cmd;
615 struct scatterlist *sg; 643 struct scatterlist *sg;
616 int i; 644 int i;
645 struct req_que *req;
617 646
618 cmd = sp->cmd; 647 cmd = sp->cmd;
619 648
@@ -627,18 +656,19 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
627 return; 656 return;
628 } 657 }
629 658
630 ha = sp->ha; 659 vha = sp->vha;
660 req = sp->que;
631 661
632 /* Set transfer direction */ 662 /* Set transfer direction */
633 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 663 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
634 cmd_pkt->task_mgmt_flags = 664 cmd_pkt->task_mgmt_flags =
635 __constant_cpu_to_le16(TMF_WRITE_DATA); 665 __constant_cpu_to_le16(TMF_WRITE_DATA);
636 sp->fcport->ha->qla_stats.output_bytes += 666 sp->fcport->vha->hw->qla_stats.output_bytes +=
637 scsi_bufflen(sp->cmd); 667 scsi_bufflen(sp->cmd);
638 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 668 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
639 cmd_pkt->task_mgmt_flags = 669 cmd_pkt->task_mgmt_flags =
640 __constant_cpu_to_le16(TMF_READ_DATA); 670 __constant_cpu_to_le16(TMF_READ_DATA);
641 sp->fcport->ha->qla_stats.input_bytes += 671 sp->fcport->vha->hw->qla_stats.input_bytes +=
642 scsi_bufflen(sp->cmd); 672 scsi_bufflen(sp->cmd);
643 } 673 }
644 674
@@ -658,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
658 * Five DSDs are available in the Continuation 688 * Five DSDs are available in the Continuation
659 * Type 1 IOCB. 689 * Type 1 IOCB.
660 */ 690 */
661 cont_pkt = qla2x00_prep_cont_type1_iocb(ha); 691 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
662 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 692 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
663 avail_dsds = 5; 693 avail_dsds = 5;
664 } 694 }
@@ -683,8 +713,6 @@ qla24xx_start_scsi(srb_t *sp)
683{ 713{
684 int ret, nseg; 714 int ret, nseg;
685 unsigned long flags; 715 unsigned long flags;
686 scsi_qla_host_t *ha, *pha;
687 struct scsi_cmnd *cmd;
688 uint32_t *clr_ptr; 716 uint32_t *clr_ptr;
689 uint32_t index; 717 uint32_t index;
690 uint32_t handle; 718 uint32_t handle;
@@ -692,35 +720,45 @@ qla24xx_start_scsi(srb_t *sp)
692 uint16_t cnt; 720 uint16_t cnt;
693 uint16_t req_cnt; 721 uint16_t req_cnt;
694 uint16_t tot_dsds; 722 uint16_t tot_dsds;
695 struct device_reg_24xx __iomem *reg; 723 struct req_que *req = NULL;
724 struct rsp_que *rsp = NULL;
725 struct scsi_cmnd *cmd = sp->cmd;
726 struct scsi_qla_host *vha = sp->vha;
727 struct qla_hw_data *ha = vha->hw;
728 uint16_t que_id;
696 729
697 /* Setup device pointers. */ 730 /* Setup device pointers. */
698 ret = 0; 731 ret = 0;
699 ha = sp->ha; 732 que_id = vha->req_ques[0];
700 pha = to_qla_parent(ha); 733
701 reg = &ha->iobase->isp24; 734 req = ha->req_q_map[que_id];
702 cmd = sp->cmd; 735 sp->que = req;
736
737 if (req->rsp)
738 rsp = req->rsp;
739 else
740 rsp = ha->rsp_q_map[que_id];
703 /* So we know we haven't pci_map'ed anything yet */ 741 /* So we know we haven't pci_map'ed anything yet */
704 tot_dsds = 0; 742 tot_dsds = 0;
705 743
706 /* Send marker if required */ 744 /* Send marker if required */
707 if (ha->marker_needed != 0) { 745 if (vha->marker_needed != 0) {
708 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 746 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
747 != QLA_SUCCESS)
709 return QLA_FUNCTION_FAILED; 748 return QLA_FUNCTION_FAILED;
710 } 749 vha->marker_needed = 0;
711 ha->marker_needed = 0;
712 } 750 }
713 751
714 /* Acquire ring specific lock */ 752 /* Acquire ring specific lock */
715 spin_lock_irqsave(&pha->hardware_lock, flags); 753 spin_lock_irqsave(&ha->hardware_lock, flags);
716 754
717 /* Check for room in outstanding command list. */ 755 /* Check for room in outstanding command list. */
718 handle = ha->current_outstanding_cmd; 756 handle = req->current_outstanding_cmd;
719 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 757 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
720 handle++; 758 handle++;
721 if (handle == MAX_OUTSTANDING_COMMANDS) 759 if (handle == MAX_OUTSTANDING_COMMANDS)
722 handle = 1; 760 handle = 1;
723 if (!ha->outstanding_cmds[handle]) 761 if (!req->outstanding_cmds[handle])
724 break; 762 break;
725 } 763 }
726 if (index == MAX_OUTSTANDING_COMMANDS) 764 if (index == MAX_OUTSTANDING_COMMANDS)
@@ -738,25 +776,26 @@ qla24xx_start_scsi(srb_t *sp)
738 tot_dsds = nseg; 776 tot_dsds = nseg;
739 777
740 req_cnt = qla24xx_calc_iocbs(tot_dsds); 778 req_cnt = qla24xx_calc_iocbs(tot_dsds);
741 if (ha->req_q_cnt < (req_cnt + 2)) { 779 if (req->cnt < (req_cnt + 2)) {
742 cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out); 780 cnt = ha->isp_ops->rd_req_reg(ha, req->id);
743 if (ha->req_ring_index < cnt) 781
744 ha->req_q_cnt = cnt - ha->req_ring_index; 782 if (req->ring_index < cnt)
783 req->cnt = cnt - req->ring_index;
745 else 784 else
746 ha->req_q_cnt = ha->request_q_length - 785 req->cnt = req->length -
747 (ha->req_ring_index - cnt); 786 (req->ring_index - cnt);
748 } 787 }
749 if (ha->req_q_cnt < (req_cnt + 2)) 788 if (req->cnt < (req_cnt + 2))
750 goto queuing_error; 789 goto queuing_error;
751 790
752 /* Build command packet. */ 791 /* Build command packet. */
753 ha->current_outstanding_cmd = handle; 792 req->current_outstanding_cmd = handle;
754 ha->outstanding_cmds[handle] = sp; 793 req->outstanding_cmds[handle] = sp;
755 sp->ha = ha; 794 sp->vha = vha;
756 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 795 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
757 ha->req_q_cnt -= req_cnt; 796 req->cnt -= req_cnt;
758 797
759 cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr; 798 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
760 cmd_pkt->handle = handle; 799 cmd_pkt->handle = handle;
761 800
762 /* Zero out remaining portion of packet. */ 801 /* Zero out remaining portion of packet. */
@@ -789,32 +828,63 @@ qla24xx_start_scsi(srb_t *sp)
789 wmb(); 828 wmb();
790 829
791 /* Adjust ring index. */ 830 /* Adjust ring index. */
792 ha->req_ring_index++; 831 req->ring_index++;
793 if (ha->req_ring_index == ha->request_q_length) { 832 if (req->ring_index == req->length) {
794 ha->req_ring_index = 0; 833 req->ring_index = 0;
795 ha->request_ring_ptr = ha->request_ring; 834 req->ring_ptr = req->ring;
796 } else 835 } else
797 ha->request_ring_ptr++; 836 req->ring_ptr++;
798 837
799 sp->flags |= SRB_DMA_VALID; 838 sp->flags |= SRB_DMA_VALID;
800 839
801 /* Set chip new ring index. */ 840 /* Set chip new ring index. */
802 WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index); 841 ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);
803 RD_REG_DWORD_RELAXED(&reg->req_q_in); /* PCI Posting. */
804 842
805 /* Manage unprocessed RIO/ZIO commands in response queue. */ 843 /* Manage unprocessed RIO/ZIO commands in response queue. */
806 if (ha->flags.process_response_queue && 844 if (vha->flags.process_response_queue &&
807 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 845 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
808 qla24xx_process_response_queue(ha); 846 qla24xx_process_response_queue(rsp);
809 847
810 spin_unlock_irqrestore(&pha->hardware_lock, flags); 848 spin_unlock_irqrestore(&ha->hardware_lock, flags);
811 return QLA_SUCCESS; 849 return QLA_SUCCESS;
812 850
813queuing_error: 851queuing_error:
814 if (tot_dsds) 852 if (tot_dsds)
815 scsi_dma_unmap(cmd); 853 scsi_dma_unmap(cmd);
816 854
817 spin_unlock_irqrestore(&pha->hardware_lock, flags); 855 spin_unlock_irqrestore(&ha->hardware_lock, flags);
818 856
819 return QLA_FUNCTION_FAILED; 857 return QLA_FUNCTION_FAILED;
820} 858}
859
860uint16_t
861qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
862{
863 device_reg_t __iomem *reg = (void *) ha->iobase;
864 return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
865}
866
867uint16_t
868qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
869{
870 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
871 return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
872}
873
874void
875qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
876{
877 device_reg_t __iomem *reg = (void *) ha->iobase;
878 WRT_REG_DWORD(&reg->isp24.req_q_in, index);
879 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
880}
881
882void
883qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
884{
885 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
886 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
887 WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
888 RD_REG_DWORD(&ioreg->hccr); /* PCI posting */
889}
890
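The four accessors above isolate the only per-ISP difference left in the submit path: where the request-queue in/out pointers live (the classic isp24 block versus a QLA_QUE_PAGE-sized register window per queue on the multi-queue isp25mq, whose writes are posted by reading HCCR). They are presumably hooked into the per-ISP operations table so that qla24xx_start_scsi() can simply call ha->isp_ops->rd_req_reg()/wrt_req_reg(); an abridged sketch of that hook-up (the full struct isp_operations is in qla_def.h):

#include <linux/types.h>

struct qla_hw_data;	/* opaque here */

/* Abridged; only the members this patch adds calls for are shown. */
struct isp_operations_sketch {
	uint16_t (*rd_req_reg)(struct qla_hw_data *, uint16_t id);
	void	 (*wrt_req_reg)(struct qla_hw_data *, uint16_t id,
				uint16_t index);
};

/* Assumed table entries, e.g.:
 *   ISP24xx: .rd_req_reg = qla24xx_rd_req_reg, .wrt_req_reg = qla24xx_wrt_req_reg
 *   ISP25xx: .rd_req_reg = qla25xx_rd_req_reg, .wrt_req_reg = qla25xx_wrt_req_reg
 */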
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a76efd99d007..d5fb79a88001 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,10 +10,13 @@
10#include <scsi/scsi_tcq.h> 10#include <scsi/scsi_tcq.h>
11 11
12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t); 13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14static void qla2x00_status_entry(scsi_qla_host_t *, void *); 14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
15static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
16static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); 17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *);
19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
17 20
18/** 21/**
19 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -27,24 +30,28 @@ static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
27irqreturn_t 30irqreturn_t
28qla2100_intr_handler(int irq, void *dev_id) 31qla2100_intr_handler(int irq, void *dev_id)
29{ 32{
30 scsi_qla_host_t *ha; 33 scsi_qla_host_t *vha;
34 struct qla_hw_data *ha;
31 struct device_reg_2xxx __iomem *reg; 35 struct device_reg_2xxx __iomem *reg;
32 int status; 36 int status;
33 unsigned long iter; 37 unsigned long iter;
34 uint16_t hccr; 38 uint16_t hccr;
35 uint16_t mb[4]; 39 uint16_t mb[4];
40 struct rsp_que *rsp;
36 41
37 ha = (scsi_qla_host_t *) dev_id; 42 rsp = (struct rsp_que *) dev_id;
38 if (!ha) { 43 if (!rsp) {
39 printk(KERN_INFO 44 printk(KERN_INFO
40 "%s(): NULL host pointer\n", __func__); 45 "%s(): NULL response queue pointer\n", __func__);
41 return (IRQ_NONE); 46 return (IRQ_NONE);
42 } 47 }
43 48
49 ha = rsp->hw;
44 reg = &ha->iobase->isp; 50 reg = &ha->iobase->isp;
45 status = 0; 51 status = 0;
46 52
47 spin_lock(&ha->hardware_lock); 53 spin_lock(&ha->hardware_lock);
54 vha = qla2x00_get_rsp_host(rsp);
48 for (iter = 50; iter--; ) { 55 for (iter = 50; iter--; ) {
49 hccr = RD_REG_WORD(&reg->hccr); 56 hccr = RD_REG_WORD(&reg->hccr);
50 if (hccr & HCCR_RISC_PAUSE) { 57 if (hccr & HCCR_RISC_PAUSE) {
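With the response queue rather than the host passed as dev_id, the IRQ must now be registered with the rsp pointer as its cookie, and the handler recovers its vha through the new qla2x00_get_rsp_host() helper, whose body falls outside this excerpt. A hedged sketch of both pieces, assuming the driver's usual headers; the pci_get_drvdata() fallback mirrors what __qla2x00_marker() does above, though the real helper may instead consult the queue's outstanding commands:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include "qla_def.h"	/* struct qla_hw_data, struct rsp_que, scsi_qla_host_t */

/* Hypothetical registration implied by the new dev_id ("qla2xxx" and the
 * flags are illustrative only). */
static int qla2xxx_register_rsp_irq_sketch(struct qla_hw_data *ha,
					   struct rsp_que *rsp)
{
	return request_irq(ha->pdev->irq, qla2100_intr_handler,
			   IRQF_SHARED, "qla2xxx", rsp);
}

/* Plausible minimal mapping from a response queue back to a vha. */
static struct scsi_qla_host *qla2x00_get_rsp_host_sketch(struct rsp_que *rsp)
{
	return pci_get_drvdata(rsp->hw->pdev);
}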
@@ -59,8 +66,8 @@ qla2100_intr_handler(int irq, void *dev_id)
59 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 66 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
60 RD_REG_WORD(&reg->hccr); 67 RD_REG_WORD(&reg->hccr);
61 68
62 ha->isp_ops->fw_dump(ha, 1); 69 ha->isp_ops->fw_dump(vha, 1);
63 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 70 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
64 break; 71 break;
65 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) 72 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
66 break; 73 break;
@@ -72,24 +79,24 @@ qla2100_intr_handler(int irq, void *dev_id)
72 /* Get mailbox data. */ 79 /* Get mailbox data. */
73 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 80 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
74 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 81 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
75 qla2x00_mbx_completion(ha, mb[0]); 82 qla2x00_mbx_completion(vha, mb[0]);
76 status |= MBX_INTERRUPT; 83 status |= MBX_INTERRUPT;
77 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 84 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
78 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 85 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
79 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 86 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
80 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 87 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
81 qla2x00_async_event(ha, mb); 88 qla2x00_async_event(vha, rsp, mb);
82 } else { 89 } else {
83 /*EMPTY*/ 90 /*EMPTY*/
84 DEBUG2(printk("scsi(%ld): Unrecognized " 91 DEBUG2(printk("scsi(%ld): Unrecognized "
85 "interrupt type (%d).\n", 92 "interrupt type (%d).\n",
86 ha->host_no, mb[0])); 93 vha->host_no, mb[0]));
87 } 94 }
88 /* Release mailbox registers. */ 95 /* Release mailbox registers. */
89 WRT_REG_WORD(&reg->semaphore, 0); 96 WRT_REG_WORD(&reg->semaphore, 0);
90 RD_REG_WORD(&reg->semaphore); 97 RD_REG_WORD(&reg->semaphore);
91 } else { 98 } else {
92 qla2x00_process_response_queue(ha); 99 qla2x00_process_response_queue(rsp);
93 100
94 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 101 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
95 RD_REG_WORD(&reg->hccr); 102 RD_REG_WORD(&reg->hccr);
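The mailbox decoding above is unchanged by the patch and worth keeping in mind when reading the rest of the handler: values in 0x4000-0x7fff complete a mailbox command, values in 0x8000-0xbfff are asynchronous events that now go to qla2x00_async_event() together with the rsp pointer. A self-contained restatement of that classification:

#include <stdint.h>

enum mbx_kind { MBX_COMPLETION, MBX_ASYNC_EVENT, MBX_UNKNOWN };

/* Same ranges as the if/else in qla2100_intr_handler() above. */
static enum mbx_kind classify_mb0(uint16_t mb0)
{
	if (mb0 > 0x3fff && mb0 < 0x8000)
		return MBX_COMPLETION;
	if (mb0 > 0x7fff && mb0 < 0xc000)
		return MBX_ASYNC_EVENT;
	return MBX_UNKNOWN;
}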
@@ -118,25 +125,29 @@ qla2100_intr_handler(int irq, void *dev_id)
118irqreturn_t 125irqreturn_t
119qla2300_intr_handler(int irq, void *dev_id) 126qla2300_intr_handler(int irq, void *dev_id)
120{ 127{
121 scsi_qla_host_t *ha; 128 scsi_qla_host_t *vha;
122 struct device_reg_2xxx __iomem *reg; 129 struct device_reg_2xxx __iomem *reg;
123 int status; 130 int status;
124 unsigned long iter; 131 unsigned long iter;
125 uint32_t stat; 132 uint32_t stat;
126 uint16_t hccr; 133 uint16_t hccr;
127 uint16_t mb[4]; 134 uint16_t mb[4];
135 struct rsp_que *rsp;
136 struct qla_hw_data *ha;
128 137
129 ha = (scsi_qla_host_t *) dev_id; 138 rsp = (struct rsp_que *) dev_id;
130 if (!ha) { 139 if (!rsp) {
131 printk(KERN_INFO 140 printk(KERN_INFO
132 "%s(): NULL host pointer\n", __func__); 141 "%s(): NULL response queue pointer\n", __func__);
133 return (IRQ_NONE); 142 return (IRQ_NONE);
134 } 143 }
135 144
145 ha = rsp->hw;
136 reg = &ha->iobase->isp; 146 reg = &ha->iobase->isp;
137 status = 0; 147 status = 0;
138 148
139 spin_lock(&ha->hardware_lock); 149 spin_lock(&ha->hardware_lock);
150 vha = qla2x00_get_rsp_host(rsp);
140 for (iter = 50; iter--; ) { 151 for (iter = 50; iter--; ) {
141 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
142 if (stat & HSR_RISC_PAUSED) { 153 if (stat & HSR_RISC_PAUSED) {
@@ -159,8 +170,8 @@ qla2300_intr_handler(int irq, void *dev_id)
159 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 170 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
160 RD_REG_WORD(&reg->hccr); 171 RD_REG_WORD(&reg->hccr);
161 172
162 ha->isp_ops->fw_dump(ha, 1); 173 ha->isp_ops->fw_dump(vha, 1);
163 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 174 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
164 break; 175 break;
165 } else if ((stat & HSR_RISC_INT) == 0) 176 } else if ((stat & HSR_RISC_INT) == 0)
166 break; 177 break;
@@ -170,7 +181,7 @@ qla2300_intr_handler(int irq, void *dev_id)
170 case 0x2: 181 case 0x2:
171 case 0x10: 182 case 0x10:
172 case 0x11: 183 case 0x11:
173 qla2x00_mbx_completion(ha, MSW(stat)); 184 qla2x00_mbx_completion(vha, MSW(stat));
174 status |= MBX_INTERRUPT; 185 status |= MBX_INTERRUPT;
175 186
176 /* Release mailbox registers. */ 187 /* Release mailbox registers. */
@@ -181,26 +192,26 @@ qla2300_intr_handler(int irq, void *dev_id)
181 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 192 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
182 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 193 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
183 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 194 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
184 qla2x00_async_event(ha, mb); 195 qla2x00_async_event(vha, rsp, mb);
185 break; 196 break;
186 case 0x13: 197 case 0x13:
187 qla2x00_process_response_queue(ha); 198 qla2x00_process_response_queue(rsp);
188 break; 199 break;
189 case 0x15: 200 case 0x15:
190 mb[0] = MBA_CMPLT_1_16BIT; 201 mb[0] = MBA_CMPLT_1_16BIT;
191 mb[1] = MSW(stat); 202 mb[1] = MSW(stat);
192 qla2x00_async_event(ha, mb); 203 qla2x00_async_event(vha, rsp, mb);
193 break; 204 break;
194 case 0x16: 205 case 0x16:
195 mb[0] = MBA_SCSI_COMPLETION; 206 mb[0] = MBA_SCSI_COMPLETION;
196 mb[1] = MSW(stat); 207 mb[1] = MSW(stat);
197 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 208 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198 qla2x00_async_event(ha, mb); 209 qla2x00_async_event(vha, rsp, mb);
199 break; 210 break;
200 default: 211 default:
201 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 212 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
202 "(%d).\n", 213 "(%d).\n",
203 ha->host_no, stat & 0xff)); 214 vha->host_no, stat & 0xff));
204 break; 215 break;
205 } 216 }
206 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 217 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -223,10 +234,11 @@ qla2300_intr_handler(int irq, void *dev_id)
223 * @mb0: Mailbox0 register 234 * @mb0: Mailbox0 register
224 */ 235 */
225static void 236static void
226qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) 237qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
227{ 238{
228 uint16_t cnt; 239 uint16_t cnt;
229 uint16_t __iomem *wptr; 240 uint16_t __iomem *wptr;
241 struct qla_hw_data *ha = vha->hw;
230 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 242 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
231 243
232 /* Load return mailbox registers. */ 244 /* Load return mailbox registers. */
@@ -247,10 +259,10 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
247 259
248 if (ha->mcp) { 260 if (ha->mcp) {
249 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 261 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
250 __func__, ha->host_no, ha->mcp->mb[0])); 262 __func__, vha->host_no, ha->mcp->mb[0]));
251 } else { 263 } else {
252 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 264 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
253 __func__, ha->host_no)); 265 __func__, vha->host_no));
254 } 266 }
255} 267}
256 268
@@ -260,7 +272,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
260 * @mb: Mailbox registers (0 - 3) 272 * @mb: Mailbox registers (0 - 3)
261 */ 273 */
262void 274void
263qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) 275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
264{ 276{
265#define LS_UNKNOWN 2 277#define LS_UNKNOWN 2
266 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 278 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
@@ -268,6 +280,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
268 uint16_t handle_cnt; 280 uint16_t handle_cnt;
269 uint16_t cnt; 281 uint16_t cnt;
270 uint32_t handles[5]; 282 uint32_t handles[5];
283 struct qla_hw_data *ha = vha->hw;
271 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 284 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
272 uint32_t rscn_entry, host_pid; 285 uint32_t rscn_entry, host_pid;
273 uint8_t rscn_queue_index; 286 uint8_t rscn_queue_index;
@@ -329,17 +342,19 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
329 342
330 switch (mb[0]) { 343 switch (mb[0]) {
331 case MBA_SCSI_COMPLETION: /* Fast Post */ 344 case MBA_SCSI_COMPLETION: /* Fast Post */
332 if (!ha->flags.online) 345 if (!vha->flags.online)
333 break; 346 break;
334 347
335 for (cnt = 0; cnt < handle_cnt; cnt++) 348 for (cnt = 0; cnt < handle_cnt; cnt++)
336 qla2x00_process_completed_request(ha, handles[cnt]); 349 qla2x00_process_completed_request(vha, rsp->req,
350 handles[cnt]);
337 break; 351 break;
338 352
339 case MBA_RESET: /* Reset */ 353 case MBA_RESET: /* Reset */
340 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no)); 354 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
355 vha->host_no));
341 356
342 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 357 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
343 break; 358 break;
344 359
345 case MBA_SYSTEM_ERR: /* System Error */ 360 case MBA_SYSTEM_ERR: /* System Error */
@@ -347,70 +362,70 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
347 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 362 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
348 mb[1], mb[2], mb[3]); 363 mb[1], mb[2], mb[3]);
349 364
350 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 365 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
351 ha->isp_ops->fw_dump(ha, 1); 366 ha->isp_ops->fw_dump(vha, 1);
352 367
353 if (IS_FWI2_CAPABLE(ha)) { 368 if (IS_FWI2_CAPABLE(ha)) {
354 if (mb[1] == 0 && mb[2] == 0) { 369 if (mb[1] == 0 && mb[2] == 0) {
355 qla_printk(KERN_ERR, ha, 370 qla_printk(KERN_ERR, ha,
356 "Unrecoverable Hardware Error: adapter " 371 "Unrecoverable Hardware Error: adapter "
357 "marked OFFLINE!\n"); 372 "marked OFFLINE!\n");
358 ha->flags.online = 0; 373 vha->flags.online = 0;
359 } else 374 } else
360 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 375 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
361 } else if (mb[1] == 0) { 376 } else if (mb[1] == 0) {
362 qla_printk(KERN_INFO, ha, 377 qla_printk(KERN_INFO, ha,
363 "Unrecoverable Hardware Error: adapter marked " 378 "Unrecoverable Hardware Error: adapter marked "
364 "OFFLINE!\n"); 379 "OFFLINE!\n");
365 ha->flags.online = 0; 380 vha->flags.online = 0;
366 } else 381 } else
367 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 382 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
368 break; 383 break;
369 384
370 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 385 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
371 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", 386 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
372 ha->host_no)); 387 vha->host_no));
373 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); 388 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
374 389
375 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 390 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
376 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
377 break; 392 break;
378 393
379 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 394 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
380 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 395 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
381 ha->host_no)); 396 vha->host_no));
382 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); 397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
383 398
384 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 399 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
385 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 400 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
386 break; 401 break;
387 402
388 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 403 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
389 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 404 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
390 ha->host_no)); 405 vha->host_no));
391 break; 406 break;
392 407
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 408 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no, 409 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
395 mb[1])); 410 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); 411 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 412
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 413 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN); 414 atomic_set(&vha->loop_state, LOOP_DOWN);
400 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 415 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
401 qla2x00_mark_all_devices_lost(ha, 1); 416 qla2x00_mark_all_devices_lost(vha, 1);
402 } 417 }
403 418
404 if (ha->parent) { 419 if (vha->vp_idx) {
405 atomic_set(&ha->vp_state, VP_FAILED); 420 atomic_set(&vha->vp_state, VP_FAILED);
406 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 421 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
407 } 422 }
408 423
409 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 424 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
410 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 425 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
411 426
412 ha->flags.management_server_logged_in = 0; 427 vha->flags.management_server_logged_in = 0;
413 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]); 428 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
414 break; 429 break;
415 430
416 case MBA_LOOP_UP: /* Loop Up Event */ 431 case MBA_LOOP_UP: /* Loop Up Event */
@@ -425,59 +440,59 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
425 } 440 }
426 441
427 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 442 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
428 ha->host_no, link_speed)); 443 vha->host_no, link_speed));
429 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", 444 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
430 link_speed); 445 link_speed);
431 446
432 ha->flags.management_server_logged_in = 0; 447 vha->flags.management_server_logged_in = 0;
433 qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate); 448 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
434 break; 449 break;
435 450
436 case MBA_LOOP_DOWN: /* Loop Down Event */ 451 case MBA_LOOP_DOWN: /* Loop Down Event */
437 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 452 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
438 "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3])); 453 "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
439 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", 454 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
440 mb[1], mb[2], mb[3]); 455 mb[1], mb[2], mb[3]);
441 456
442 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 457 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
443 atomic_set(&ha->loop_state, LOOP_DOWN); 458 atomic_set(&vha->loop_state, LOOP_DOWN);
444 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 459 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
445 ha->device_flags |= DFLG_NO_CABLE; 460 vha->device_flags |= DFLG_NO_CABLE;
446 qla2x00_mark_all_devices_lost(ha, 1); 461 qla2x00_mark_all_devices_lost(vha, 1);
447 } 462 }
448 463
449 if (ha->parent) { 464 if (vha->vp_idx) {
450 atomic_set(&ha->vp_state, VP_FAILED); 465 atomic_set(&vha->vp_state, VP_FAILED);
451 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 466 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
452 } 467 }
453 468
454 ha->flags.management_server_logged_in = 0; 469 vha->flags.management_server_logged_in = 0;
455 ha->link_data_rate = PORT_SPEED_UNKNOWN; 470 ha->link_data_rate = PORT_SPEED_UNKNOWN;
456 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0); 471 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
457 break; 472 break;
458 473
459 case MBA_LIP_RESET: /* LIP reset occurred */ 474 case MBA_LIP_RESET: /* LIP reset occurred */
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 475 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1])); 476 vha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha, 477 qla_printk(KERN_INFO, ha,
463 "LIP reset occurred (%x).\n", mb[1]); 478 "LIP reset occurred (%x).\n", mb[1]);
464 479
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 480 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN); 481 atomic_set(&vha->loop_state, LOOP_DOWN);
467 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 482 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
468 qla2x00_mark_all_devices_lost(ha, 1); 483 qla2x00_mark_all_devices_lost(vha, 1);
469 } 484 }
470 485
471 if (ha->parent) { 486 if (vha->vp_idx) {
472 atomic_set(&ha->vp_state, VP_FAILED); 487 atomic_set(&vha->vp_state, VP_FAILED);
473 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 488 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
474 } 489 }
475 490
476 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 491 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
477 492
478 ha->operating_mode = LOOP; 493 ha->operating_mode = LOOP;
479 ha->flags.management_server_logged_in = 0; 494 vha->flags.management_server_logged_in = 0;
480 qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]); 495 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
481 break; 496 break;
482 497
483 case MBA_POINT_TO_POINT: /* Point-to-Point */ 498 case MBA_POINT_TO_POINT: /* Point-to-Point */
@@ -485,33 +500,33 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
485 break; 500 break;
486 501
487 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", 502 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
488 ha->host_no)); 503 vha->host_no));
489 504
490 /* 505 /*
491 * Until there's a transition from loop down to loop up, treat 506 * Until there's a transition from loop down to loop up, treat
492 * this as loop down only. 507 * this as loop down only.
493 */ 508 */
494 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 509 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
495 atomic_set(&ha->loop_state, LOOP_DOWN); 510 atomic_set(&vha->loop_state, LOOP_DOWN);
496 if (!atomic_read(&ha->loop_down_timer)) 511 if (!atomic_read(&vha->loop_down_timer))
497 atomic_set(&ha->loop_down_timer, 512 atomic_set(&vha->loop_down_timer,
498 LOOP_DOWN_TIME); 513 LOOP_DOWN_TIME);
499 qla2x00_mark_all_devices_lost(ha, 1); 514 qla2x00_mark_all_devices_lost(vha, 1);
500 } 515 }
501 516
502 if (ha->parent) { 517 if (vha->vp_idx) {
503 atomic_set(&ha->vp_state, VP_FAILED); 518 atomic_set(&vha->vp_state, VP_FAILED);
504 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 519 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
505 } 520 }
506 521
507 if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 522 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
508 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 523 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
509 } 524
510 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 525 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
511 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 526 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
512 527
513 ha->flags.gpsc_supported = 1; 528 ha->flags.gpsc_supported = 1;
514 ha->flags.management_server_logged_in = 0; 529 vha->flags.management_server_logged_in = 0;
515 break; 530 break;
516 531
517 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 532 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
@@ -520,134 +535,137 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
520 535
521 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 536 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
522 "received.\n", 537 "received.\n",
523 ha->host_no)); 538 vha->host_no));
524 qla_printk(KERN_INFO, ha, 539 qla_printk(KERN_INFO, ha,
525 "Configuration change detected: value=%x.\n", mb[1]); 540 "Configuration change detected: value=%x.\n", mb[1]);
526 541
527 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 542 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
528 atomic_set(&ha->loop_state, LOOP_DOWN); 543 atomic_set(&vha->loop_state, LOOP_DOWN);
529 if (!atomic_read(&ha->loop_down_timer)) 544 if (!atomic_read(&vha->loop_down_timer))
530 atomic_set(&ha->loop_down_timer, 545 atomic_set(&vha->loop_down_timer,
531 LOOP_DOWN_TIME); 546 LOOP_DOWN_TIME);
532 qla2x00_mark_all_devices_lost(ha, 1); 547 qla2x00_mark_all_devices_lost(vha, 1);
533 } 548 }
534 549
535 if (ha->parent) { 550 if (vha->vp_idx) {
536 atomic_set(&ha->vp_state, VP_FAILED); 551 atomic_set(&vha->vp_state, VP_FAILED);
537 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 552 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
538 } 553 }
539 554
540 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 555 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
541 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 556 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
542 break; 557 break;
543 558
544 case MBA_PORT_UPDATE: /* Port database update */ 559 case MBA_PORT_UPDATE: /* Port database update */
560 /* Only handle SCNs for our Vport index. */
561 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
562 break;
563
545 /* 564 /*
546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 565 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
547 * event etc. earlier indicating loop is down) then process 566 * event etc. earlier indicating loop is down) then process
548 * it. Otherwise ignore it and Wait for RSCN to come in. 567 * it. Otherwise ignore it and Wait for RSCN to come in.
549 */ 568 */
550 atomic_set(&ha->loop_down_timer, 0); 569 atomic_set(&vha->loop_down_timer, 0);
551 if (atomic_read(&ha->loop_state) != LOOP_DOWN && 570 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
552 atomic_read(&ha->loop_state) != LOOP_DEAD) { 571 atomic_read(&vha->loop_state) != LOOP_DEAD) {
553 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 572 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
554 "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1], 573 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
555 mb[2], mb[3])); 574 mb[2], mb[3]));
556 break; 575 break;
557 } 576 }
558 577
559 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 578 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
560 ha->host_no)); 579 vha->host_no));
561 DEBUG(printk(KERN_INFO 580 DEBUG(printk(KERN_INFO
562 "scsi(%ld): Port database changed %04x %04x %04x.\n", 581 "scsi(%ld): Port database changed %04x %04x %04x.\n",
563 ha->host_no, mb[1], mb[2], mb[3])); 582 vha->host_no, mb[1], mb[2], mb[3]));
564 583
565 /* 584 /*
566 * Mark all devices as missing so we will login again. 585 * Mark all devices as missing so we will login again.
567 */ 586 */
568 atomic_set(&ha->loop_state, LOOP_UP); 587 atomic_set(&vha->loop_state, LOOP_UP);
569 588
570 qla2x00_mark_all_devices_lost(ha, 1); 589 qla2x00_mark_all_devices_lost(vha, 1);
571 590
572 ha->flags.rscn_queue_overflow = 1; 591 vha->flags.rscn_queue_overflow = 1;
573 592
574 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 593 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
575 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 594 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
576 break; 595 break;
577 596
578 case MBA_RSCN_UPDATE: /* State Change Registration */ 597 case MBA_RSCN_UPDATE: /* State Change Registration */
579 /* Check if the Vport has issued a SCR */ 598 /* Check if the Vport has issued a SCR */
580 if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags)) 599 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
581 break; 600 break;
582 /* Only handle SCNs for our Vport index. */ 601 /* Only handle SCNs for our Vport index. */
583 if (ha->parent && ha->vp_idx != (mb[3] & 0xff)) 602 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
584 break; 603 break;
585
586 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 604 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
587 ha->host_no)); 605 vha->host_no));
588 DEBUG(printk(KERN_INFO 606 DEBUG(printk(KERN_INFO
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 607 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3])); 608 vha->host_no, mb[1], mb[2], mb[3]));
591 609
592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 610 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 611 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
594 ha->d_id.b.al_pa; 612 | vha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) { 613 if (rscn_entry == host_pid) {
596 DEBUG(printk(KERN_INFO 614 DEBUG(printk(KERN_INFO
597 "scsi(%ld): Ignoring RSCN update to local host " 615 "scsi(%ld): Ignoring RSCN update to local host "
598 "port ID (%06x)\n", 616 "port ID (%06x)\n",
599 ha->host_no, host_pid)); 617 vha->host_no, host_pid));
600 break; 618 break;
601 } 619 }
602 620
603 /* Ignore reserved bits from RSCN-payload. */ 621 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 622 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
605 rscn_queue_index = ha->rscn_in_ptr + 1; 623 rscn_queue_index = vha->rscn_in_ptr + 1;
606 if (rscn_queue_index == MAX_RSCN_COUNT) 624 if (rscn_queue_index == MAX_RSCN_COUNT)
607 rscn_queue_index = 0; 625 rscn_queue_index = 0;
608 if (rscn_queue_index != ha->rscn_out_ptr) { 626 if (rscn_queue_index != vha->rscn_out_ptr) {
609 ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry; 627 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
610 ha->rscn_in_ptr = rscn_queue_index; 628 vha->rscn_in_ptr = rscn_queue_index;
611 } else { 629 } else {
612 ha->flags.rscn_queue_overflow = 1; 630 vha->flags.rscn_queue_overflow = 1;
613 } 631 }
614 632
615 atomic_set(&ha->loop_state, LOOP_UPDATE); 633 atomic_set(&vha->loop_state, LOOP_UPDATE);
616 atomic_set(&ha->loop_down_timer, 0); 634 atomic_set(&vha->loop_down_timer, 0);
617 ha->flags.management_server_logged_in = 0; 635 vha->flags.management_server_logged_in = 0;
618 636
619 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 637 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
620 set_bit(RSCN_UPDATE, &ha->dpc_flags); 638 set_bit(RSCN_UPDATE, &vha->dpc_flags);
621 qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry); 639 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
622 break; 640 break;
623 641
624 /* case MBA_RIO_RESPONSE: */ 642 /* case MBA_RIO_RESPONSE: */
625 case MBA_ZIO_RESPONSE: 643 case MBA_ZIO_RESPONSE:
626 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", 644 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
627 ha->host_no)); 645 vha->host_no));
628 DEBUG(printk(KERN_INFO 646 DEBUG(printk(KERN_INFO
629 "scsi(%ld): [R|Z]IO update completion.\n", 647 "scsi(%ld): [R|Z]IO update completion.\n",
630 ha->host_no)); 648 vha->host_no));
631 649
632 if (IS_FWI2_CAPABLE(ha)) 650 if (IS_FWI2_CAPABLE(ha))
633 qla24xx_process_response_queue(ha); 651 qla24xx_process_response_queue(rsp);
634 else 652 else
635 qla2x00_process_response_queue(ha); 653 qla2x00_process_response_queue(rsp);
636 break; 654 break;
637 655
638 case MBA_DISCARD_RND_FRAME: 656 case MBA_DISCARD_RND_FRAME:
639 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 657 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
640 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); 658 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
641 break; 659 break;
642 660
643 case MBA_TRACE_NOTIFICATION: 661 case MBA_TRACE_NOTIFICATION:
644 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 662 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
645 ha->host_no, mb[1], mb[2])); 663 vha->host_no, mb[1], mb[2]));
646 break; 664 break;
647 665
648 case MBA_ISP84XX_ALERT: 666 case MBA_ISP84XX_ALERT:
649 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 667 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
650 "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3])); 668 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
651 669
652 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 670 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
653 switch (mb[1]) { 671 switch (mb[1]) {
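The MBA_RSCN_UPDATE case in the hunk above keeps the RSCN bookkeeping it inherited, now hung off the vha: a fixed-size circular queue indexed by in/out pointers, with an overflow flag rather than a silent drop when the queue is full. A self-contained sketch of that insert step (MAX_RSCN_COUNT_SK is a placeholder for the driver's constant):

#include <stdint.h>

#define MAX_RSCN_COUNT_SK 32		/* placeholder */

struct rscn_fifo_sketch {
	uint32_t queue[MAX_RSCN_COUNT_SK];
	uint8_t in_ptr, out_ptr;
	int overflow;
};

/* Same logic as the vha->rscn_queue handling above. */
static void rscn_insert(struct rscn_fifo_sketch *q, uint32_t rscn_entry)
{
	uint8_t next = q->in_ptr + 1;

	if (next == MAX_RSCN_COUNT_SK)
		next = 0;
	if (next != q->out_ptr) {	/* room left */
		q->queue[q->in_ptr] = rscn_entry;
		q->in_ptr = next;
	} else				/* full: note the overflow */
		q->overflow = 1;
}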
@@ -682,16 +700,22 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
682 break; 700 break;
683 } 701 }
684 702
685 if (!ha->parent && ha->num_vhosts) 703 if (!vha->vp_idx && ha->num_vhosts)
686 qla2x00_alert_all_vps(ha, mb); 704 qla2x00_alert_all_vps(rsp, mb);
687} 705}
688 706
689static void 707static void
690qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) 708qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
691{ 709{
692 fc_port_t *fcport = data; 710 fc_port_t *fcport = data;
711 struct scsi_qla_host *vha = fcport->vha;
712 struct qla_hw_data *ha = vha->hw;
713 struct req_que *req = NULL;
693 714
694 if (fcport->ha->max_q_depth <= sdev->queue_depth) 715 req = ha->req_q_map[vha->req_ques[0]];
716 if (!req)
717 return;
718 if (req->max_q_depth <= sdev->queue_depth)
695 return; 719 return;
696 720
697 if (sdev->ordered_tags) 721 if (sdev->ordered_tags)
@@ -703,9 +727,9 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
703 727
704 fcport->last_ramp_up = jiffies; 728 fcport->last_ramp_up = jiffies;
705 729
706 DEBUG2(qla_printk(KERN_INFO, fcport->ha, 730 DEBUG2(qla_printk(KERN_INFO, ha,
707 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 731 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
708 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, 732 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
709 sdev->queue_depth)); 733 sdev->queue_depth));
710} 734}
711 735
@@ -717,20 +741,21 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
717 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1)) 741 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
718 return; 742 return;
719 743
720 DEBUG2(qla_printk(KERN_INFO, fcport->ha, 744 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
721 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 745 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
722 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, 746 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
723 sdev->queue_depth)); 747 sdev->queue_depth));
724} 748}
725 749
726static inline void 750static inline void
727qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp) 751qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
752 srb_t *sp)
728{ 753{
729 fc_port_t *fcport; 754 fc_port_t *fcport;
730 struct scsi_device *sdev; 755 struct scsi_device *sdev;
731 756
732 sdev = sp->cmd->device; 757 sdev = sp->cmd->device;
733 if (sdev->queue_depth >= ha->max_q_depth) 758 if (sdev->queue_depth >= req->max_q_depth)
734 return; 759 return;
735 760
736 fcport = sp->fcport; 761 fcport = sp->fcport;
@@ -751,25 +776,27 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
751 * @index: SRB index 776 * @index: SRB index
752 */ 777 */
753static void 778static void
754qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index) 779qla2x00_process_completed_request(struct scsi_qla_host *vha,
780 struct req_que *req, uint32_t index)
755{ 781{
756 srb_t *sp; 782 srb_t *sp;
783 struct qla_hw_data *ha = vha->hw;
757 784
758 /* Validate handle. */ 785 /* Validate handle. */
759 if (index >= MAX_OUTSTANDING_COMMANDS) { 786 if (index >= MAX_OUTSTANDING_COMMANDS) {
760 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", 787 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
761 ha->host_no, index)); 788 vha->host_no, index));
762 qla_printk(KERN_WARNING, ha, 789 qla_printk(KERN_WARNING, ha,
763 "Invalid SCSI completion handle %d.\n", index); 790 "Invalid SCSI completion handle %d.\n", index);
764 791
765 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 792 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
766 return; 793 return;
767 } 794 }
768 795
769 sp = ha->outstanding_cmds[index]; 796 sp = req->outstanding_cmds[index];
770 if (sp) { 797 if (sp) {
771 /* Free outstanding command slot. */ 798 /* Free outstanding command slot. */
772 ha->outstanding_cmds[index] = NULL; 799 req->outstanding_cmds[index] = NULL;
773 800
774 CMD_COMPL_STATUS(sp->cmd) = 0L; 801 CMD_COMPL_STATUS(sp->cmd) = 0L;
775 CMD_SCSI_STATUS(sp->cmd) = 0L; 802 CMD_SCSI_STATUS(sp->cmd) = 0L;
@@ -777,15 +804,15 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
777 /* Save ISP completion status */ 804 /* Save ISP completion status */
778 sp->cmd->result = DID_OK << 16; 805 sp->cmd->result = DID_OK << 16;
779 806
780 qla2x00_ramp_up_queue_depth(ha, sp); 807 qla2x00_ramp_up_queue_depth(vha, req, sp);
781 qla2x00_sp_compl(ha, sp); 808 qla2x00_sp_compl(ha, sp);
782 } else { 809 } else {
783 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 810 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
784 ha->host_no)); 811 vha->host_no));
785 qla_printk(KERN_WARNING, ha, 812 qla_printk(KERN_WARNING, ha,
786 "Invalid ISP SCSI completion handle\n"); 813 "Invalid ISP SCSI completion handle\n");
787 814
788 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 815 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
789 } 816 }
790} 817}
791 818
@@ -794,32 +821,36 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
794 * @ha: SCSI driver HA context 821 * @ha: SCSI driver HA context
795 */ 822 */
796void 823void
797qla2x00_process_response_queue(struct scsi_qla_host *ha) 824qla2x00_process_response_queue(struct rsp_que *rsp)
798{ 825{
826 struct scsi_qla_host *vha;
827 struct qla_hw_data *ha = rsp->hw;
799 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 828 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
800 sts_entry_t *pkt; 829 sts_entry_t *pkt;
801 uint16_t handle_cnt; 830 uint16_t handle_cnt;
802 uint16_t cnt; 831 uint16_t cnt;
803 832
804 if (!ha->flags.online) 833 vha = qla2x00_get_rsp_host(rsp);
834
835 if (!vha->flags.online)
805 return; 836 return;
806 837
807 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { 838 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
808 pkt = (sts_entry_t *)ha->response_ring_ptr; 839 pkt = (sts_entry_t *)rsp->ring_ptr;
809 840
810 ha->rsp_ring_index++; 841 rsp->ring_index++;
811 if (ha->rsp_ring_index == ha->response_q_length) { 842 if (rsp->ring_index == rsp->length) {
812 ha->rsp_ring_index = 0; 843 rsp->ring_index = 0;
813 ha->response_ring_ptr = ha->response_ring; 844 rsp->ring_ptr = rsp->ring;
814 } else { 845 } else {
815 ha->response_ring_ptr++; 846 rsp->ring_ptr++;
816 } 847 }
817 848
818 if (pkt->entry_status != 0) { 849 if (pkt->entry_status != 0) {
819 DEBUG3(printk(KERN_INFO 850 DEBUG3(printk(KERN_INFO
820 "scsi(%ld): Process error entry.\n", ha->host_no)); 851 "scsi(%ld): Process error entry.\n", vha->host_no));
821 852
822 qla2x00_error_entry(ha, pkt); 853 qla2x00_error_entry(vha, rsp, pkt);
823 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 854 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
824 wmb(); 855 wmb();
825 continue; 856 continue;
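The response ring is consumed with the mirror image of the request-ring pattern: advance and wrap rsp->ring_index/ring_ptr, handle the entry, then stamp it RESPONSE_PROCESSED so the RIO/ZIO checks in the submit paths can see whether anything is still pending. A small sketch of the consumer side; the IOCB layout and the signature value are placeholders for the real ones in qla_def.h:

#include <stdint.h>

#define RSP_PROCESSED_SIG 0xDEADDEADu	/* placeholder for RESPONSE_PROCESSED */

typedef struct { uint32_t signature; uint8_t rest[60]; } rsp_iocb_t;

struct rsp_que_sketch {
	rsp_iocb_t *ring, *ring_ptr;
	uint16_t ring_index, length;
};

/* Return the current entry and advance/wrap, as the loop above does. */
static rsp_iocb_t *rsp_ring_next(struct rsp_que_sketch *rsp)
{
	rsp_iocb_t *pkt = rsp->ring_ptr;

	if (++rsp->ring_index == rsp->length) {
		rsp->ring_index = 0;
		rsp->ring_ptr = rsp->ring;
	} else
		rsp->ring_ptr++;
	return pkt;
}

/* Called after the entry has been handled, as in the loop above. */
static void rsp_mark_processed(rsp_iocb_t *pkt)
{
	pkt->signature = RSP_PROCESSED_SIG;	/* followed by wmb() in the driver */
}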
@@ -827,31 +858,31 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
827 858
828 switch (pkt->entry_type) { 859 switch (pkt->entry_type) {
829 case STATUS_TYPE: 860 case STATUS_TYPE:
830 qla2x00_status_entry(ha, pkt); 861 qla2x00_status_entry(vha, rsp, pkt);
831 break; 862 break;
832 case STATUS_TYPE_21: 863 case STATUS_TYPE_21:
833 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 864 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
834 for (cnt = 0; cnt < handle_cnt; cnt++) { 865 for (cnt = 0; cnt < handle_cnt; cnt++) {
835 qla2x00_process_completed_request(ha, 866 qla2x00_process_completed_request(vha, rsp->req,
836 ((sts21_entry_t *)pkt)->handle[cnt]); 867 ((sts21_entry_t *)pkt)->handle[cnt]);
837 } 868 }
838 break; 869 break;
839 case STATUS_TYPE_22: 870 case STATUS_TYPE_22:
840 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 871 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
841 for (cnt = 0; cnt < handle_cnt; cnt++) { 872 for (cnt = 0; cnt < handle_cnt; cnt++) {
842 qla2x00_process_completed_request(ha, 873 qla2x00_process_completed_request(vha, rsp->req,
843 ((sts22_entry_t *)pkt)->handle[cnt]); 874 ((sts22_entry_t *)pkt)->handle[cnt]);
844 } 875 }
845 break; 876 break;
846 case STATUS_CONT_TYPE: 877 case STATUS_CONT_TYPE:
847 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); 878 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
848 break; 879 break;
849 default: 880 default:
850 /* Type Not Supported. */ 881 /* Type Not Supported. */
851 DEBUG4(printk(KERN_WARNING 882 DEBUG4(printk(KERN_WARNING
852 "scsi(%ld): Received unknown response pkt type %x " 883 "scsi(%ld): Received unknown response pkt type %x "
853 "entry status=%x.\n", 884 "entry status=%x.\n",
854 ha->host_no, pkt->entry_type, pkt->entry_status)); 885 vha->host_no, pkt->entry_type, pkt->entry_status));
855 break; 886 break;
856 } 887 }
857 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 888 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -859,7 +890,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
859 } 890 }
860 891
861 /* Adjust ring index */ 892 /* Adjust ring index */
862 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index); 893 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
863} 894}
864 895
865static inline void 896static inline void
@@ -881,10 +912,10 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
881 sp->request_sense_ptr += sense_len; 912 sp->request_sense_ptr += sense_len;
882 sp->request_sense_length -= sense_len; 913 sp->request_sense_length -= sense_len;
883 if (sp->request_sense_length != 0) 914 if (sp->request_sense_length != 0)
884 sp->fcport->ha->status_srb = sp; 915 sp->fcport->vha->status_srb = sp;
885 916
886 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 917 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
887 "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no, 918 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
888 cp->device->channel, cp->device->id, cp->device->lun, cp, 919 cp->device->channel, cp->device->id, cp->device->lun, cp,
889 cp->serial_number)); 920 cp->serial_number));
890 if (sense_len) 921 if (sense_len)
@@ -898,7 +929,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
898 * @pkt: Entry pointer 929 * @pkt: Entry pointer
899 */ 930 */
900static void 931static void
901qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) 932qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
902{ 933{
903 srb_t *sp; 934 srb_t *sp;
904 fc_port_t *fcport; 935 fc_port_t *fcport;
@@ -911,6 +942,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
911 int32_t resid; 942 int32_t resid;
912 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 943 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
913 uint8_t *rsp_info, *sense_data; 944 uint8_t *rsp_info, *sense_data;
945 struct qla_hw_data *ha = vha->hw;
946 struct req_que *req = rsp->req;
914 947
915 sts = (sts_entry_t *) pkt; 948 sts = (sts_entry_t *) pkt;
916 sts24 = (struct sts_entry_24xx *) pkt; 949 sts24 = (struct sts_entry_24xx *) pkt;
@@ -924,31 +957,31 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
924 957
925 /* Fast path completion. */ 958 /* Fast path completion. */
926 if (comp_status == CS_COMPLETE && scsi_status == 0) { 959 if (comp_status == CS_COMPLETE && scsi_status == 0) {
927 qla2x00_process_completed_request(ha, sts->handle); 960 qla2x00_process_completed_request(vha, req, sts->handle);
928 961
929 return; 962 return;
930 } 963 }
931 964
932 /* Validate handle. */ 965 /* Validate handle. */
933 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 966 if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
934 sp = ha->outstanding_cmds[sts->handle]; 967 sp = req->outstanding_cmds[sts->handle];
935 ha->outstanding_cmds[sts->handle] = NULL; 968 req->outstanding_cmds[sts->handle] = NULL;
936 } else 969 } else
937 sp = NULL; 970 sp = NULL;
938 971
939 if (sp == NULL) { 972 if (sp == NULL) {
940 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n", 973 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
941 ha->host_no)); 974 vha->host_no));
942 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n"); 975 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
943 976
944 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 977 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
945 qla2xxx_wake_dpc(ha); 978 qla2xxx_wake_dpc(vha);
946 return; 979 return;
947 } 980 }
948 cp = sp->cmd; 981 cp = sp->cmd;
949 if (cp == NULL) { 982 if (cp == NULL) {
950 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 983 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
951 "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp)); 984 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
952 qla_printk(KERN_WARNING, ha, 985 qla_printk(KERN_WARNING, ha,
953 "Command is NULL: already returned to OS (sp=%p)\n", sp); 986 "Command is NULL: already returned to OS (sp=%p)\n", sp);
954 987
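
[Editorial sketch] Status entries carry a handle that now indexes the per-request-queue outstanding_cmds[] table rather than a host-wide one; the hunk above bounds-checks it, claims the slot, and schedules an ISP abort when the handle is stale. A compilable mock of that lookup (sizes and types are stand-ins, not the driver's):

    #include <stdio.h>

    #define MAX_OUTSTANDING_COMMANDS 1024   /* stand-in for the driver's constant */

    struct srb { int tag; };                /* stand-in for srb_t */

    struct req_que {
        struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
    };

    /* Returns the command owning @handle, or NULL (caller then flags recovery). */
    static struct srb *claim_handle(struct req_que *req, unsigned int handle)
    {
        struct srb *sp = NULL;

        if (handle < MAX_OUTSTANDING_COMMANDS) {
            sp = req->outstanding_cmds[handle];
            req->outstanding_cmds[handle] = NULL;   /* slot is free again */
        }
        if (!sp)
            fprintf(stderr, "invalid handle %u: ISP abort needed\n", handle);
        return sp;
    }

    int main(void)
    {
        static struct req_que req;
        struct srb cmd = { .tag = 7 };

        req.outstanding_cmds[3] = &cmd;
        claim_handle(&req, 3);       /* found */
        claim_handle(&req, 3);       /* already completed -> NULL */
        return 0;
    }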
@@ -987,7 +1020,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
987 if (rsp_info_len > 3 && rsp_info[3]) { 1020 if (rsp_info_len > 3 && rsp_info[3]) {
988 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " 1021 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
989 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..." 1022 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
990 "retrying command\n", ha->host_no, 1023 "retrying command\n", vha->host_no,
991 cp->device->channel, cp->device->id, 1024 cp->device->channel, cp->device->id,
992 cp->device->lun, rsp_info_len, rsp_info[0], 1025 cp->device->lun, rsp_info_len, rsp_info[0],
993 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4], 1026 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
@@ -1025,7 +1058,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1025 qla_printk(KERN_INFO, ha, 1058 qla_printk(KERN_INFO, ha,
1026 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1059 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1027 "detected (%x of %x bytes)...returning " 1060 "detected (%x of %x bytes)...returning "
1028 "error status.\n", ha->host_no, 1061 "error status.\n", vha->host_no,
1029 cp->device->channel, cp->device->id, 1062 cp->device->channel, cp->device->id,
1030 cp->device->lun, resid, 1063 cp->device->lun, resid,
1031 scsi_bufflen(cp)); 1064 scsi_bufflen(cp));
@@ -1039,7 +1072,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1039 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1072 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1040 DEBUG2(printk(KERN_INFO 1073 DEBUG2(printk(KERN_INFO
1041 "scsi(%ld): QUEUE FULL status detected " 1074 "scsi(%ld): QUEUE FULL status detected "
1042 "0x%x-0x%x.\n", ha->host_no, comp_status, 1075 "0x%x-0x%x.\n", vha->host_no, comp_status,
1043 scsi_status)); 1076 scsi_status));
1044 1077
1045 /* Adjust queue depth for all luns on the port. */ 1078 /* Adjust queue depth for all luns on the port. */
@@ -1078,7 +1111,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1078 DEBUG2(printk(KERN_INFO 1111 DEBUG2(printk(KERN_INFO
1079 "scsi(%ld:%d:%d) UNDERRUN status detected " 1112 "scsi(%ld:%d:%d) UNDERRUN status detected "
1080 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " 1113 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1081 "os_underflow=0x%x\n", ha->host_no, 1114 "os_underflow=0x%x\n", vha->host_no,
1082 cp->device->id, cp->device->lun, comp_status, 1115 cp->device->id, cp->device->lun, comp_status,
1083 scsi_status, resid_len, resid, cp->cmnd[0], 1116 scsi_status, resid_len, resid, cp->cmnd[0],
1084 cp->underflow)); 1117 cp->underflow));
@@ -1095,7 +1128,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1095 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1128 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1096 DEBUG2(printk(KERN_INFO 1129 DEBUG2(printk(KERN_INFO
1097 "scsi(%ld): QUEUE FULL status detected " 1130 "scsi(%ld): QUEUE FULL status detected "
1098 "0x%x-0x%x.\n", ha->host_no, comp_status, 1131 "0x%x-0x%x.\n", vha->host_no, comp_status,
1099 scsi_status)); 1132 scsi_status));
1100 1133
1101 /* 1134 /*
@@ -1125,10 +1158,10 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1125 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1158 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1126 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1159 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1127 "frame(s) detected (%x of %x bytes)..." 1160 "frame(s) detected (%x of %x bytes)..."
1128 "retrying command.\n", ha->host_no, 1161 "retrying command.\n",
1129 cp->device->channel, cp->device->id, 1162 vha->host_no, cp->device->channel,
1130 cp->device->lun, resid, 1163 cp->device->id, cp->device->lun, resid,
1131 scsi_bufflen(cp))); 1164 scsi_bufflen(cp)));
1132 1165
1133 cp->result = DID_BUS_BUSY << 16; 1166 cp->result = DID_BUS_BUSY << 16;
1134 break; 1167 break;
@@ -1140,7 +1173,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1140 qla_printk(KERN_INFO, ha, 1173 qla_printk(KERN_INFO, ha,
1141 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1174 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1142 "detected (%x of %x bytes)...returning " 1175 "detected (%x of %x bytes)...returning "
1143 "error status.\n", ha->host_no, 1176 "error status.\n", vha->host_no,
1144 cp->device->channel, cp->device->id, 1177 cp->device->channel, cp->device->id,
1145 cp->device->lun, resid, 1178 cp->device->lun, resid,
1146 scsi_bufflen(cp)); 1179 scsi_bufflen(cp));
@@ -1157,7 +1190,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1157 case CS_DATA_OVERRUN: 1190 case CS_DATA_OVERRUN:
1158 DEBUG2(printk(KERN_INFO 1191 DEBUG2(printk(KERN_INFO
1159 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n", 1192 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1160 ha->host_no, cp->device->id, cp->device->lun, comp_status, 1193 vha->host_no, cp->device->id, cp->device->lun, comp_status,
1161 scsi_status)); 1194 scsi_status));
1162 DEBUG2(printk(KERN_INFO 1195 DEBUG2(printk(KERN_INFO
1163 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1196 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
@@ -1183,7 +1216,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1183 */ 1216 */
1184 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down " 1217 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1185 "pid=%ld, compl status=0x%x, port state=0x%x\n", 1218 "pid=%ld, compl status=0x%x, port state=0x%x\n",
1186 ha->host_no, cp->device->id, cp->device->lun, 1219 vha->host_no, cp->device->id, cp->device->lun,
1187 cp->serial_number, comp_status, 1220 cp->serial_number, comp_status,
1188 atomic_read(&fcport->state))); 1221 atomic_read(&fcport->state)));
1189 1222
@@ -1194,13 +1227,13 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1194 */ 1227 */
1195 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1228 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1196 if (atomic_read(&fcport->state) == FCS_ONLINE) 1229 if (atomic_read(&fcport->state) == FCS_ONLINE)
1197 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1230 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1198 break; 1231 break;
1199 1232
1200 case CS_RESET: 1233 case CS_RESET:
1201 DEBUG2(printk(KERN_INFO 1234 DEBUG2(printk(KERN_INFO
1202 "scsi(%ld): RESET status detected 0x%x-0x%x.\n", 1235 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1203 ha->host_no, comp_status, scsi_status)); 1236 vha->host_no, comp_status, scsi_status));
1204 1237
1205 cp->result = DID_RESET << 16; 1238 cp->result = DID_RESET << 16;
1206 break; 1239 break;
@@ -1213,7 +1246,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1213 */ 1246 */
1214 DEBUG2(printk(KERN_INFO 1247 DEBUG2(printk(KERN_INFO
1215 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n", 1248 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1216 ha->host_no, comp_status, scsi_status)); 1249 vha->host_no, comp_status, scsi_status));
1217 1250
1218 cp->result = DID_RESET << 16; 1251 cp->result = DID_RESET << 16;
1219 break; 1252 break;
@@ -1229,25 +1262,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1229 if (IS_FWI2_CAPABLE(ha)) { 1262 if (IS_FWI2_CAPABLE(ha)) {
1230 DEBUG2(printk(KERN_INFO 1263 DEBUG2(printk(KERN_INFO
1231 "scsi(%ld:%d:%d:%d): TIMEOUT status detected " 1264 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1232 "0x%x-0x%x\n", ha->host_no, cp->device->channel, 1265 "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1233 cp->device->id, cp->device->lun, comp_status, 1266 cp->device->id, cp->device->lun, comp_status,
1234 scsi_status)); 1267 scsi_status));
1235 break; 1268 break;
1236 } 1269 }
1237 DEBUG2(printk(KERN_INFO 1270 DEBUG2(printk(KERN_INFO
1238 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x " 1271 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1239 "sflags=%x.\n", ha->host_no, cp->device->channel, 1272 "sflags=%x.\n", vha->host_no, cp->device->channel,
1240 cp->device->id, cp->device->lun, comp_status, scsi_status, 1273 cp->device->id, cp->device->lun, comp_status, scsi_status,
1241 le16_to_cpu(sts->status_flags))); 1274 le16_to_cpu(sts->status_flags)));
1242 1275
1243 /* Check to see if logout occurred. */ 1276 /* Check to see if logout occurred. */
1244 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) 1277 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1245 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1278 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1246 break; 1279 break;
1247 1280
1248 default: 1281 default:
1249 DEBUG3(printk("scsi(%ld): Error detected (unknown status) " 1282 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1250 "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status)); 1283 "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1251 qla_printk(KERN_INFO, ha, 1284 qla_printk(KERN_INFO, ha,
1252 "Unknown status detected 0x%x-0x%x.\n", 1285 "Unknown status detected 0x%x-0x%x.\n",
1253 comp_status, scsi_status); 1286 comp_status, scsi_status);
@@ -1257,7 +1290,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1257 } 1290 }
1258 1291
1259 /* Place command on done queue. */ 1292 /* Place command on done queue. */
1260 if (ha->status_srb == NULL) 1293 if (vha->status_srb == NULL)
1261 qla2x00_sp_compl(ha, sp); 1294 qla2x00_sp_compl(ha, sp);
1262} 1295}
1263 1296
@@ -1269,10 +1302,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1269 * Extended sense data. 1302 * Extended sense data.
1270 */ 1303 */
1271static void 1304static void
1272qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt) 1305qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1273{ 1306{
1274 uint8_t sense_sz = 0; 1307 uint8_t sense_sz = 0;
1275 srb_t *sp = ha->status_srb; 1308 struct qla_hw_data *ha = vha->hw;
1309 srb_t *sp = vha->status_srb;
1276 struct scsi_cmnd *cp; 1310 struct scsi_cmnd *cp;
1277 1311
1278 if (sp != NULL && sp->request_sense_length != 0) { 1312 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1284,7 +1318,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1284 "cmd is NULL: already returned to OS (sp=%p)\n", 1318 "cmd is NULL: already returned to OS (sp=%p)\n",
1285 sp); 1319 sp);
1286 1320
1287 ha->status_srb = NULL; 1321 vha->status_srb = NULL;
1288 return; 1322 return;
1289 } 1323 }
1290 1324
@@ -1305,7 +1339,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1305 1339
1306 /* Place command on done queue. */ 1340 /* Place command on done queue. */
1307 if (sp->request_sense_length == 0) { 1341 if (sp->request_sense_length == 0) {
1308 ha->status_srb = NULL; 1342 vha->status_srb = NULL;
1309 qla2x00_sp_compl(ha, sp); 1343 qla2x00_sp_compl(ha, sp);
1310 } 1344 }
1311 } 1345 }
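
[Editorial sketch] qla2x00_handle_sense and qla2x00_status_cont_entry cooperate to copy CHECK CONDITION sense data in fixed-size chunks, with status_srb (now on the vha) pointing at the command still waiting for more continuation entries. A simplified, self-contained sketch of that accumulation (the 32-byte chunk size is an assumption, not the driver's entry payload size):

    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK 32                /* assumed payload per continuation entry */

    struct pending_sense {
        uint8_t *dst;               /* where the next chunk lands */
        size_t   remaining;         /* bytes still expected */
    };

    /* Copy one continuation entry; return 1 while more data is expected. */
    static int consume_cont_entry(struct pending_sense *ps, const uint8_t *payload)
    {
        size_t n = ps->remaining < CHUNK ? ps->remaining : CHUNK;

        memcpy(ps->dst, payload, n);
        ps->dst += n;
        ps->remaining -= n;
        return ps->remaining != 0;  /* 0 -> command completes, status_srb cleared */
    }

    int main(void)
    {
        uint8_t sense[96] = { 0 }, chunk[CHUNK];
        struct pending_sense ps = { sense, sizeof(sense) };

        memset(chunk, 0xab, sizeof(chunk));
        while (consume_cont_entry(&ps, chunk))
            ;
        printf("sense buffer filled, last byte 0x%02x\n", sense[95]);
        return 0;
    }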
@@ -1317,10 +1351,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1317 * @pkt: Entry pointer 1351 * @pkt: Entry pointer
1318 */ 1352 */
1319static void 1353static void
1320qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) 1354qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1321{ 1355{
1322 srb_t *sp; 1356 srb_t *sp;
1323 1357 struct qla_hw_data *ha = vha->hw;
1358 struct req_que *req = rsp->req;
1324#if defined(QL_DEBUG_LEVEL_2) 1359#if defined(QL_DEBUG_LEVEL_2)
1325 if (pkt->entry_status & RF_INV_E_ORDER) 1360 if (pkt->entry_status & RF_INV_E_ORDER)
1326 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1361 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1339,13 +1374,13 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1339 1374
1340 /* Validate handle. */ 1375 /* Validate handle. */
1341 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1376 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1342 sp = ha->outstanding_cmds[pkt->handle]; 1377 sp = req->outstanding_cmds[pkt->handle];
1343 else 1378 else
1344 sp = NULL; 1379 sp = NULL;
1345 1380
1346 if (sp) { 1381 if (sp) {
1347 /* Free outstanding command slot. */ 1382 /* Free outstanding command slot. */
1348 ha->outstanding_cmds[pkt->handle] = NULL; 1383 req->outstanding_cmds[pkt->handle] = NULL;
1349 1384
1350 /* Bad payload or header */ 1385 /* Bad payload or header */
1351 if (pkt->entry_status & 1386 if (pkt->entry_status &
@@ -1362,12 +1397,12 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1362 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1397 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1363 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { 1398 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1364 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1399 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1365 ha->host_no)); 1400 vha->host_no));
1366 qla_printk(KERN_WARNING, ha, 1401 qla_printk(KERN_WARNING, ha,
1367 "Error entry - invalid handle\n"); 1402 "Error entry - invalid handle\n");
1368 1403
1369 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1404 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1370 qla2xxx_wake_dpc(ha); 1405 qla2xxx_wake_dpc(vha);
1371 } 1406 }
1372} 1407}
1373 1408
@@ -1377,10 +1412,11 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1377 * @mb0: Mailbox0 register 1412 * @mb0: Mailbox0 register
1378 */ 1413 */
1379static void 1414static void
1380qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) 1415qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1381{ 1416{
1382 uint16_t cnt; 1417 uint16_t cnt;
1383 uint16_t __iomem *wptr; 1418 uint16_t __iomem *wptr;
1419 struct qla_hw_data *ha = vha->hw;
1384 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1420 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1385 1421
1386 /* Load return mailbox registers. */ 1422 /* Load return mailbox registers. */
@@ -1395,10 +1431,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1395 1431
1396 if (ha->mcp) { 1432 if (ha->mcp) {
1397 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 1433 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1398 __func__, ha->host_no, ha->mcp->mb[0])); 1434 __func__, vha->host_no, ha->mcp->mb[0]));
1399 } else { 1435 } else {
1400 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 1436 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1401 __func__, ha->host_no)); 1437 __func__, vha->host_no));
1402 } 1438 }
1403} 1439}
1404 1440
@@ -1407,30 +1443,33 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1407 * @ha: SCSI driver HA context 1443 * @ha: SCSI driver HA context
1408 */ 1444 */
1409void 1445void
1410qla24xx_process_response_queue(struct scsi_qla_host *ha) 1446qla24xx_process_response_queue(struct rsp_que *rsp)
1411{ 1447{
1412 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1448 struct qla_hw_data *ha = rsp->hw;
1413 struct sts_entry_24xx *pkt; 1449 struct sts_entry_24xx *pkt;
1450 struct scsi_qla_host *vha;
1451
1452 vha = qla2x00_get_rsp_host(rsp);
1414 1453
1415 if (!ha->flags.online) 1454 if (!vha->flags.online)
1416 return; 1455 return;
1417 1456
1418 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { 1457 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1419 pkt = (struct sts_entry_24xx *)ha->response_ring_ptr; 1458 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1420 1459
1421 ha->rsp_ring_index++; 1460 rsp->ring_index++;
1422 if (ha->rsp_ring_index == ha->response_q_length) { 1461 if (rsp->ring_index == rsp->length) {
1423 ha->rsp_ring_index = 0; 1462 rsp->ring_index = 0;
1424 ha->response_ring_ptr = ha->response_ring; 1463 rsp->ring_ptr = rsp->ring;
1425 } else { 1464 } else {
1426 ha->response_ring_ptr++; 1465 rsp->ring_ptr++;
1427 } 1466 }
1428 1467
1429 if (pkt->entry_status != 0) { 1468 if (pkt->entry_status != 0) {
1430 DEBUG3(printk(KERN_INFO 1469 DEBUG3(printk(KERN_INFO
1431 "scsi(%ld): Process error entry.\n", ha->host_no)); 1470 "scsi(%ld): Process error entry.\n", vha->host_no));
1432 1471
1433 qla2x00_error_entry(ha, (sts_entry_t *) pkt); 1472 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1434 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1473 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1435 wmb(); 1474 wmb();
1436 continue; 1475 continue;
@@ -1438,13 +1477,13 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1438 1477
1439 switch (pkt->entry_type) { 1478 switch (pkt->entry_type) {
1440 case STATUS_TYPE: 1479 case STATUS_TYPE:
1441 qla2x00_status_entry(ha, pkt); 1480 qla2x00_status_entry(vha, rsp, pkt);
1442 break; 1481 break;
1443 case STATUS_CONT_TYPE: 1482 case STATUS_CONT_TYPE:
1444 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); 1483 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1445 break; 1484 break;
1446 case VP_RPT_ID_IOCB_TYPE: 1485 case VP_RPT_ID_IOCB_TYPE:
1447 qla24xx_report_id_acquisition(ha, 1486 qla24xx_report_id_acquisition(vha,
1448 (struct vp_rpt_id_entry_24xx *)pkt); 1487 (struct vp_rpt_id_entry_24xx *)pkt);
1449 break; 1488 break;
1450 default: 1489 default:
@@ -1452,7 +1491,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1452 DEBUG4(printk(KERN_WARNING 1491 DEBUG4(printk(KERN_WARNING
1453 "scsi(%ld): Received unknown response pkt type %x " 1492 "scsi(%ld): Received unknown response pkt type %x "
1454 "entry status=%x.\n", 1493 "entry status=%x.\n",
1455 ha->host_no, pkt->entry_type, pkt->entry_status)); 1494 vha->host_no, pkt->entry_type, pkt->entry_status));
1456 break; 1495 break;
1457 } 1496 }
1458 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1497 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1460,14 +1499,15 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1460 } 1499 }
1461 1500
1462 /* Adjust ring index */ 1501 /* Adjust ring index */
1463 WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index); 1502 ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
1464} 1503}
1465 1504
1466static void 1505static void
1467qla2xxx_check_risc_status(scsi_qla_host_t *ha) 1506qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1468{ 1507{
1469 int rval; 1508 int rval;
1470 uint32_t cnt; 1509 uint32_t cnt;
1510 struct qla_hw_data *ha = vha->hw;
1471 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1511 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1472 1512
1473 if (!IS_QLA25XX(ha)) 1513 if (!IS_QLA25XX(ha))
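
[Editorial sketch] The ring-index write-back above now goes through ha->isp_ops->wrt_rsp_reg instead of a hard-coded register, so the 24xx single-queue and 25xx multi-queue paths can diverge (the 25xx helper near the end of this file offsets into mqiobase by QLA_QUE_PAGE * id). A compilable sketch of that function-pointer indirection, with the MMIO writes replaced by prints and the window size taken as an assumed constant:

    #include <stdint.h>
    #include <stdio.h>

    #define QLA_QUE_PAGE 0x1000     /* assumed per-queue register window size */

    struct hw_data;
    struct isp_ops {
        void (*wrt_rsp_reg)(struct hw_data *ha, uint16_t id, uint16_t index);
    };
    struct hw_data {
        uintptr_t iobase, mqiobase;
        const struct isp_ops *isp_ops;
    };

    static void wrt_rsp_reg_24xx(struct hw_data *ha, uint16_t id, uint16_t index)
    {
        (void)id;                   /* single response queue: one fixed register */
        printf("24xx: write %u to rsp_q_out at %#lx\n",
               index, (unsigned long)ha->iobase);
    }

    static void wrt_rsp_reg_25xx(struct hw_data *ha, uint16_t id, uint16_t index)
    {
        uintptr_t reg = ha->mqiobase + (uintptr_t)QLA_QUE_PAGE * id;
        printf("25xx: write %u to queue %u window at %#lx\n",
               index, id, (unsigned long)reg);
    }

    int main(void)
    {
        static const struct isp_ops ops25 = { wrt_rsp_reg_25xx };
        struct hw_data ha = { 0x1000, 0x8000, &ops25 };

        ha.isp_ops->wrt_rsp_reg(&ha, 1, 42);   /* same call site for both chips */
        (void)wrt_rsp_reg_24xx;
        return 0;
    }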
@@ -1521,25 +1561,29 @@ done:
1521irqreturn_t 1561irqreturn_t
1522qla24xx_intr_handler(int irq, void *dev_id) 1562qla24xx_intr_handler(int irq, void *dev_id)
1523{ 1563{
1524 scsi_qla_host_t *ha; 1564 scsi_qla_host_t *vha;
1565 struct qla_hw_data *ha;
1525 struct device_reg_24xx __iomem *reg; 1566 struct device_reg_24xx __iomem *reg;
1526 int status; 1567 int status;
1527 unsigned long iter; 1568 unsigned long iter;
1528 uint32_t stat; 1569 uint32_t stat;
1529 uint32_t hccr; 1570 uint32_t hccr;
1530 uint16_t mb[4]; 1571 uint16_t mb[4];
1572 struct rsp_que *rsp;
1531 1573
1532 ha = (scsi_qla_host_t *) dev_id; 1574 rsp = (struct rsp_que *) dev_id;
1533 if (!ha) { 1575 if (!rsp) {
1534 printk(KERN_INFO 1576 printk(KERN_INFO
1535 "%s(): NULL host pointer\n", __func__); 1577 "%s(): NULL response queue pointer\n", __func__);
1536 return IRQ_NONE; 1578 return IRQ_NONE;
1537 } 1579 }
1538 1580
1581 ha = rsp->hw;
1539 reg = &ha->iobase->isp24; 1582 reg = &ha->iobase->isp24;
1540 status = 0; 1583 status = 0;
1541 1584
1542 spin_lock(&ha->hardware_lock); 1585 spin_lock(&ha->hardware_lock);
1586 vha = qla2x00_get_rsp_host(rsp);
1543 for (iter = 50; iter--; ) { 1587 for (iter = 50; iter--; ) {
1544 stat = RD_REG_DWORD(&reg->host_status); 1588 stat = RD_REG_DWORD(&reg->host_status);
1545 if (stat & HSRX_RISC_PAUSED) { 1589 if (stat & HSRX_RISC_PAUSED) {
@@ -1547,7 +1591,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1547 break; 1591 break;
1548 1592
1549 if (ha->hw_event_pause_errors == 0) 1593 if (ha->hw_event_pause_errors == 0)
1550 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, 1594 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1551 0, MSW(stat), LSW(stat)); 1595 0, MSW(stat), LSW(stat));
1552 else if (ha->hw_event_pause_errors < 0xffffffff) 1596 else if (ha->hw_event_pause_errors < 0xffffffff)
1553 ha->hw_event_pause_errors++; 1597 ha->hw_event_pause_errors++;
@@ -1557,10 +1601,10 @@ qla24xx_intr_handler(int irq, void *dev_id)
1557 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1601 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1558 "Dumping firmware!\n", hccr); 1602 "Dumping firmware!\n", hccr);
1559 1603
1560 qla2xxx_check_risc_status(ha); 1604 qla2xxx_check_risc_status(vha);
1561 1605
1562 ha->isp_ops->fw_dump(ha, 1); 1606 ha->isp_ops->fw_dump(vha, 1);
1563 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1607 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1564 break; 1608 break;
1565 } else if ((stat & HSRX_RISC_INT) == 0) 1609 } else if ((stat & HSRX_RISC_INT) == 0)
1566 break; 1610 break;
@@ -1570,7 +1614,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1570 case 0x2: 1614 case 0x2:
1571 case 0x10: 1615 case 0x10:
1572 case 0x11: 1616 case 0x11:
1573 qla24xx_mbx_completion(ha, MSW(stat)); 1617 qla24xx_mbx_completion(vha, MSW(stat));
1574 status |= MBX_INTERRUPT; 1618 status |= MBX_INTERRUPT;
1575 1619
1576 break; 1620 break;
@@ -1579,15 +1623,16 @@ qla24xx_intr_handler(int irq, void *dev_id)
1579 mb[1] = RD_REG_WORD(&reg->mailbox1); 1623 mb[1] = RD_REG_WORD(&reg->mailbox1);
1580 mb[2] = RD_REG_WORD(&reg->mailbox2); 1624 mb[2] = RD_REG_WORD(&reg->mailbox2);
1581 mb[3] = RD_REG_WORD(&reg->mailbox3); 1625 mb[3] = RD_REG_WORD(&reg->mailbox3);
1582 qla2x00_async_event(ha, mb); 1626 qla2x00_async_event(vha, rsp, mb);
1583 break; 1627 break;
1584 case 0x13: 1628 case 0x13:
1585 qla24xx_process_response_queue(ha); 1629 case 0x14:
1630 qla24xx_process_response_queue(rsp);
1586 break; 1631 break;
1587 default: 1632 default:
1588 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1633 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1589 "(%d).\n", 1634 "(%d).\n",
1590 ha->host_no, stat & 0xff)); 1635 vha->host_no, stat & 0xff));
1591 break; 1636 break;
1592 } 1637 }
1593 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1638 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1607,15 +1652,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
1607static irqreturn_t 1652static irqreturn_t
1608qla24xx_msix_rsp_q(int irq, void *dev_id) 1653qla24xx_msix_rsp_q(int irq, void *dev_id)
1609{ 1654{
1610 scsi_qla_host_t *ha; 1655 struct qla_hw_data *ha;
1656 struct rsp_que *rsp;
1611 struct device_reg_24xx __iomem *reg; 1657 struct device_reg_24xx __iomem *reg;
1612 1658
1613 ha = dev_id; 1659 rsp = (struct rsp_que *) dev_id;
1660 if (!rsp) {
1661 printk(KERN_INFO
1662 "%s(): NULL response queue pointer\n", __func__);
1663 return IRQ_NONE;
1664 }
1665 ha = rsp->hw;
1614 reg = &ha->iobase->isp24; 1666 reg = &ha->iobase->isp24;
1615 1667
1616 spin_lock_irq(&ha->hardware_lock); 1668 spin_lock_irq(&ha->hardware_lock);
1617 1669
1618 qla24xx_process_response_queue(ha); 1670 qla24xx_process_response_queue(rsp);
1619 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1671 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1620 1672
1621 spin_unlock_irq(&ha->hardware_lock); 1673 spin_unlock_irq(&ha->hardware_lock);
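
[Editorial sketch] The MSI-X handlers now receive the struct rsp_que as their dev_id cookie instead of the host, and recover the hardware context from rsp->hw; that is what allows one handler body to serve several queues. A small mock of the cookie pattern (types and return codes are stand-ins for the kernel's):

    #include <stdio.h>

    struct hw_data { const char *name; };
    struct rsp_que { int id; struct hw_data *hw; };

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    /* Same shape as an irq handler: the void *cookie is whatever was registered. */
    static enum irqreturn msix_rsp_q(int irq, void *dev_id)
    {
        struct rsp_que *rsp = dev_id;

        if (!rsp)
            return IRQ_NONE;
        printf("irq %d: servicing queue %d on %s\n", irq, rsp->id, rsp->hw->name);
        return IRQ_HANDLED;
    }

    int main(void)
    {
        struct hw_data hw = { "qla2xxx" };
        struct rsp_que rsp0 = { 0, &hw }, rsp1 = { 1, &hw };

        /* request_irq(vec, handler, 0, name, &rspN) would register these cookies. */
        msix_rsp_q(40, &rsp0);
        msix_rsp_q(41, &rsp1);
        return 0;
    }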
@@ -1624,20 +1676,64 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1624} 1676}
1625 1677
1626static irqreturn_t 1678static irqreturn_t
1679qla25xx_msix_rsp_q(int irq, void *dev_id)
1680{
1681 struct qla_hw_data *ha;
1682 struct rsp_que *rsp;
1683 struct device_reg_24xx __iomem *reg;
1684 uint16_t msix_disabled_hccr = 0;
1685
1686 rsp = (struct rsp_que *) dev_id;
1687 if (!rsp) {
1688 printk(KERN_INFO
1689 "%s(): NULL response queue pointer\n", __func__);
1690 return IRQ_NONE;
1691 }
1692 ha = rsp->hw;
1693 reg = &ha->iobase->isp24;
1694
1695 spin_lock_irq(&ha->hardware_lock);
1696
1697 msix_disabled_hccr = rsp->options;
1698 if (!rsp->id)
1699 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1700 else
1701 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
1702
1703 qla24xx_process_response_queue(rsp);
1704
1705 if (!msix_disabled_hccr)
1706 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1707
1708 spin_unlock_irq(&ha->hardware_lock);
1709
1710 return IRQ_HANDLED;
1711}
1712
1713static irqreturn_t
1627qla24xx_msix_default(int irq, void *dev_id) 1714qla24xx_msix_default(int irq, void *dev_id)
1628{ 1715{
1629 scsi_qla_host_t *ha; 1716 scsi_qla_host_t *vha;
1717 struct qla_hw_data *ha;
1718 struct rsp_que *rsp;
1630 struct device_reg_24xx __iomem *reg; 1719 struct device_reg_24xx __iomem *reg;
1631 int status; 1720 int status;
1632 uint32_t stat; 1721 uint32_t stat;
1633 uint32_t hccr; 1722 uint32_t hccr;
1634 uint16_t mb[4]; 1723 uint16_t mb[4];
1635 1724
1636 ha = dev_id; 1725 rsp = (struct rsp_que *) dev_id;
1726 if (!rsp) {
1727 DEBUG(printk(
1728 "%s(): NULL response queue pointer\n", __func__));
1729 return IRQ_NONE;
1730 }
1731 ha = rsp->hw;
1637 reg = &ha->iobase->isp24; 1732 reg = &ha->iobase->isp24;
1638 status = 0; 1733 status = 0;
1639 1734
1640 spin_lock_irq(&ha->hardware_lock); 1735 spin_lock_irq(&ha->hardware_lock);
1736 vha = qla2x00_get_rsp_host(rsp);
1641 do { 1737 do {
1642 stat = RD_REG_DWORD(&reg->host_status); 1738 stat = RD_REG_DWORD(&reg->host_status);
1643 if (stat & HSRX_RISC_PAUSED) { 1739 if (stat & HSRX_RISC_PAUSED) {
@@ -1645,7 +1741,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1645 break; 1741 break;
1646 1742
1647 if (ha->hw_event_pause_errors == 0) 1743 if (ha->hw_event_pause_errors == 0)
1648 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, 1744 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1649 0, MSW(stat), LSW(stat)); 1745 0, MSW(stat), LSW(stat));
1650 else if (ha->hw_event_pause_errors < 0xffffffff) 1746 else if (ha->hw_event_pause_errors < 0xffffffff)
1651 ha->hw_event_pause_errors++; 1747 ha->hw_event_pause_errors++;
@@ -1655,10 +1751,10 @@ qla24xx_msix_default(int irq, void *dev_id)
1655 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1751 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1656 "Dumping firmware!\n", hccr); 1752 "Dumping firmware!\n", hccr);
1657 1753
1658 qla2xxx_check_risc_status(ha); 1754 qla2xxx_check_risc_status(vha);
1659 1755
1660 ha->isp_ops->fw_dump(ha, 1); 1756 ha->isp_ops->fw_dump(vha, 1);
1661 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1757 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1662 break; 1758 break;
1663 } else if ((stat & HSRX_RISC_INT) == 0) 1759 } else if ((stat & HSRX_RISC_INT) == 0)
1664 break; 1760 break;
@@ -1668,7 +1764,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1668 case 0x2: 1764 case 0x2:
1669 case 0x10: 1765 case 0x10:
1670 case 0x11: 1766 case 0x11:
1671 qla24xx_mbx_completion(ha, MSW(stat)); 1767 qla24xx_mbx_completion(vha, MSW(stat));
1672 status |= MBX_INTERRUPT; 1768 status |= MBX_INTERRUPT;
1673 1769
1674 break; 1770 break;
@@ -1677,15 +1773,16 @@ qla24xx_msix_default(int irq, void *dev_id)
1677 mb[1] = RD_REG_WORD(&reg->mailbox1); 1773 mb[1] = RD_REG_WORD(&reg->mailbox1);
1678 mb[2] = RD_REG_WORD(&reg->mailbox2); 1774 mb[2] = RD_REG_WORD(&reg->mailbox2);
1679 mb[3] = RD_REG_WORD(&reg->mailbox3); 1775 mb[3] = RD_REG_WORD(&reg->mailbox3);
1680 qla2x00_async_event(ha, mb); 1776 qla2x00_async_event(vha, rsp, mb);
1681 break; 1777 break;
1682 case 0x13: 1778 case 0x13:
1683 qla24xx_process_response_queue(ha); 1779 case 0x14:
1780 qla24xx_process_response_queue(rsp);
1684 break; 1781 break;
1685 default: 1782 default:
1686 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1783 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1687 "(%d).\n", 1784 "(%d).\n",
1688 ha->host_no, stat & 0xff)); 1785 vha->host_no, stat & 0xff));
1689 break; 1786 break;
1690 } 1787 }
1691 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1788 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
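
[Editorial sketch] Both the INTx and MSI-X default handlers decode the low byte of host_status the same way; the hunks above only reroute each case through the new vha/rsp arguments and add 0x14 as a second response-queue code. A stripped-down sketch of that dispatch (case names are descriptive stand-ins for the raw values the driver switches on):

    #include <stdint.h>
    #include <stdio.h>

    static void dispatch(uint32_t stat)
    {
        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            printf("mailbox completion, mb0=0x%x\n", stat >> 16);
            break;
        case 0x12:
            printf("asynchronous event\n");
            break;
        case 0x13:
        case 0x14:                 /* new in this patch: multiqueue doorbell */
            printf("process response queue\n");
            break;
        default:
            printf("unrecognized interrupt type %u\n", stat & 0xff);
            break;
        }
    }

    int main(void)
    {
        dispatch(0x00070011);   /* mailbox completion */
        dispatch(0x13);         /* response queue update */
        return 0;
    }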
@@ -1710,70 +1807,138 @@ struct qla_init_msix_entry {
1710 irq_handler_t handler; 1807 irq_handler_t handler;
1711}; 1808};
1712 1809
1713static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = { 1810static struct qla_init_msix_entry base_queue = {
1714 { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT, 1811 .entry = 0,
1715 "qla2xxx (default)", qla24xx_msix_default }, 1812 .index = 0,
1813 .name = "qla2xxx (default)",
1814 .handler = qla24xx_msix_default,
1815};
1816
1817static struct qla_init_msix_entry base_rsp_queue = {
1818 .entry = 1,
1819 .index = 1,
1820 .name = "qla2xxx (rsp_q)",
1821 .handler = qla24xx_msix_rsp_q,
1822};
1716 1823
1717 { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q, 1824static struct qla_init_msix_entry multi_rsp_queue = {
1718 "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 1825 .entry = 1,
1826 .index = 1,
1827 .name = "qla2xxx (multi_q)",
1828 .handler = qla25xx_msix_rsp_q,
1719}; 1829};
1720 1830
1721static void 1831static void
1722qla24xx_disable_msix(scsi_qla_host_t *ha) 1832qla24xx_disable_msix(struct qla_hw_data *ha)
1723{ 1833{
1724 int i; 1834 int i;
1725 struct qla_msix_entry *qentry; 1835 struct qla_msix_entry *qentry;
1726 1836
1727 for (i = 0; i < QLA_MSIX_ENTRIES; i++) { 1837 for (i = 0; i < ha->msix_count; i++) {
1728 qentry = &ha->msix_entries[imsix_entries[i].index]; 1838 qentry = &ha->msix_entries[i];
1729 if (qentry->have_irq) 1839 if (qentry->have_irq)
1730 free_irq(qentry->msix_vector, ha); 1840 free_irq(qentry->vector, qentry->rsp);
1731 } 1841 }
1732 pci_disable_msix(ha->pdev); 1842 pci_disable_msix(ha->pdev);
1843 kfree(ha->msix_entries);
1844 ha->msix_entries = NULL;
1845 ha->flags.msix_enabled = 0;
1733} 1846}
1734 1847
1735static int 1848static int
1736qla24xx_enable_msix(scsi_qla_host_t *ha) 1849qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1737{ 1850{
1738 int i, ret; 1851 int i, ret;
1739 struct msix_entry entries[QLA_MSIX_ENTRIES]; 1852 struct msix_entry *entries;
1740 struct qla_msix_entry *qentry; 1853 struct qla_msix_entry *qentry;
1854 struct qla_init_msix_entry *msix_queue;
1741 1855
1742 for (i = 0; i < QLA_MSIX_ENTRIES; i++) 1856 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1743 entries[i].entry = imsix_entries[i].entry; 1857 GFP_KERNEL);
1858 if (!entries)
1859 return -ENOMEM;
1744 1860
1745 ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries)); 1861 for (i = 0; i < ha->msix_count; i++)
1862 entries[i].entry = i;
1863
1864 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1746 if (ret) { 1865 if (ret) {
1747 qla_printk(KERN_WARNING, ha, 1866 qla_printk(KERN_WARNING, ha,
1748 "MSI-X: Failed to enable support -- %d/%d\n", 1867 "MSI-X: Failed to enable support -- %d/%d\n"
1749 QLA_MSIX_ENTRIES, ret); 1868 " Retry with %d vectors\n", ha->msix_count, ret, ret);
1869 ha->msix_count = ret;
1870 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1871 if (ret) {
1872 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1873 " support, giving up -- %d/%d\n",
1874 ha->msix_count, ret);
1875 goto msix_out;
1876 }
1877 ha->max_queues = ha->msix_count - 1;
1878 }
1879 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1880 ha->msix_count, GFP_KERNEL);
1881 if (!ha->msix_entries) {
1882 ret = -ENOMEM;
1750 goto msix_out; 1883 goto msix_out;
1751 } 1884 }
1752 ha->flags.msix_enabled = 1; 1885 ha->flags.msix_enabled = 1;
1753 1886
1754 for (i = 0; i < QLA_MSIX_ENTRIES; i++) { 1887 for (i = 0; i < ha->msix_count; i++) {
1755 qentry = &ha->msix_entries[imsix_entries[i].index]; 1888 qentry = &ha->msix_entries[i];
1756 qentry->msix_vector = entries[i].vector; 1889 qentry->vector = entries[i].vector;
1757 qentry->msix_entry = entries[i].entry; 1890 qentry->entry = entries[i].entry;
1758 qentry->have_irq = 0; 1891 qentry->have_irq = 0;
1759 ret = request_irq(qentry->msix_vector, 1892 qentry->rsp = NULL;
1760 imsix_entries[i].handler, 0, imsix_entries[i].name, ha); 1893 }
1761 if (ret) { 1894
1762 qla_printk(KERN_WARNING, ha, 1895 /* Enable MSI-X for AENs for queue 0 */
1763 "MSI-X: Unable to register handler -- %x/%d.\n", 1896 qentry = &ha->msix_entries[0];
1764 imsix_entries[i].index, ret); 1897 ret = request_irq(qentry->vector, base_queue.handler, 0,
1765 qla24xx_disable_msix(ha); 1898 base_queue.name, rsp);
1766 goto msix_out; 1899 if (ret) {
1767 } 1900 qla_printk(KERN_WARNING, ha,
1768 qentry->have_irq = 1; 1901 "MSI-X: Unable to register handler -- %x/%d.\n",
1902 qentry->vector, ret);
1903 qla24xx_disable_msix(ha);
1904 goto msix_out;
1769 } 1905 }
1906 qentry->have_irq = 1;
1907 qentry->rsp = rsp;
1908
1909 /* Enable MSI-X vector for response queue update for queue 0 */
1910 if (ha->max_queues > 1 && ha->mqiobase) {
1911 ha->mqenable = 1;
1912 msix_queue = &multi_rsp_queue;
1913 qla_printk(KERN_INFO, ha,
1914 "MQ enabled, Number of Queue Resources: %d \n",
1915 ha->max_queues);
1916 } else {
1917 ha->mqenable = 0;
1918 msix_queue = &base_rsp_queue;
1919 }
1920
1921 qentry = &ha->msix_entries[1];
1922 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1923 msix_queue->name, rsp);
1924 if (ret) {
1925 qla_printk(KERN_WARNING, ha,
1926 "MSI-X: Unable to register handler -- %x/%d.\n",
1927 qentry->vector, ret);
1928 qla24xx_disable_msix(ha);
1929 ha->mqenable = 0;
1930 goto msix_out;
1931 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1770 1934
1771msix_out: 1935msix_out:
1936 kfree(entries);
1772 return ret; 1937 return ret;
1773} 1938}
1774 1939
1775int 1940int
1776qla2x00_request_irqs(scsi_qla_host_t *ha) 1941qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1777{ 1942{
1778 int ret; 1943 int ret;
1779 device_reg_t __iomem *reg = ha->iobase; 1944 device_reg_t __iomem *reg = ha->iobase;
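
[Editorial sketch] The rewritten qla24xx_enable_msix asks for ha->msix_count vectors and, when pci_enable_msix() reports that only a smaller number is available, retries once with that count before giving up; vector 0 is then wired to the default handler and vector 1 to either the single-queue or the multiqueue response handler. A runnable mock of just the negotiation logic (try_enable stands in for pci_enable_msix, whose old contract returned 0 on success or the available vector count; the real call and request_irq are kernel-only):

    #include <stdio.h>

    /* Mimics old pci_enable_msix(): 0 = success, >0 = vectors actually available. */
    static int try_enable(int requested, int available)
    {
        return requested <= available ? 0 : available;
    }

    static int enable_msix(int *msix_count, int available)
    {
        int ret = try_enable(*msix_count, available);

        if (ret > 0) {
            fprintf(stderr, "wanted %d vectors, retrying with %d\n",
                    *msix_count, ret);
            *msix_count = ret;
            ret = try_enable(*msix_count, available);
        }
        return ret;     /* 0 on success; caller then request_irq()s each vector */
    }

    int main(void)
    {
        int count = 4;

        if (enable_msix(&count, 2) == 0)
            printf("MSI-X enabled with %d vectors (queues usable: %d)\n",
                   count, count - 1);
        return 0;
    }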
@@ -1782,11 +1947,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1782 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) 1947 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1783 goto skip_msix; 1948 goto skip_msix;
1784 1949
1785 if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || 1950 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1786 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 1951 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1787 DEBUG2(qla_printk(KERN_WARNING, ha, 1952 DEBUG2(qla_printk(KERN_WARNING, ha,
1788 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 1953 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1789 ha->chip_revision, ha->fw_attributes)); 1954 ha->pdev->revision, ha->fw_attributes));
1790 1955
1791 goto skip_msix; 1956 goto skip_msix;
1792 } 1957 }
@@ -1803,7 +1968,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1803 goto skip_msi; 1968 goto skip_msi;
1804 } 1969 }
1805 1970
1806 ret = qla24xx_enable_msix(ha); 1971 ret = qla24xx_enable_msix(ha, rsp);
1807 if (!ret) { 1972 if (!ret) {
1808 DEBUG2(qla_printk(KERN_INFO, ha, 1973 DEBUG2(qla_printk(KERN_INFO, ha,
1809 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 1974 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
@@ -1825,7 +1990,7 @@ skip_msix:
1825skip_msi: 1990skip_msi:
1826 1991
1827 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1992 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1828 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1993 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
1829 if (ret) { 1994 if (ret) {
1830 qla_printk(KERN_WARNING, ha, 1995 qla_printk(KERN_WARNING, ha,
1831 "Failed to reserve interrupt %d already in use.\n", 1996 "Failed to reserve interrupt %d already in use.\n",
@@ -1833,10 +1998,8 @@ skip_msi:
1833 goto fail; 1998 goto fail;
1834 } 1999 }
1835 ha->flags.inta_enabled = 1; 2000 ha->flags.inta_enabled = 1;
1836 ha->host->irq = ha->pdev->irq;
1837clear_risc_ints: 2001clear_risc_ints:
1838 2002
1839 ha->isp_ops->disable_intrs(ha);
1840 spin_lock_irq(&ha->hardware_lock); 2003 spin_lock_irq(&ha->hardware_lock);
1841 if (IS_FWI2_CAPABLE(ha)) { 2004 if (IS_FWI2_CAPABLE(ha)) {
1842 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 2005 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -1853,13 +2016,74 @@ fail:
1853} 2016}
1854 2017
1855void 2018void
1856qla2x00_free_irqs(scsi_qla_host_t *ha) 2019qla2x00_free_irqs(scsi_qla_host_t *vha)
1857{ 2020{
2021 struct qla_hw_data *ha = vha->hw;
2022 struct rsp_que *rsp = ha->rsp_q_map[0];
1858 2023
1859 if (ha->flags.msix_enabled) 2024 if (ha->flags.msix_enabled)
1860 qla24xx_disable_msix(ha); 2025 qla24xx_disable_msix(ha);
1861 else if (ha->flags.inta_enabled) { 2026 else if (ha->flags.inta_enabled) {
1862 free_irq(ha->host->irq, ha); 2027 free_irq(ha->pdev->irq, rsp);
1863 pci_disable_msi(ha->pdev); 2028 pci_disable_msi(ha->pdev);
1864 } 2029 }
1865} 2030}
2031
2032static struct scsi_qla_host *
2033qla2x00_get_rsp_host(struct rsp_que *rsp)
2034{
2035 srb_t *sp;
2036 struct qla_hw_data *ha = rsp->hw;
2037 struct scsi_qla_host *vha = NULL;
2038 struct sts_entry_24xx *pkt;
2039 struct req_que *req;
2040
2041 if (rsp->id) {
2042 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2043 req = rsp->req;
2044 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2045 sp = req->outstanding_cmds[pkt->handle];
2046 if (sp)
2047 vha = sp->vha;
2048 }
2049 }
2050 if (!vha)
2051 /* handle it in base queue */
2052 vha = pci_get_drvdata(ha->pdev);
2053
2054 return vha;
2055}
2056
2057int qla25xx_request_irq(struct rsp_que *rsp)
2058{
2059 struct qla_hw_data *ha = rsp->hw;
2060 struct qla_init_msix_entry *intr = &multi_rsp_queue;
2061 struct qla_msix_entry *msix = rsp->msix;
2062 int ret;
2063
2064 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2065 if (ret) {
2066 qla_printk(KERN_WARNING, ha,
2067 "MSI-X: Unable to register handler -- %x/%d.\n",
2068 msix->vector, ret);
2069 return ret;
2070 }
2071 msix->have_irq = 1;
2072 msix->rsp = rsp;
2073 return ret;
2074}
2075
2076void
2077qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2078{
2079 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2080 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2081}
2082
2083void
2084qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2085{
2086 device_reg_t __iomem *reg = (void *) ha->iobase;
2087 WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2088}
2089
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3402746ec128..a99976f5fabd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -29,7 +29,7 @@
29 * Kernel context. 29 * Kernel context.
30 */ 30 */
31static int 31static int
32qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp) 32qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
33{ 33{
34 int rval; 34 int rval;
35 unsigned long flags = 0; 35 unsigned long flags = 0;
@@ -42,15 +42,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
42 uint32_t cnt; 42 uint32_t cnt;
43 uint32_t mboxes; 43 uint32_t mboxes;
44 unsigned long wait_time; 44 unsigned long wait_time;
45 scsi_qla_host_t *ha = to_qla_parent(pvha); 45 struct qla_hw_data *ha = vha->hw;
46 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
46 47
47 reg = ha->iobase; 48 reg = ha->iobase;
48 io_lock_on = ha->flags.init_done; 49 io_lock_on = base_vha->flags.init_done;
49 50
50 rval = QLA_SUCCESS; 51 rval = QLA_SUCCESS;
51 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 52 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
52 53
53 DEBUG11(printk("%s(%ld): entered.\n", __func__, pvha->host_no)); 54 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
54 55
55 /* 56 /*
56 * Wait for active mailbox commands to finish by waiting at most tov 57 * Wait for active mailbox commands to finish by waiting at most tov
@@ -62,7 +63,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
62 mcp->tov * HZ)) { 63 mcp->tov * HZ)) {
63 /* Timeout occurred. Return error. */ 64 /* Timeout occurred. Return error. */
64 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 65 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
65 "Exiting.\n", __func__, ha->host_no)); 66 "Exiting.\n", __func__, base_vha->host_no));
66 return QLA_FUNCTION_TIMEOUT; 67 return QLA_FUNCTION_TIMEOUT;
67 } 68 }
68 } 69 }
@@ -72,7 +73,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
72 ha->mcp = mcp; 73 ha->mcp = mcp;
73 74
74 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 75 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
75 ha->host_no, mcp->mb[0])); 76 base_vha->host_no, mcp->mb[0]));
76 77
77 spin_lock_irqsave(&ha->hardware_lock, flags); 78 spin_lock_irqsave(&ha->hardware_lock, flags);
78 79
@@ -100,15 +101,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
100 101
101#if defined(QL_DEBUG_LEVEL_1) 102#if defined(QL_DEBUG_LEVEL_1)
102 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n", 103 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n",
103 __func__, ha->host_no); 104 __func__, base_vha->host_no);
104 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16); 105 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16);
105 printk("\n"); 106 printk("\n");
106 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16); 107 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16);
107 printk("\n"); 108 printk("\n");
108 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8); 109 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8);
109 printk("\n"); 110 printk("\n");
110 printk("%s(%ld): I/O address = %p.\n", __func__, ha->host_no, optr); 111 printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no,
111 qla2x00_dump_regs(ha); 112 optr);
113 qla2x00_dump_regs(base_vha);
112#endif 114#endif
113 115
114 /* Issue set host interrupt command to send cmd out. */ 116 /* Issue set host interrupt command to send cmd out. */
@@ -117,7 +119,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
117 119
118 /* Unlock mbx registers and wait for interrupt */ 120 /* Unlock mbx registers and wait for interrupt */
119 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 121 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
120 "jiffies=%lx.\n", __func__, ha->host_no, jiffies)); 122 "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies));
121 123
122 /* Wait for mbx cmd completion until timeout */ 124 /* Wait for mbx cmd completion until timeout */
123 125
@@ -137,7 +139,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
137 139
138 } else { 140 } else {
139 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 141 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
140 ha->host_no, command)); 142 base_vha->host_no, command));
141 143
142 if (IS_FWI2_CAPABLE(ha)) 144 if (IS_FWI2_CAPABLE(ha))
143 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 145 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
@@ -151,7 +153,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
151 break; 153 break;
152 154
153 /* Check for pending interrupts. */ 155 /* Check for pending interrupts. */
154 qla2x00_poll(ha); 156 qla2x00_poll(ha->rsp_q_map[0]);
155 157
156 if (command != MBC_LOAD_RISC_RAM_EXTENDED && 158 if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
157 !ha->flags.mbox_int) 159 !ha->flags.mbox_int)
@@ -164,7 +166,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
164 uint16_t *iptr2; 166 uint16_t *iptr2;
165 167
166 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 168 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
167 ha->host_no, command)); 169 base_vha->host_no, command));
168 170
169 /* Got interrupt. Clear the flag. */ 171 /* Got interrupt. Clear the flag. */
170 ha->flags.mbox_int = 0; 172 ha->flags.mbox_int = 0;
@@ -200,12 +202,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
200 ictrl = RD_REG_WORD(&reg->isp.ictrl); 202 ictrl = RD_REG_WORD(&reg->isp.ictrl);
201 } 203 }
202 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 204 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
203 __func__, ha->host_no, command); 205 __func__, base_vha->host_no, command);
204 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__, 206 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__,
205 ha->host_no, ictrl, jiffies); 207 base_vha->host_no, ictrl, jiffies);
206 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__, 208 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__,
207 ha->host_no, mb0); 209 base_vha->host_no, mb0);
208 qla2x00_dump_regs(ha); 210 qla2x00_dump_regs(base_vha);
209#endif 211#endif
210 212
211 rval = QLA_FUNCTION_TIMEOUT; 213 rval = QLA_FUNCTION_TIMEOUT;
@@ -218,10 +220,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
218 220
219 if (abort_active || !io_lock_on) { 221 if (abort_active || !io_lock_on) {
220 DEBUG11(printk("%s(%ld): checking for additional resp " 222 DEBUG11(printk("%s(%ld): checking for additional resp "
221 "interrupt.\n", __func__, ha->host_no)); 223 "interrupt.\n", __func__, base_vha->host_no));
222 224
223 /* polling mode for non isp_abort commands. */ 225 /* polling mode for non isp_abort commands. */
224 qla2x00_poll(ha); 226 qla2x00_poll(ha->rsp_q_map[0]);
225 } 227 }
226 228
227 if (rval == QLA_FUNCTION_TIMEOUT && 229 if (rval == QLA_FUNCTION_TIMEOUT &&
@@ -229,35 +231,37 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
229 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { 231 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
230 /* not in dpc. schedule it for dpc to take over. */ 232 /* not in dpc. schedule it for dpc to take over. */
231 DEBUG(printk("%s(%ld): timeout schedule " 233 DEBUG(printk("%s(%ld): timeout schedule "
232 "isp_abort_needed.\n", __func__, ha->host_no)); 234 "isp_abort_needed.\n", __func__,
235 base_vha->host_no));
233 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 236 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
234 "isp_abort_needed.\n", __func__, ha->host_no)); 237 "isp_abort_needed.\n", __func__,
238 base_vha->host_no));
235 qla_printk(KERN_WARNING, ha, 239 qla_printk(KERN_WARNING, ha,
236 "Mailbox command timeout occurred. Scheduling ISP " 240 "Mailbox command timeout occurred. Scheduling ISP "
237 "abort.\n"); 241 "abort.\n");
238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 242 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
239 qla2xxx_wake_dpc(ha); 243 qla2xxx_wake_dpc(vha);
240 } else if (!abort_active) { 244 } else if (!abort_active) {
241 /* call abort directly since we are in the DPC thread */ 245 /* call abort directly since we are in the DPC thread */
242 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 246 DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
243 __func__, ha->host_no)); 247 __func__, base_vha->host_no));
244 DEBUG2_3_11(printk("%s(%ld): timeout calling " 248 DEBUG2_3_11(printk("%s(%ld): timeout calling "
245 "abort_isp\n", __func__, ha->host_no)); 249 "abort_isp\n", __func__, base_vha->host_no));
246 qla_printk(KERN_WARNING, ha, 250 qla_printk(KERN_WARNING, ha,
247 "Mailbox command timeout occurred. Issuing ISP " 251 "Mailbox command timeout occurred. Issuing ISP "
248 "abort.\n"); 252 "abort.\n");
249 253
250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 254 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
251 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 255 clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
252 if (qla2x00_abort_isp(ha)) { 256 if (qla2x00_abort_isp(base_vha)) {
253 /* Failed. retry later. */ 257 /* Failed. retry later. */
254 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 258 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
255 } 259 }
256 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 260 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
257 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__, 261 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__,
258 ha->host_no)); 262 base_vha->host_no));
259 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n", 263 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n",
260 __func__, ha->host_no)); 264 __func__, base_vha->host_no));
261 } 265 }
262 } 266 }
263 267
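
[Editorial sketch] On a mailbox command timeout the rewritten error path distinguishes two contexts: outside the DPC thread it only sets ISP_ABORT_NEEDED on the base vha and wakes the DPC, while inside the DPC (and not already aborting) it calls qla2x00_abort_isp directly. A compact sketch of that decision, with the driver calls reduced to prints:

    #include <stdbool.h>
    #include <stdio.h>

    static void recover_from_mbx_timeout(bool io_lock_on, bool ioctl_cmd,
                                         bool abort_active)
    {
        if (!io_lock_on || ioctl_cmd) {
            /* Not in the DPC thread: defer, let DPC own the ISP abort. */
            printf("set ISP_ABORT_NEEDED on base vha and wake DPC\n");
        } else if (!abort_active) {
            /* Already in DPC context: abort inline, retry later on failure. */
            printf("set ABORT_ISP_ACTIVE, call qla2x00_abort_isp(base_vha)\n");
            printf("clear ABORT_ISP_ACTIVE\n");
        }
    }

    int main(void)
    {
        recover_from_mbx_timeout(false, false, false);  /* e.g. during init */
        recover_from_mbx_timeout(true,  false, false);  /* from the DPC thread */
        return 0;
    }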
@@ -267,24 +271,26 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
267 271
268 if (rval) { 272 if (rval) {
269 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 273 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
270 "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no, 274 "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no,
271 mcp->mb[0], mcp->mb[1], mcp->mb[2], command)); 275 mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
272 } else { 276 } else {
273 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 277 DEBUG11(printk("%s(%ld): done.\n", __func__,
278 base_vha->host_no));
274 } 279 }
275 280
276 return rval; 281 return rval;
277} 282}
278 283
279int 284int
280qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr, 285qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
281 uint32_t risc_code_size) 286 uint32_t risc_code_size)
282{ 287{
283 int rval; 288 int rval;
289 struct qla_hw_data *ha = vha->hw;
284 mbx_cmd_t mc; 290 mbx_cmd_t mc;
285 mbx_cmd_t *mcp = &mc; 291 mbx_cmd_t *mcp = &mc;
286 292
287 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 293 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
288 294
289 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 295 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
290 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 296 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -312,13 +318,13 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
312 mcp->in_mb = MBX_0; 318 mcp->in_mb = MBX_0;
313 mcp->tov = MBX_TOV_SECONDS; 319 mcp->tov = MBX_TOV_SECONDS;
314 mcp->flags = 0; 320 mcp->flags = 0;
315 rval = qla2x00_mailbox_command(ha, mcp); 321 rval = qla2x00_mailbox_command(vha, mcp);
316 322
317 if (rval != QLA_SUCCESS) { 323 if (rval != QLA_SUCCESS) {
318 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 324 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
319 ha->host_no, rval, mcp->mb[0])); 325 vha->host_no, rval, mcp->mb[0]));
320 } else { 326 } else {
321 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 327 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
322 } 328 }
323 329
324 return rval; 330 return rval;
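
[Editorial sketch] Every mailbox wrapper follows the pattern qla2x00_load_ram shows above: fill mcp->mb[], declare which registers go out and come back via the out_mb/in_mb bitmasks, set a timeout, and hand the whole descriptor to qla2x00_mailbox_command(vha, mcp). A stand-alone mock of that calling convention (the struct layout, MBX_n bits, opcode and status values here are simplified placeholders, not the driver's definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define MBX_0 (1u << 0)
    #define MBX_1 (1u << 1)
    #define MBX_2 (1u << 2)

    typedef struct {
        uint16_t mb[32];            /* mailbox registers, in and out */
        uint32_t out_mb, in_mb;     /* which registers to write / read back */
        int      tov;               /* timeout, seconds */
    } mbx_cmd_t;

    /* Pretend firmware: echoes a "command complete" style status in mb[0]. */
    static int mailbox_command(mbx_cmd_t *mcp)
    {
        printf("sending mb0=0x%x (out mask 0x%x)\n", mcp->mb[0], mcp->out_mb);
        if (mcp->in_mb & MBX_0)
            mcp->mb[0] = 0x4000;    /* placeholder completion status */
        return 0;
    }

    int main(void)
    {
        mbx_cmd_t mc = { .mb = { 0x08 /* placeholder opcode */ },
                         .out_mb = MBX_0,
                         .in_mb = MBX_2 | MBX_1 | MBX_0,
                         .tov = 30 };

        if (mailbox_command(&mc) == 0)
            printf("completed, mb[0]=0x%x\n", mc.mb[0]);
        return 0;
    }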
@@ -340,13 +346,14 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
340 * Kernel context. 346 * Kernel context.
341 */ 347 */
342int 348int
343qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr) 349qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
344{ 350{
345 int rval; 351 int rval;
352 struct qla_hw_data *ha = vha->hw;
346 mbx_cmd_t mc; 353 mbx_cmd_t mc;
347 mbx_cmd_t *mcp = &mc; 354 mbx_cmd_t *mcp = &mc;
348 355
349 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 356 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
350 357
351 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 358 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
352 mcp->out_mb = MBX_0; 359 mcp->out_mb = MBX_0;
@@ -369,18 +376,18 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
369 376
370 mcp->tov = MBX_TOV_SECONDS; 377 mcp->tov = MBX_TOV_SECONDS;
371 mcp->flags = 0; 378 mcp->flags = 0;
372 rval = qla2x00_mailbox_command(ha, mcp); 379 rval = qla2x00_mailbox_command(vha, mcp);
373 380
374 if (rval != QLA_SUCCESS) { 381 if (rval != QLA_SUCCESS) {
375 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 382 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
376 ha->host_no, rval, mcp->mb[0])); 383 vha->host_no, rval, mcp->mb[0]));
377 } else { 384 } else {
378 if (IS_FWI2_CAPABLE(ha)) { 385 if (IS_FWI2_CAPABLE(ha)) {
379 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 386 DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
380 __func__, ha->host_no, mcp->mb[1])); 387 __func__, vha->host_no, mcp->mb[1]));
381 } else { 388 } else {
382 DEBUG11(printk("%s(%ld): done.\n", __func__, 389 DEBUG11(printk("%s(%ld): done.\n", __func__,
383 ha->host_no)); 390 vha->host_no));
384 } 391 }
385 } 392 }
386 393
@@ -404,28 +411,28 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
404 * Kernel context. 411 * Kernel context.
405 */ 412 */
406void 413void
407qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor, 414qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
408 uint16_t *subminor, uint16_t *attributes, uint32_t *memory) 415 uint16_t *subminor, uint16_t *attributes, uint32_t *memory)
409{ 416{
410 int rval; 417 int rval;
411 mbx_cmd_t mc; 418 mbx_cmd_t mc;
412 mbx_cmd_t *mcp = &mc; 419 mbx_cmd_t *mcp = &mc;
413 420
414 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 421 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
415 422
416 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 423 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
417 mcp->out_mb = MBX_0; 424 mcp->out_mb = MBX_0;
418 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 425 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
419 mcp->flags = 0; 426 mcp->flags = 0;
420 mcp->tov = MBX_TOV_SECONDS; 427 mcp->tov = MBX_TOV_SECONDS;
421 rval = qla2x00_mailbox_command(ha, mcp); 428 rval = qla2x00_mailbox_command(vha, mcp);
422 429
423 /* Return mailbox data. */ 430 /* Return mailbox data. */
424 *major = mcp->mb[1]; 431 *major = mcp->mb[1];
425 *minor = mcp->mb[2]; 432 *minor = mcp->mb[2];
426 *subminor = mcp->mb[3]; 433 *subminor = mcp->mb[3];
427 *attributes = mcp->mb[6]; 434 *attributes = mcp->mb[6];
428 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 435 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
429 *memory = 0x1FFFF; /* Defaults to 128KB. */ 436 *memory = 0x1FFFF; /* Defaults to 128KB. */
430 else 437 else
431 *memory = (mcp->mb[5] << 16) | mcp->mb[4]; 438 *memory = (mcp->mb[5] << 16) | mcp->mb[4];
@@ -433,10 +440,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
433 if (rval != QLA_SUCCESS) { 440 if (rval != QLA_SUCCESS) {
434 /*EMPTY*/ 441 /*EMPTY*/
435 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 442 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
436 ha->host_no, rval)); 443 vha->host_no, rval));
437 } else { 444 } else {
438 /*EMPTY*/ 445 /*EMPTY*/
439 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 446 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
440 } 447 }
441} 448}
442 449
@@ -455,32 +462,32 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
455 * Kernel context. 462 * Kernel context.
456 */ 463 */
457int 464int
458qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts) 465qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
459{ 466{
460 int rval; 467 int rval;
461 mbx_cmd_t mc; 468 mbx_cmd_t mc;
462 mbx_cmd_t *mcp = &mc; 469 mbx_cmd_t *mcp = &mc;
463 470
464 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 471 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
465 472
466 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 473 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
467 mcp->out_mb = MBX_0; 474 mcp->out_mb = MBX_0;
468 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 475 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
469 mcp->tov = MBX_TOV_SECONDS; 476 mcp->tov = MBX_TOV_SECONDS;
470 mcp->flags = 0; 477 mcp->flags = 0;
471 rval = qla2x00_mailbox_command(ha, mcp); 478 rval = qla2x00_mailbox_command(vha, mcp);
472 479
473 if (rval != QLA_SUCCESS) { 480 if (rval != QLA_SUCCESS) {
474 /*EMPTY*/ 481 /*EMPTY*/
475 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 482 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
476 ha->host_no, rval)); 483 vha->host_no, rval));
477 } else { 484 } else {
478 fwopts[0] = mcp->mb[0]; 485 fwopts[0] = mcp->mb[0];
479 fwopts[1] = mcp->mb[1]; 486 fwopts[1] = mcp->mb[1];
480 fwopts[2] = mcp->mb[2]; 487 fwopts[2] = mcp->mb[2];
481 fwopts[3] = mcp->mb[3]; 488 fwopts[3] = mcp->mb[3];
482 489
483 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 490 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
484 } 491 }
485 492
486 return rval; 493 return rval;
@@ -502,13 +509,13 @@ qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
502 * Kernel context. 509 * Kernel context.
503 */ 510 */
504int 511int
505qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts) 512qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
506{ 513{
507 int rval; 514 int rval;
508 mbx_cmd_t mc; 515 mbx_cmd_t mc;
509 mbx_cmd_t *mcp = &mc; 516 mbx_cmd_t *mcp = &mc;
510 517
511 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 518 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
512 519
513 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 520 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
514 mcp->mb[1] = fwopts[1]; 521 mcp->mb[1] = fwopts[1];
@@ -516,7 +523,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
516 mcp->mb[3] = fwopts[3]; 523 mcp->mb[3] = fwopts[3];
517 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 524 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
518 mcp->in_mb = MBX_0; 525 mcp->in_mb = MBX_0;
519 if (IS_FWI2_CAPABLE(ha)) { 526 if (IS_FWI2_CAPABLE(vha->hw)) {
520 mcp->in_mb |= MBX_1; 527 mcp->in_mb |= MBX_1;
521 } else { 528 } else {
522 mcp->mb[10] = fwopts[10]; 529 mcp->mb[10] = fwopts[10];
@@ -526,17 +533,17 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
526 } 533 }
527 mcp->tov = MBX_TOV_SECONDS; 534 mcp->tov = MBX_TOV_SECONDS;
528 mcp->flags = 0; 535 mcp->flags = 0;
529 rval = qla2x00_mailbox_command(ha, mcp); 536 rval = qla2x00_mailbox_command(vha, mcp);
530 537
531 fwopts[0] = mcp->mb[0]; 538 fwopts[0] = mcp->mb[0];
532 539
533 if (rval != QLA_SUCCESS) { 540 if (rval != QLA_SUCCESS) {
534 /*EMPTY*/ 541 /*EMPTY*/
535 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__, 542 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__,
536 ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 543 vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
537 } else { 544 } else {
538 /*EMPTY*/ 545 /*EMPTY*/
539 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 546 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
540 } 547 }
541 548
542 return rval; 549 return rval;
@@ -558,13 +565,14 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
558 * Kernel context. 565 * Kernel context.
559 */ 566 */
560int 567int
561qla2x00_mbx_reg_test(scsi_qla_host_t *ha) 568qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
562{ 569{
563 int rval; 570 int rval;
571 struct qla_hw_data *ha = vha->hw;
564 mbx_cmd_t mc; 572 mbx_cmd_t mc;
565 mbx_cmd_t *mcp = &mc; 573 mbx_cmd_t *mcp = &mc;
566 574
567 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no)); 575 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no));
568 576
569 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 577 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
570 mcp->mb[1] = 0xAAAA; 578 mcp->mb[1] = 0xAAAA;
@@ -578,7 +586,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
578 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 586 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
579 mcp->tov = MBX_TOV_SECONDS; 587 mcp->tov = MBX_TOV_SECONDS;
580 mcp->flags = 0; 588 mcp->flags = 0;
581 rval = qla2x00_mailbox_command(ha, mcp); 589 rval = qla2x00_mailbox_command(vha, mcp);
582 590
583 if (rval == QLA_SUCCESS) { 591 if (rval == QLA_SUCCESS) {
584 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || 592 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
@@ -591,7 +599,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
591 struct device_reg_24xx __iomem *reg = 599 struct device_reg_24xx __iomem *reg =
592 &ha->iobase->isp24; 600 &ha->iobase->isp24;
593 601
594 qla2xxx_hw_event_log(ha, HW_EVENT_ISP_ERR, 0, 602 qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
595 LSW(RD_REG_DWORD(&reg->hccr)), 603 LSW(RD_REG_DWORD(&reg->hccr)),
596 LSW(RD_REG_DWORD(&reg->istatus))); 604 LSW(RD_REG_DWORD(&reg->istatus)));
597 } 605 }
@@ -600,11 +608,11 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
600 if (rval != QLA_SUCCESS) { 608 if (rval != QLA_SUCCESS) {
601 /*EMPTY*/ 609 /*EMPTY*/
602 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 610 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
603 ha->host_no, rval)); 611 vha->host_no, rval));
604 } else { 612 } else {
605 /*EMPTY*/ 613 /*EMPTY*/
606 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 614 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
607 ha->host_no)); 615 vha->host_no));
608 } 616 }
609 617
610 return rval; 618 return rval;
@@ -626,18 +634,18 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
626 * Kernel context. 634 * Kernel context.
627 */ 635 */
628int 636int
629qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr) 637qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
630{ 638{
631 int rval; 639 int rval;
632 mbx_cmd_t mc; 640 mbx_cmd_t mc;
633 mbx_cmd_t *mcp = &mc; 641 mbx_cmd_t *mcp = &mc;
634 642
635 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 643 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
636 644
637 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 645 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
638 mcp->out_mb = MBX_0; 646 mcp->out_mb = MBX_0;
639 mcp->in_mb = MBX_0; 647 mcp->in_mb = MBX_0;
640 if (IS_FWI2_CAPABLE(ha)) { 648 if (IS_FWI2_CAPABLE(vha->hw)) {
641 mcp->mb[1] = MSW(risc_addr); 649 mcp->mb[1] = MSW(risc_addr);
642 mcp->mb[2] = LSW(risc_addr); 650 mcp->mb[2] = LSW(risc_addr);
643 mcp->out_mb |= MBX_2|MBX_1; 651 mcp->out_mb |= MBX_2|MBX_1;
@@ -650,14 +658,14 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
650 658
651 mcp->tov = MBX_TOV_SECONDS; 659 mcp->tov = MBX_TOV_SECONDS;
652 mcp->flags = 0; 660 mcp->flags = 0;
653 rval = qla2x00_mailbox_command(ha, mcp); 661 rval = qla2x00_mailbox_command(vha, mcp);
654 662
655 if (rval != QLA_SUCCESS) { 663 if (rval != QLA_SUCCESS) {
656 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 664 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
657 ha->host_no, rval, IS_FWI2_CAPABLE(ha) ? 665 vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ?
658 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])); 666 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
659 } else { 667 } else {
660 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 668 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
661 } 669 }
662 670
663 return rval; 671 return rval;
@@ -682,7 +690,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
682 * Kernel context. 690 * Kernel context.
683 */ 691 */
684static int 692static int
685qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer, 693qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
686 dma_addr_t phys_addr, size_t size, uint32_t tov) 694 dma_addr_t phys_addr, size_t size, uint32_t tov)
687{ 695{
688 int rval; 696 int rval;
@@ -699,30 +707,28 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
699 mcp->in_mb = MBX_2|MBX_0; 707 mcp->in_mb = MBX_2|MBX_0;
700 mcp->tov = tov; 708 mcp->tov = tov;
701 mcp->flags = 0; 709 mcp->flags = 0;
702 rval = qla2x00_mailbox_command(ha, mcp); 710 rval = qla2x00_mailbox_command(vha, mcp);
703 711
704 if (rval != QLA_SUCCESS) { 712 if (rval != QLA_SUCCESS) {
705 /*EMPTY*/ 713 /*EMPTY*/
706 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 714 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
707 ha->host_no, rval)); 715 vha->host_no, rval));
708 DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
709 ha->host_no, rval));
710 } else { 716 } else {
711 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 717 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
712 718
713 /* Mask reserved bits. */ 719 /* Mask reserved bits. */
714 sts_entry->entry_status &= 720 sts_entry->entry_status &=
715 IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX :RF_MASK; 721 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
716 } 722 }
717 723
718 return rval; 724 return rval;
719} 725}
720 726
721int 727int
722qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr, 728qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
723 size_t size) 729 size_t size)
724{ 730{
725 return qla2x00_issue_iocb_timeout(ha, buffer, phys_addr, size, 731 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
726 MBX_TOV_SECONDS); 732 MBX_TOV_SECONDS);
727} 733}
728 734
@@ -741,22 +747,23 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr,
741 * Kernel context. 747 * Kernel context.
742 */ 748 */
743int 749int
744qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp) 750qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
745{ 751{
746 unsigned long flags = 0; 752 unsigned long flags = 0;
747 fc_port_t *fcport; 753 fc_port_t *fcport;
748 int rval; 754 int rval;
749 uint32_t handle; 755 uint32_t handle = 0;
750 mbx_cmd_t mc; 756 mbx_cmd_t mc;
751 mbx_cmd_t *mcp = &mc; 757 mbx_cmd_t *mcp = &mc;
758 struct qla_hw_data *ha = vha->hw;
752 759
753 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no)); 760 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
754 761
755 fcport = sp->fcport; 762 fcport = sp->fcport;
756 763
757 spin_lock_irqsave(&ha->hardware_lock, flags); 764 spin_lock_irqsave(&ha->hardware_lock, flags);
758 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 765 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
759 if (ha->outstanding_cmds[handle] == sp) 766 if (req->outstanding_cmds[handle] == sp)
760 break; 767 break;
761 } 768 }
762 spin_unlock_irqrestore(&ha->hardware_lock, flags); 769 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -778,14 +785,14 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
778 mcp->in_mb = MBX_0; 785 mcp->in_mb = MBX_0;
779 mcp->tov = MBX_TOV_SECONDS; 786 mcp->tov = MBX_TOV_SECONDS;
780 mcp->flags = 0; 787 mcp->flags = 0;
781 rval = qla2x00_mailbox_command(ha, mcp); 788 rval = qla2x00_mailbox_command(vha, mcp);
782 789
783 if (rval != QLA_SUCCESS) { 790 if (rval != QLA_SUCCESS) {
784 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 791 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
785 ha->host_no, rval)); 792 vha->host_no, rval));
786 } else { 793 } else {
787 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 794 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
788 ha->host_no)); 795 vha->host_no));
789 } 796 }
790 797
791 return rval; 798 return rval;
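Besides the vha conversion, qla2x00_abort_command() grows a struct req_que argument: the outstanding-command table the handle is looked up in now lives on the request queue (req->outstanding_cmds) rather than on the host, though the scan is still serialized by ha->hardware_lock. An illustrative call from a context that only uses the default queue; the queue-0 choice and the surrounding error handling are assumptions of this sketch, not part of the patch.

    /* sp is an outstanding srb_t on the default request queue. */
    struct req_que *req = vha->hw->req_q_map[0];

    if (qla2x00_abort_command(vha, sp, req) != QLA_SUCCESS)
        DEBUG2_3_11(printk("%s(%ld): abort of sp %p failed.\n",
            __func__, vha->host_no, sp));
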
@@ -797,40 +804,45 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
797 int rval, rval2; 804 int rval, rval2;
798 mbx_cmd_t mc; 805 mbx_cmd_t mc;
799 mbx_cmd_t *mcp = &mc; 806 mbx_cmd_t *mcp = &mc;
800 scsi_qla_host_t *ha; 807 scsi_qla_host_t *vha;
808 struct req_que *req;
809 struct rsp_que *rsp;
801 810
802 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 811 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
803 812
804 l = l; 813 l = l;
805 ha = fcport->ha; 814 vha = fcport->vha;
815 req = vha->hw->req_q_map[0];
816 rsp = vha->hw->rsp_q_map[0];
806 mcp->mb[0] = MBC_ABORT_TARGET; 817 mcp->mb[0] = MBC_ABORT_TARGET;
807 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 818 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
808 if (HAS_EXTENDED_IDS(ha)) { 819 if (HAS_EXTENDED_IDS(vha->hw)) {
809 mcp->mb[1] = fcport->loop_id; 820 mcp->mb[1] = fcport->loop_id;
810 mcp->mb[10] = 0; 821 mcp->mb[10] = 0;
811 mcp->out_mb |= MBX_10; 822 mcp->out_mb |= MBX_10;
812 } else { 823 } else {
813 mcp->mb[1] = fcport->loop_id << 8; 824 mcp->mb[1] = fcport->loop_id << 8;
814 } 825 }
815 mcp->mb[2] = ha->loop_reset_delay; 826 mcp->mb[2] = vha->hw->loop_reset_delay;
816 mcp->mb[9] = ha->vp_idx; 827 mcp->mb[9] = vha->vp_idx;
817 828
818 mcp->in_mb = MBX_0; 829 mcp->in_mb = MBX_0;
819 mcp->tov = MBX_TOV_SECONDS; 830 mcp->tov = MBX_TOV_SECONDS;
820 mcp->flags = 0; 831 mcp->flags = 0;
821 rval = qla2x00_mailbox_command(ha, mcp); 832 rval = qla2x00_mailbox_command(vha, mcp);
822 if (rval != QLA_SUCCESS) { 833 if (rval != QLA_SUCCESS) {
823 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 834 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
824 ha->host_no, rval)); 835 vha->host_no, rval));
825 } 836 }
826 837
827 /* Issue marker IOCB. */ 838 /* Issue marker IOCB. */
828 rval2 = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID); 839 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
840 MK_SYNC_ID);
829 if (rval2 != QLA_SUCCESS) { 841 if (rval2 != QLA_SUCCESS) {
830 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 842 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
831 "(%x).\n", __func__, ha->host_no, rval2)); 843 "(%x).\n", __func__, vha->host_no, rval2));
832 } else { 844 } else {
833 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 845 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
834 } 846 }
835 847
836 return rval; 848 return rval;
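The loop-ID packing in qla2x00_abort_target() above recurs in the LUN-reset, get-port-name and fabric-logout helpers below; the conversion only moves the capability check onto vha->hw. Adapters with extended IDs take the full loop ID in mb[1] with the option byte in mb[10]; older adapters pack the loop ID into the high byte of mb[1]. The branch, as it appears in the surrounding hunks:

    if (HAS_EXTENDED_IDS(vha->hw)) {
        mcp->mb[1] = fcport->loop_id;       /* full 16-bit loop ID */
        mcp->mb[10] = 0;                    /* options travel in mb[10] */
        mcp->out_mb |= MBX_10;
    } else {
        mcp->mb[1] = fcport->loop_id << 8;  /* loop ID in the high byte */
    }
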
@@ -842,37 +854,42 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
842 int rval, rval2; 854 int rval, rval2;
843 mbx_cmd_t mc; 855 mbx_cmd_t mc;
844 mbx_cmd_t *mcp = &mc; 856 mbx_cmd_t *mcp = &mc;
845 scsi_qla_host_t *ha; 857 scsi_qla_host_t *vha;
858 struct req_que *req;
859 struct rsp_que *rsp;
846 860
847 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 861 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
848 862
849 ha = fcport->ha; 863 vha = fcport->vha;
864 req = vha->hw->req_q_map[0];
865 rsp = vha->hw->rsp_q_map[0];
850 mcp->mb[0] = MBC_LUN_RESET; 866 mcp->mb[0] = MBC_LUN_RESET;
851 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 867 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
852 if (HAS_EXTENDED_IDS(ha)) 868 if (HAS_EXTENDED_IDS(vha->hw))
853 mcp->mb[1] = fcport->loop_id; 869 mcp->mb[1] = fcport->loop_id;
854 else 870 else
855 mcp->mb[1] = fcport->loop_id << 8; 871 mcp->mb[1] = fcport->loop_id << 8;
856 mcp->mb[2] = l; 872 mcp->mb[2] = l;
857 mcp->mb[3] = 0; 873 mcp->mb[3] = 0;
858 mcp->mb[9] = ha->vp_idx; 874 mcp->mb[9] = vha->vp_idx;
859 875
860 mcp->in_mb = MBX_0; 876 mcp->in_mb = MBX_0;
861 mcp->tov = MBX_TOV_SECONDS; 877 mcp->tov = MBX_TOV_SECONDS;
862 mcp->flags = 0; 878 mcp->flags = 0;
863 rval = qla2x00_mailbox_command(ha, mcp); 879 rval = qla2x00_mailbox_command(vha, mcp);
864 if (rval != QLA_SUCCESS) { 880 if (rval != QLA_SUCCESS) {
865 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 881 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
866 ha->host_no, rval)); 882 vha->host_no, rval));
867 } 883 }
868 884
869 /* Issue marker IOCB. */ 885 /* Issue marker IOCB. */
870 rval2 = qla2x00_marker(ha, fcport->loop_id, l, MK_SYNC_ID_LUN); 886 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
887 MK_SYNC_ID_LUN);
871 if (rval2 != QLA_SUCCESS) { 888 if (rval2 != QLA_SUCCESS) {
872 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 889 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
873 "(%x).\n", __func__, ha->host_no, rval2)); 890 "(%x).\n", __func__, vha->host_no, rval2));
874 } else { 891 } else {
875 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 892 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
876 } 893 }
877 894
878 return rval; 895 return rval;
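Both reset helpers above finish by posting a marker IOCB, and qla2x00_marker() now takes an explicit request/response queue pair in addition to vha; both callers select queue 0 from the hw queue maps. A condensed form of that call as used in the LUN-reset hunk (queue selection per the patch; the comment wording is this sketch's interpretation):

    struct req_que *req = vha->hw->req_q_map[0];
    struct rsp_que *rsp = vha->hw->rsp_q_map[0];

    /* Ask the firmware to resynchronize state for this ID/LUN. */
    rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
        MK_SYNC_ID_LUN);
    if (rval2 != QLA_SUCCESS)
        DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
            "(%x).\n", __func__, vha->host_no, rval2));
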
@@ -899,7 +916,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
899 * Kernel context. 916 * Kernel context.
900 */ 917 */
901int 918int
902qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa, 919qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
903 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) 920 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
904{ 921{
905 int rval; 922 int rval;
@@ -907,15 +924,15 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
907 mbx_cmd_t *mcp = &mc; 924 mbx_cmd_t *mcp = &mc;
908 925
909 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 926 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
910 ha->host_no)); 927 vha->host_no));
911 928
912 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 929 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
913 mcp->mb[9] = ha->vp_idx; 930 mcp->mb[9] = vha->vp_idx;
914 mcp->out_mb = MBX_9|MBX_0; 931 mcp->out_mb = MBX_9|MBX_0;
915 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 932 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
916 mcp->tov = MBX_TOV_SECONDS; 933 mcp->tov = MBX_TOV_SECONDS;
917 mcp->flags = 0; 934 mcp->flags = 0;
918 rval = qla2x00_mailbox_command(ha, mcp); 935 rval = qla2x00_mailbox_command(vha, mcp);
919 if (mcp->mb[0] == MBS_COMMAND_ERROR) 936 if (mcp->mb[0] == MBS_COMMAND_ERROR)
920 rval = QLA_COMMAND_ERROR; 937 rval = QLA_COMMAND_ERROR;
921 else if (mcp->mb[0] == MBS_INVALID_COMMAND) 938 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
@@ -932,11 +949,11 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
932 if (rval != QLA_SUCCESS) { 949 if (rval != QLA_SUCCESS) {
933 /*EMPTY*/ 950 /*EMPTY*/
934 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 951 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
935 ha->host_no, rval)); 952 vha->host_no, rval));
936 } else { 953 } else {
937 /*EMPTY*/ 954 /*EMPTY*/
938 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 955 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
939 ha->host_no)); 956 vha->host_no));
940 } 957 }
941 958
942 return rval; 959 return rval;
@@ -958,7 +975,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
958 * Kernel context. 975 * Kernel context.
959 */ 976 */
960int 977int
961qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov, 978qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
962 uint16_t *r_a_tov) 979 uint16_t *r_a_tov)
963{ 980{
964 int rval; 981 int rval;
@@ -967,19 +984,19 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
967 mbx_cmd_t *mcp = &mc; 984 mbx_cmd_t *mcp = &mc;
968 985
969 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 986 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
970 ha->host_no)); 987 vha->host_no));
971 988
972 mcp->mb[0] = MBC_GET_RETRY_COUNT; 989 mcp->mb[0] = MBC_GET_RETRY_COUNT;
973 mcp->out_mb = MBX_0; 990 mcp->out_mb = MBX_0;
974 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 991 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
975 mcp->tov = MBX_TOV_SECONDS; 992 mcp->tov = MBX_TOV_SECONDS;
976 mcp->flags = 0; 993 mcp->flags = 0;
977 rval = qla2x00_mailbox_command(ha, mcp); 994 rval = qla2x00_mailbox_command(vha, mcp);
978 995
979 if (rval != QLA_SUCCESS) { 996 if (rval != QLA_SUCCESS) {
980 /*EMPTY*/ 997 /*EMPTY*/
981 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 998 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
982 ha->host_no, mcp->mb[0])); 999 vha->host_no, mcp->mb[0]));
983 } else { 1000 } else {
984 /* Convert returned data and check our values. */ 1001 /* Convert returned data and check our values. */
985 *r_a_tov = mcp->mb[3] / 2; 1002 *r_a_tov = mcp->mb[3] / 2;
@@ -991,7 +1008,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
991 } 1008 }
992 1009
993 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 1010 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
994 "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov)); 1011 "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov));
995 } 1012 }
996 1013
997 return rval; 1014 return rval;
@@ -1015,14 +1032,15 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
1015 * Kernel context. 1032 * Kernel context.
1016 */ 1033 */
1017int 1034int
1018qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size) 1035qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1019{ 1036{
1020 int rval; 1037 int rval;
1021 mbx_cmd_t mc; 1038 mbx_cmd_t mc;
1022 mbx_cmd_t *mcp = &mc; 1039 mbx_cmd_t *mcp = &mc;
1040 struct qla_hw_data *ha = vha->hw;
1023 1041
1024 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1042 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
1025 ha->host_no)); 1043 vha->host_no));
1026 1044
1027 if (ha->flags.npiv_supported) 1045 if (ha->flags.npiv_supported)
1028 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 1046 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
@@ -1040,17 +1058,17 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1040 mcp->buf_size = size; 1058 mcp->buf_size = size;
1041 mcp->flags = MBX_DMA_OUT; 1059 mcp->flags = MBX_DMA_OUT;
1042 mcp->tov = MBX_TOV_SECONDS; 1060 mcp->tov = MBX_TOV_SECONDS;
1043 rval = qla2x00_mailbox_command(ha, mcp); 1061 rval = qla2x00_mailbox_command(vha, mcp);
1044 1062
1045 if (rval != QLA_SUCCESS) { 1063 if (rval != QLA_SUCCESS) {
1046 /*EMPTY*/ 1064 /*EMPTY*/
1047 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1065 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
1048 "mb0=%x.\n", 1066 "mb0=%x.\n",
1049 ha->host_no, rval, mcp->mb[0])); 1067 vha->host_no, rval, mcp->mb[0]));
1050 } else { 1068 } else {
1051 /*EMPTY*/ 1069 /*EMPTY*/
1052 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1070 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
1053 ha->host_no)); 1071 vha->host_no));
1054 } 1072 }
1055 1073
1056 return rval; 1074 return rval;
@@ -1073,7 +1091,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1073 * Kernel context. 1091 * Kernel context.
1074 */ 1092 */
1075int 1093int
1076qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt) 1094qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1077{ 1095{
1078 int rval; 1096 int rval;
1079 mbx_cmd_t mc; 1097 mbx_cmd_t mc;
@@ -1081,14 +1099,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1081 port_database_t *pd; 1099 port_database_t *pd;
1082 struct port_database_24xx *pd24; 1100 struct port_database_24xx *pd24;
1083 dma_addr_t pd_dma; 1101 dma_addr_t pd_dma;
1102 struct qla_hw_data *ha = vha->hw;
1084 1103
1085 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1104 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1086 1105
1087 pd24 = NULL; 1106 pd24 = NULL;
1088 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1107 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1089 if (pd == NULL) { 1108 if (pd == NULL) {
1090 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database " 1109 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database "
1091 "structure.\n", __func__, ha->host_no)); 1110 "structure.\n", __func__, vha->host_no));
1092 return QLA_MEMORY_ALLOC_FAILED; 1111 return QLA_MEMORY_ALLOC_FAILED;
1093 } 1112 }
1094 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1113 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1100,7 +1119,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1100 mcp->mb[3] = LSW(pd_dma); 1119 mcp->mb[3] = LSW(pd_dma);
1101 mcp->mb[6] = MSW(MSD(pd_dma)); 1120 mcp->mb[6] = MSW(MSD(pd_dma));
1102 mcp->mb[7] = LSW(MSD(pd_dma)); 1121 mcp->mb[7] = LSW(MSD(pd_dma));
1103 mcp->mb[9] = ha->vp_idx; 1122 mcp->mb[9] = vha->vp_idx;
1104 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1123 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1105 mcp->in_mb = MBX_0; 1124 mcp->in_mb = MBX_0;
1106 if (IS_FWI2_CAPABLE(ha)) { 1125 if (IS_FWI2_CAPABLE(ha)) {
@@ -1120,7 +1139,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1120 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; 1139 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1121 mcp->flags = MBX_DMA_IN; 1140 mcp->flags = MBX_DMA_IN;
1122 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1141 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1123 rval = qla2x00_mailbox_command(ha, mcp); 1142 rval = qla2x00_mailbox_command(vha, mcp);
1124 if (rval != QLA_SUCCESS) 1143 if (rval != QLA_SUCCESS)
1125 goto gpd_error_out; 1144 goto gpd_error_out;
1126 1145
@@ -1132,7 +1151,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1132 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1151 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1133 DEBUG2(printk("%s(%ld): Unable to verify " 1152 DEBUG2(printk("%s(%ld): Unable to verify "
1134 "login-state (%x/%x) for loop_id %x\n", 1153 "login-state (%x/%x) for loop_id %x\n",
1135 __func__, ha->host_no, 1154 __func__, vha->host_no,
1136 pd24->current_login_state, 1155 pd24->current_login_state,
1137 pd24->last_login_state, fcport->loop_id)); 1156 pd24->last_login_state, fcport->loop_id));
1138 rval = QLA_FUNCTION_FAILED; 1157 rval = QLA_FUNCTION_FAILED;
@@ -1192,9 +1211,9 @@ gpd_error_out:
1192 1211
1193 if (rval != QLA_SUCCESS) { 1212 if (rval != QLA_SUCCESS) {
1194 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 1213 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
1195 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1214 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1196 } else { 1215 } else {
1197 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1216 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1198 } 1217 }
1199 1218
1200 return rval; 1219 return rval;
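qla2x00_get_port_database() shows the second recurring shape in this file: mailbox commands that exchange a DMA buffer with the firmware. The scratch buffer comes from the per-HBA DMA pool (reached through vha->hw after the conversion), its bus address is split across the mailbox registers 16 bits at a time, and the timeout is derived from the login timeout as (login_timeout * 2) + (login_timeout / 2), i.e. 2.5 login-timeout periods. A trimmed sketch of that pattern, with the option handling and field copy-out elided:

    struct qla_hw_data *ha = vha->hw;
    port_database_t *pd;
    dma_addr_t pd_dma;

    pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
    if (pd == NULL)
        return QLA_MEMORY_ALLOC_FAILED;
    memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));

    mcp->mb[2] = MSW(pd_dma);           /* bus address, low dword */
    mcp->mb[3] = LSW(pd_dma);
    mcp->mb[6] = MSW(MSD(pd_dma));      /* bus address, high dword */
    mcp->mb[7] = LSW(MSD(pd_dma));
    mcp->flags = MBX_DMA_IN;
    mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
    rval = qla2x00_mailbox_command(vha, mcp);

    /* ... on success, copy the fields of interest out of *pd ... */

    dma_pool_free(ha->s_dma_pool, pd, pd_dma);
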
@@ -1217,21 +1236,21 @@ gpd_error_out:
1217 * Kernel context. 1236 * Kernel context.
1218 */ 1237 */
1219int 1238int
1220qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states) 1239qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1221{ 1240{
1222 int rval; 1241 int rval;
1223 mbx_cmd_t mc; 1242 mbx_cmd_t mc;
1224 mbx_cmd_t *mcp = &mc; 1243 mbx_cmd_t *mcp = &mc;
1225 1244
1226 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1245 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
1227 ha->host_no)); 1246 vha->host_no));
1228 1247
1229 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1248 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1230 mcp->out_mb = MBX_0; 1249 mcp->out_mb = MBX_0;
1231 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1250 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1232 mcp->tov = MBX_TOV_SECONDS; 1251 mcp->tov = MBX_TOV_SECONDS;
1233 mcp->flags = 0; 1252 mcp->flags = 0;
1234 rval = qla2x00_mailbox_command(ha, mcp); 1253 rval = qla2x00_mailbox_command(vha, mcp);
1235 1254
1236 /* Return firmware states. */ 1255 /* Return firmware states. */
1237 states[0] = mcp->mb[1]; 1256 states[0] = mcp->mb[1];
@@ -1241,11 +1260,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
1241 if (rval != QLA_SUCCESS) { 1260 if (rval != QLA_SUCCESS) {
1242 /*EMPTY*/ 1261 /*EMPTY*/
1243 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1262 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
1244 "failed=%x.\n", ha->host_no, rval)); 1263 "failed=%x.\n", vha->host_no, rval));
1245 } else { 1264 } else {
1246 /*EMPTY*/ 1265 /*EMPTY*/
1247 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1266 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
1248 ha->host_no)); 1267 vha->host_no));
1249 } 1268 }
1250 1269
1251 return rval; 1270 return rval;
@@ -1270,7 +1289,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
1270 * Kernel context. 1289 * Kernel context.
1271 */ 1290 */
1272int 1291int
1273qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name, 1292qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1274 uint8_t opt) 1293 uint8_t opt)
1275{ 1294{
1276 int rval; 1295 int rval;
@@ -1278,12 +1297,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1278 mbx_cmd_t *mcp = &mc; 1297 mbx_cmd_t *mcp = &mc;
1279 1298
1280 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1299 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
1281 ha->host_no)); 1300 vha->host_no));
1282 1301
1283 mcp->mb[0] = MBC_GET_PORT_NAME; 1302 mcp->mb[0] = MBC_GET_PORT_NAME;
1284 mcp->mb[9] = ha->vp_idx; 1303 mcp->mb[9] = vha->vp_idx;
1285 mcp->out_mb = MBX_9|MBX_1|MBX_0; 1304 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1286 if (HAS_EXTENDED_IDS(ha)) { 1305 if (HAS_EXTENDED_IDS(vha->hw)) {
1287 mcp->mb[1] = loop_id; 1306 mcp->mb[1] = loop_id;
1288 mcp->mb[10] = opt; 1307 mcp->mb[10] = opt;
1289 mcp->out_mb |= MBX_10; 1308 mcp->out_mb |= MBX_10;
@@ -1294,12 +1313,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1294 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1313 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1295 mcp->tov = MBX_TOV_SECONDS; 1314 mcp->tov = MBX_TOV_SECONDS;
1296 mcp->flags = 0; 1315 mcp->flags = 0;
1297 rval = qla2x00_mailbox_command(ha, mcp); 1316 rval = qla2x00_mailbox_command(vha, mcp);
1298 1317
1299 if (rval != QLA_SUCCESS) { 1318 if (rval != QLA_SUCCESS) {
1300 /*EMPTY*/ 1319 /*EMPTY*/
1301 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1320 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
1302 ha->host_no, rval)); 1321 vha->host_no, rval));
1303 } else { 1322 } else {
1304 if (name != NULL) { 1323 if (name != NULL) {
1305 /* This function returns name in big endian. */ 1324 /* This function returns name in big endian. */
@@ -1314,7 +1333,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1314 } 1333 }
1315 1334
1316 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1335 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
1317 ha->host_no)); 1336 vha->host_no));
1318 } 1337 }
1319 1338
1320 return rval; 1339 return rval;
@@ -1336,45 +1355,45 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1336 * Kernel context. 1355 * Kernel context.
1337 */ 1356 */
1338int 1357int
1339qla2x00_lip_reset(scsi_qla_host_t *ha) 1358qla2x00_lip_reset(scsi_qla_host_t *vha)
1340{ 1359{
1341 int rval; 1360 int rval;
1342 mbx_cmd_t mc; 1361 mbx_cmd_t mc;
1343 mbx_cmd_t *mcp = &mc; 1362 mbx_cmd_t *mcp = &mc;
1344 1363
1345 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1364 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1346 1365
1347 if (IS_FWI2_CAPABLE(ha)) { 1366 if (IS_FWI2_CAPABLE(vha->hw)) {
1348 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1367 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1349 mcp->mb[1] = BIT_6; 1368 mcp->mb[1] = BIT_6;
1350 mcp->mb[2] = 0; 1369 mcp->mb[2] = 0;
1351 mcp->mb[3] = ha->loop_reset_delay; 1370 mcp->mb[3] = vha->hw->loop_reset_delay;
1352 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1371 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1353 } else { 1372 } else {
1354 mcp->mb[0] = MBC_LIP_RESET; 1373 mcp->mb[0] = MBC_LIP_RESET;
1355 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1374 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1356 if (HAS_EXTENDED_IDS(ha)) { 1375 if (HAS_EXTENDED_IDS(vha->hw)) {
1357 mcp->mb[1] = 0x00ff; 1376 mcp->mb[1] = 0x00ff;
1358 mcp->mb[10] = 0; 1377 mcp->mb[10] = 0;
1359 mcp->out_mb |= MBX_10; 1378 mcp->out_mb |= MBX_10;
1360 } else { 1379 } else {
1361 mcp->mb[1] = 0xff00; 1380 mcp->mb[1] = 0xff00;
1362 } 1381 }
1363 mcp->mb[2] = ha->loop_reset_delay; 1382 mcp->mb[2] = vha->hw->loop_reset_delay;
1364 mcp->mb[3] = 0; 1383 mcp->mb[3] = 0;
1365 } 1384 }
1366 mcp->in_mb = MBX_0; 1385 mcp->in_mb = MBX_0;
1367 mcp->tov = MBX_TOV_SECONDS; 1386 mcp->tov = MBX_TOV_SECONDS;
1368 mcp->flags = 0; 1387 mcp->flags = 0;
1369 rval = qla2x00_mailbox_command(ha, mcp); 1388 rval = qla2x00_mailbox_command(vha, mcp);
1370 1389
1371 if (rval != QLA_SUCCESS) { 1390 if (rval != QLA_SUCCESS) {
1372 /*EMPTY*/ 1391 /*EMPTY*/
1373 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1392 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
1374 __func__, ha->host_no, rval)); 1393 __func__, vha->host_no, rval));
1375 } else { 1394 } else {
1376 /*EMPTY*/ 1395 /*EMPTY*/
1377 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1396 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1378 } 1397 }
1379 1398
1380 return rval; 1399 return rval;
@@ -1399,7 +1418,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1399 * Kernel context. 1418 * Kernel context.
1400 */ 1419 */
1401int 1420int
1402qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address, 1421qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1403 uint16_t cmd_size, size_t buf_size) 1422 uint16_t cmd_size, size_t buf_size)
1404{ 1423{
1405 int rval; 1424 int rval;
@@ -1407,10 +1426,11 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1407 mbx_cmd_t *mcp = &mc; 1426 mbx_cmd_t *mcp = &mc;
1408 1427
1409 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1428 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
1410 ha->host_no)); 1429 vha->host_no));
1411 1430
1412 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1431 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
1413 "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov)); 1432 "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout,
1433 mcp->tov));
1414 1434
1415 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1435 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1416 mcp->mb[1] = cmd_size; 1436 mcp->mb[1] = cmd_size;
@@ -1422,25 +1442,25 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1422 mcp->in_mb = MBX_0|MBX_1; 1442 mcp->in_mb = MBX_0|MBX_1;
1423 mcp->buf_size = buf_size; 1443 mcp->buf_size = buf_size;
1424 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 1444 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
1425 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1445 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1426 rval = qla2x00_mailbox_command(ha, mcp); 1446 rval = qla2x00_mailbox_command(vha, mcp);
1427 1447
1428 if (rval != QLA_SUCCESS) { 1448 if (rval != QLA_SUCCESS) {
1429 /*EMPTY*/ 1449 /*EMPTY*/
1430 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1450 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1431 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1451 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1432 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1452 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1433 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1453 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1434 } else { 1454 } else {
1435 /*EMPTY*/ 1455 /*EMPTY*/
1436 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no)); 1456 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no));
1437 } 1457 }
1438 1458
1439 return rval; 1459 return rval;
1440} 1460}
1441 1461
1442int 1462int
1443qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1463qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1444 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 1464 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1445{ 1465{
1446 int rval; 1466 int rval;
@@ -1448,13 +1468,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1448 struct logio_entry_24xx *lg; 1468 struct logio_entry_24xx *lg;
1449 dma_addr_t lg_dma; 1469 dma_addr_t lg_dma;
1450 uint32_t iop[2]; 1470 uint32_t iop[2];
1471 struct qla_hw_data *ha = vha->hw;
1451 1472
1452 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1473 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1453 1474
1454 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1475 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1455 if (lg == NULL) { 1476 if (lg == NULL) {
1456 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1477 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
1457 __func__, ha->host_no)); 1478 __func__, vha->host_no));
1458 return QLA_MEMORY_ALLOC_FAILED; 1479 return QLA_MEMORY_ALLOC_FAILED;
1459 } 1480 }
1460 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1481 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1470,14 +1491,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1470 lg->port_id[0] = al_pa; 1491 lg->port_id[0] = al_pa;
1471 lg->port_id[1] = area; 1492 lg->port_id[1] = area;
1472 lg->port_id[2] = domain; 1493 lg->port_id[2] = domain;
1473 lg->vp_index = ha->vp_idx; 1494 lg->vp_index = vha->vp_idx;
1474 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1495 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1475 if (rval != QLA_SUCCESS) { 1496 if (rval != QLA_SUCCESS) {
1476 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1497 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
1477 "(%x).\n", __func__, ha->host_no, rval)); 1498 "(%x).\n", __func__, vha->host_no, rval));
1478 } else if (lg->entry_status != 0) { 1499 } else if (lg->entry_status != 0) {
1479 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1500 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1480 "-- error status (%x).\n", __func__, ha->host_no, 1501 "-- error status (%x).\n", __func__, vha->host_no,
1481 lg->entry_status)); 1502 lg->entry_status));
1482 rval = QLA_FUNCTION_FAILED; 1503 rval = QLA_FUNCTION_FAILED;
1483 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1504 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
@@ -1486,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1486 1507
1487 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1508 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1488 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1509 "-- completion status (%x) ioparam=%x/%x.\n", __func__,
1489 ha->host_no, le16_to_cpu(lg->comp_status), iop[0], 1510 vha->host_no, le16_to_cpu(lg->comp_status), iop[0],
1490 iop[1])); 1511 iop[1]));
1491 1512
1492 switch (iop[0]) { 1513 switch (iop[0]) {
@@ -1515,7 +1536,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1515 break; 1536 break;
1516 } 1537 }
1517 } else { 1538 } else {
1518 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1539 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1519 1540
1520 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1541 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1521 1542
@@ -1562,14 +1583,15 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1562 * Kernel context. 1583 * Kernel context.
1563 */ 1584 */
1564int 1585int
1565qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1586qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1566 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 1587 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1567{ 1588{
1568 int rval; 1589 int rval;
1569 mbx_cmd_t mc; 1590 mbx_cmd_t mc;
1570 mbx_cmd_t *mcp = &mc; 1591 mbx_cmd_t *mcp = &mc;
1592 struct qla_hw_data *ha = vha->hw;
1571 1593
1572 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no)); 1594 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no));
1573 1595
1574 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1596 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1575 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1597 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1586,7 +1608,7 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1586 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 1608 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
1587 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1609 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1588 mcp->flags = 0; 1610 mcp->flags = 0;
1589 rval = qla2x00_mailbox_command(ha, mcp); 1611 rval = qla2x00_mailbox_command(vha, mcp);
1590 1612
1591 /* Return mailbox statuses. */ 1613 /* Return mailbox statuses. */
1592 if (mb != NULL) { 1614 if (mb != NULL) {
@@ -1613,12 +1635,12 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1613 1635
1614 /*EMPTY*/ 1636 /*EMPTY*/
1615 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1637 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
1616 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval, 1638 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval,
1617 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 1639 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
1618 } else { 1640 } else {
1619 /*EMPTY*/ 1641 /*EMPTY*/
1620 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1642 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
1621 ha->host_no)); 1643 vha->host_no));
1622 } 1644 }
1623 1645
1624 return rval; 1646 return rval;
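The fabric-login path exists in two flavours in this file: qla2x00_login_fabric() above drives the legacy MBC_LOGIN_FABRIC_PORT mailbox command, while qla24xx_login_fabric() earlier builds a Login IOCB (struct logio_entry_24xx) and submits it through qla2x00_issue_iocb(). The next hunks show qla2x00_login_local_device() choosing between them; the dispatch reduces to the check sketched here (written against vha->hw, equivalent to the local ha in the hunk):

    /* FWI2-capable (ISP24xx and newer) adapters log in via the Login IOCB
     * path; earlier ISPs use the mailbox command. */
    if (IS_FWI2_CAPABLE(vha->hw))
        return qla24xx_login_fabric(vha, fcport->loop_id,
            fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa, mb_ret, opt);
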
@@ -1641,19 +1663,20 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1641 * 1663 *
1642 */ 1664 */
1643int 1665int
1644qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport, 1666qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1645 uint16_t *mb_ret, uint8_t opt) 1667 uint16_t *mb_ret, uint8_t opt)
1646{ 1668{
1647 int rval; 1669 int rval;
1648 mbx_cmd_t mc; 1670 mbx_cmd_t mc;
1649 mbx_cmd_t *mcp = &mc; 1671 mbx_cmd_t *mcp = &mc;
1672 struct qla_hw_data *ha = vha->hw;
1650 1673
1651 if (IS_FWI2_CAPABLE(ha)) 1674 if (IS_FWI2_CAPABLE(ha))
1652 return qla24xx_login_fabric(ha, fcport->loop_id, 1675 return qla24xx_login_fabric(vha, fcport->loop_id,
1653 fcport->d_id.b.domain, fcport->d_id.b.area, 1676 fcport->d_id.b.domain, fcport->d_id.b.area,
1654 fcport->d_id.b.al_pa, mb_ret, opt); 1677 fcport->d_id.b.al_pa, mb_ret, opt);
1655 1678
1656 DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1679 DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1657 1680
1658 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1681 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1659 if (HAS_EXTENDED_IDS(ha)) 1682 if (HAS_EXTENDED_IDS(ha))
@@ -1665,7 +1688,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1665 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 1688 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
1666 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1689 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1667 mcp->flags = 0; 1690 mcp->flags = 0;
1668 rval = qla2x00_mailbox_command(ha, mcp); 1691 rval = qla2x00_mailbox_command(vha, mcp);
1669 1692
1670 /* Return mailbox statuses. */ 1693 /* Return mailbox statuses. */
1671 if (mb_ret != NULL) { 1694 if (mb_ret != NULL) {
@@ -1686,33 +1709,34 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1686 rval = QLA_SUCCESS; 1709 rval = QLA_SUCCESS;
1687 1710
1688 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1711 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1689 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1712 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1690 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1713 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1691 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1714 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1692 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1715 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1693 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1716 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1694 } else { 1717 } else {
1695 /*EMPTY*/ 1718 /*EMPTY*/
1696 DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1719 DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no));
1697 } 1720 }
1698 1721
1699 return (rval); 1722 return (rval);
1700} 1723}
1701 1724
1702int 1725int
1703qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1726qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1704 uint8_t area, uint8_t al_pa) 1727 uint8_t area, uint8_t al_pa)
1705{ 1728{
1706 int rval; 1729 int rval;
1707 struct logio_entry_24xx *lg; 1730 struct logio_entry_24xx *lg;
1708 dma_addr_t lg_dma; 1731 dma_addr_t lg_dma;
1732 struct qla_hw_data *ha = vha->hw;
1709 1733
1710 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1734 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1711 1735
1712 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1736 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1713 if (lg == NULL) { 1737 if (lg == NULL) {
1714 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", 1738 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n",
1715 __func__, ha->host_no)); 1739 __func__, vha->host_no));
1716 return QLA_MEMORY_ALLOC_FAILED; 1740 return QLA_MEMORY_ALLOC_FAILED;
1717 } 1741 }
1718 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1742 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1725,25 +1749,26 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1725 lg->port_id[0] = al_pa; 1749 lg->port_id[0] = al_pa;
1726 lg->port_id[1] = area; 1750 lg->port_id[1] = area;
1727 lg->port_id[2] = domain; 1751 lg->port_id[2] = domain;
1728 lg->vp_index = ha->vp_idx; 1752 lg->vp_index = vha->vp_idx;
1729 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1753
1754 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1730 if (rval != QLA_SUCCESS) { 1755 if (rval != QLA_SUCCESS) {
1731 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1756 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
1732 "(%x).\n", __func__, ha->host_no, rval)); 1757 "(%x).\n", __func__, vha->host_no, rval));
1733 } else if (lg->entry_status != 0) { 1758 } else if (lg->entry_status != 0) {
1734 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1759 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1735 "-- error status (%x).\n", __func__, ha->host_no, 1760 "-- error status (%x).\n", __func__, vha->host_no,
1736 lg->entry_status)); 1761 lg->entry_status));
1737 rval = QLA_FUNCTION_FAILED; 1762 rval = QLA_FUNCTION_FAILED;
1738 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1763 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1739 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1764 DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB "
1740 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1765 "-- completion status (%x) ioparam=%x/%x.\n", __func__,
1741 ha->host_no, le16_to_cpu(lg->comp_status), 1766 vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status),
1742 le32_to_cpu(lg->io_parameter[0]), 1767 le32_to_cpu(lg->io_parameter[0]),
1743 le32_to_cpu(lg->io_parameter[1]))); 1768 le32_to_cpu(lg->io_parameter[1])));
1744 } else { 1769 } else {
1745 /*EMPTY*/ 1770 /*EMPTY*/
1746 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1771 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1747 } 1772 }
1748 1773
1749 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1774 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1768,7 +1793,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1768 * Kernel context. 1793 * Kernel context.
1769 */ 1794 */
1770int 1795int
1771qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1796qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1772 uint8_t area, uint8_t al_pa) 1797 uint8_t area, uint8_t al_pa)
1773{ 1798{
1774 int rval; 1799 int rval;
@@ -1776,11 +1801,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1776 mbx_cmd_t *mcp = &mc; 1801 mbx_cmd_t *mcp = &mc;
1777 1802
1778 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1803 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
1779 ha->host_no)); 1804 vha->host_no));
1780 1805
1781 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1806 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1782 mcp->out_mb = MBX_1|MBX_0; 1807 mcp->out_mb = MBX_1|MBX_0;
1783 if (HAS_EXTENDED_IDS(ha)) { 1808 if (HAS_EXTENDED_IDS(vha->hw)) {
1784 mcp->mb[1] = loop_id; 1809 mcp->mb[1] = loop_id;
1785 mcp->mb[10] = 0; 1810 mcp->mb[10] = 0;
1786 mcp->out_mb |= MBX_10; 1811 mcp->out_mb |= MBX_10;
@@ -1791,16 +1816,16 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1791 mcp->in_mb = MBX_1|MBX_0; 1816 mcp->in_mb = MBX_1|MBX_0;
1792 mcp->tov = MBX_TOV_SECONDS; 1817 mcp->tov = MBX_TOV_SECONDS;
1793 mcp->flags = 0; 1818 mcp->flags = 0;
1794 rval = qla2x00_mailbox_command(ha, mcp); 1819 rval = qla2x00_mailbox_command(vha, mcp);
1795 1820
1796 if (rval != QLA_SUCCESS) { 1821 if (rval != QLA_SUCCESS) {
1797 /*EMPTY*/ 1822 /*EMPTY*/
1798 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1823 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
1799 "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1])); 1824 "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1]));
1800 } else { 1825 } else {
1801 /*EMPTY*/ 1826 /*EMPTY*/
1802 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1827 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
1803 ha->host_no)); 1828 vha->host_no));
1804 } 1829 }
1805 1830
1806 return rval; 1831 return rval;
@@ -1822,33 +1847,33 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1822 * Kernel context. 1847 * Kernel context.
1823 */ 1848 */
1824int 1849int
1825qla2x00_full_login_lip(scsi_qla_host_t *ha) 1850qla2x00_full_login_lip(scsi_qla_host_t *vha)
1826{ 1851{
1827 int rval; 1852 int rval;
1828 mbx_cmd_t mc; 1853 mbx_cmd_t mc;
1829 mbx_cmd_t *mcp = &mc; 1854 mbx_cmd_t *mcp = &mc;
1830 1855
1831 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1856 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1832 ha->host_no)); 1857 vha->host_no));
1833 1858
1834 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1859 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1835 mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0; 1860 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
1836 mcp->mb[2] = 0; 1861 mcp->mb[2] = 0;
1837 mcp->mb[3] = 0; 1862 mcp->mb[3] = 0;
1838 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1863 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1839 mcp->in_mb = MBX_0; 1864 mcp->in_mb = MBX_0;
1840 mcp->tov = MBX_TOV_SECONDS; 1865 mcp->tov = MBX_TOV_SECONDS;
1841 mcp->flags = 0; 1866 mcp->flags = 0;
1842 rval = qla2x00_mailbox_command(ha, mcp); 1867 rval = qla2x00_mailbox_command(vha, mcp);
1843 1868
1844 if (rval != QLA_SUCCESS) { 1869 if (rval != QLA_SUCCESS) {
1845 /*EMPTY*/ 1870 /*EMPTY*/
1846 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1871 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
1847 ha->host_no, rval)); 1872 vha->host_no, rval));
1848 } else { 1873 } else {
1849 /*EMPTY*/ 1874 /*EMPTY*/
1850 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1875 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
1851 ha->host_no)); 1876 vha->host_no));
1852 } 1877 }
1853 1878
1854 return rval; 1879 return rval;
@@ -1867,7 +1892,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
1867 * Kernel context. 1892 * Kernel context.
1868 */ 1893 */
1869int 1894int
1870qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma, 1895qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
1871 uint16_t *entries) 1896 uint16_t *entries)
1872{ 1897{
1873 int rval; 1898 int rval;
@@ -1875,20 +1900,20 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1875 mbx_cmd_t *mcp = &mc; 1900 mbx_cmd_t *mcp = &mc;
1876 1901
1877 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", 1902 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
1878 ha->host_no)); 1903 vha->host_no));
1879 1904
1880 if (id_list == NULL) 1905 if (id_list == NULL)
1881 return QLA_FUNCTION_FAILED; 1906 return QLA_FUNCTION_FAILED;
1882 1907
1883 mcp->mb[0] = MBC_GET_ID_LIST; 1908 mcp->mb[0] = MBC_GET_ID_LIST;
1884 mcp->out_mb = MBX_0; 1909 mcp->out_mb = MBX_0;
1885 if (IS_FWI2_CAPABLE(ha)) { 1910 if (IS_FWI2_CAPABLE(vha->hw)) {
1886 mcp->mb[2] = MSW(id_list_dma); 1911 mcp->mb[2] = MSW(id_list_dma);
1887 mcp->mb[3] = LSW(id_list_dma); 1912 mcp->mb[3] = LSW(id_list_dma);
1888 mcp->mb[6] = MSW(MSD(id_list_dma)); 1913 mcp->mb[6] = MSW(MSD(id_list_dma));
1889 mcp->mb[7] = LSW(MSD(id_list_dma)); 1914 mcp->mb[7] = LSW(MSD(id_list_dma));
1890 mcp->mb[8] = 0; 1915 mcp->mb[8] = 0;
1891 mcp->mb[9] = ha->vp_idx; 1916 mcp->mb[9] = vha->vp_idx;
1892 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 1917 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
1893 } else { 1918 } else {
1894 mcp->mb[1] = MSW(id_list_dma); 1919 mcp->mb[1] = MSW(id_list_dma);
@@ -1900,16 +1925,16 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1900 mcp->in_mb = MBX_1|MBX_0; 1925 mcp->in_mb = MBX_1|MBX_0;
1901 mcp->tov = MBX_TOV_SECONDS; 1926 mcp->tov = MBX_TOV_SECONDS;
1902 mcp->flags = 0; 1927 mcp->flags = 0;
1903 rval = qla2x00_mailbox_command(ha, mcp); 1928 rval = qla2x00_mailbox_command(vha, mcp);
1904 1929
1905 if (rval != QLA_SUCCESS) { 1930 if (rval != QLA_SUCCESS) {
1906 /*EMPTY*/ 1931 /*EMPTY*/
1907 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", 1932 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
1908 ha->host_no, rval)); 1933 vha->host_no, rval));
1909 } else { 1934 } else {
1910 *entries = mcp->mb[1]; 1935 *entries = mcp->mb[1];
1911 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", 1936 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
1912 ha->host_no)); 1937 vha->host_no));
1913 } 1938 }
1914 1939
1915 return rval; 1940 return rval;
@@ -1929,7 +1954,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1929 * Kernel context. 1954 * Kernel context.
1930 */ 1955 */
1931int 1956int
1932qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt, 1957qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
1933 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt, 1958 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
1934 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports) 1959 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports)
1935{ 1960{
@@ -1937,22 +1962,22 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1937 mbx_cmd_t mc; 1962 mbx_cmd_t mc;
1938 mbx_cmd_t *mcp = &mc; 1963 mbx_cmd_t *mcp = &mc;
1939 1964
1940 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1965 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1941 1966
1942 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 1967 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
1943 mcp->out_mb = MBX_0; 1968 mcp->out_mb = MBX_0;
1944 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1969 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1945 mcp->tov = MBX_TOV_SECONDS; 1970 mcp->tov = MBX_TOV_SECONDS;
1946 mcp->flags = 0; 1971 mcp->flags = 0;
1947 rval = qla2x00_mailbox_command(ha, mcp); 1972 rval = qla2x00_mailbox_command(vha, mcp);
1948 1973
1949 if (rval != QLA_SUCCESS) { 1974 if (rval != QLA_SUCCESS) {
1950 /*EMPTY*/ 1975 /*EMPTY*/
1951 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, 1976 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
1952 ha->host_no, mcp->mb[0])); 1977 vha->host_no, mcp->mb[0]));
1953 } else { 1978 } else {
1954 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 1979 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
1955 "mb7=%x mb10=%x mb11=%x.\n", __func__, ha->host_no, 1980 "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no,
1956 mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7], 1981 mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7],
1957 mcp->mb[10], mcp->mb[11])); 1982 mcp->mb[10], mcp->mb[11]));
1958 1983
@@ -1964,7 +1989,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1964 *cur_iocb_cnt = mcp->mb[7]; 1989 *cur_iocb_cnt = mcp->mb[7];
1965 if (orig_iocb_cnt) 1990 if (orig_iocb_cnt)
1966 *orig_iocb_cnt = mcp->mb[10]; 1991 *orig_iocb_cnt = mcp->mb[10];
1967 if (ha->flags.npiv_supported && max_npiv_vports) 1992 if (vha->hw->flags.npiv_supported && max_npiv_vports)
1968 *max_npiv_vports = mcp->mb[11]; 1993 *max_npiv_vports = mcp->mb[11];
1969 } 1994 }
1970 1995
@@ -1987,18 +2012,19 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1987 * Kernel context. 2012 * Kernel context.
1988 */ 2013 */
1989int 2014int
1990qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map) 2015qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
1991{ 2016{
1992 int rval; 2017 int rval;
1993 mbx_cmd_t mc; 2018 mbx_cmd_t mc;
1994 mbx_cmd_t *mcp = &mc; 2019 mbx_cmd_t *mcp = &mc;
1995 char *pmap; 2020 char *pmap;
1996 dma_addr_t pmap_dma; 2021 dma_addr_t pmap_dma;
2022 struct qla_hw_data *ha = vha->hw;
1997 2023
1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2024 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
1999 if (pmap == NULL) { 2025 if (pmap == NULL) {
2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2026 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
2001 __func__, ha->host_no)); 2027 __func__, vha->host_no));
2002 return QLA_MEMORY_ALLOC_FAILED; 2028 return QLA_MEMORY_ALLOC_FAILED;
2003 } 2029 }
2004 memset(pmap, 0, FCAL_MAP_SIZE); 2030 memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2013,11 +2039,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2013 mcp->buf_size = FCAL_MAP_SIZE; 2039 mcp->buf_size = FCAL_MAP_SIZE;
2014 mcp->flags = MBX_DMA_IN; 2040 mcp->flags = MBX_DMA_IN;
2015 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2041 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2016 rval = qla2x00_mailbox_command(ha, mcp); 2042 rval = qla2x00_mailbox_command(vha, mcp);
2017 2043
2018 if (rval == QLA_SUCCESS) { 2044 if (rval == QLA_SUCCESS) {
2019 DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map " 2045 DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map "
2020 "size (%x)\n", __func__, ha->host_no, mcp->mb[0], 2046 "size (%x)\n", __func__, vha->host_no, mcp->mb[0],
2021 mcp->mb[1], (unsigned)pmap[0])); 2047 mcp->mb[1], (unsigned)pmap[0]));
2022 DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1)); 2048 DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1));
2023 2049
@@ -2028,9 +2054,9 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2028 2054
2029 if (rval != QLA_SUCCESS) { 2055 if (rval != QLA_SUCCESS) {
2030 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2056 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2031 ha->host_no, rval)); 2057 vha->host_no, rval));
2032 } else { 2058 } else {
2033 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2059 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2034 } 2060 }
2035 2061
2036 return rval; 2062 return rval;
@@ -2051,15 +2077,16 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2051 * BIT_1 = mailbox error. 2077 * BIT_1 = mailbox error.
2052 */ 2078 */
2053int 2079int
2054qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id, 2080qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2055 struct link_statistics *stats, dma_addr_t stats_dma) 2081 struct link_statistics *stats, dma_addr_t stats_dma)
2056{ 2082{
2057 int rval; 2083 int rval;
2058 mbx_cmd_t mc; 2084 mbx_cmd_t mc;
2059 mbx_cmd_t *mcp = &mc; 2085 mbx_cmd_t *mcp = &mc;
2060 uint32_t *siter, *diter, dwords; 2086 uint32_t *siter, *diter, dwords;
2087 struct qla_hw_data *ha = vha->hw;
2061 2088
2062 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2089 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2063 2090
2064 mcp->mb[0] = MBC_GET_LINK_STATUS; 2091 mcp->mb[0] = MBC_GET_LINK_STATUS;
2065 mcp->mb[2] = MSW(stats_dma); 2092 mcp->mb[2] = MSW(stats_dma);
@@ -2084,12 +2111,12 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2084 } 2111 }
2085 mcp->tov = MBX_TOV_SECONDS; 2112 mcp->tov = MBX_TOV_SECONDS;
2086 mcp->flags = IOCTL_CMD; 2113 mcp->flags = IOCTL_CMD;
2087 rval = qla2x00_mailbox_command(ha, mcp); 2114 rval = qla2x00_mailbox_command(vha, mcp);
2088 2115
2089 if (rval == QLA_SUCCESS) { 2116 if (rval == QLA_SUCCESS) {
2090 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2117 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2091 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2118 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2092 __func__, ha->host_no, mcp->mb[0])); 2119 __func__, vha->host_no, mcp->mb[0]));
2093 rval = QLA_FUNCTION_FAILED; 2120 rval = QLA_FUNCTION_FAILED;
2094 } else { 2121 } else {
2095 /* Copy over data -- firmware data is LE. */ 2122 /* Copy over data -- firmware data is LE. */
@@ -2101,14 +2128,14 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2101 } else { 2128 } else {
2102 /* Failed. */ 2129 /* Failed. */
2103 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2130 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2104 ha->host_no, rval)); 2131 vha->host_no, rval));
2105 } 2132 }
2106 2133
2107 return rval; 2134 return rval;
2108} 2135}
2109 2136
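The statistics commands just above and below load a 64-bit DMA address into four 16-bit mailbox registers using the driver's word/dword macros. A small sketch of that packing, assuming qla_def.h's LSW/MSW (low/high 16 bits of a 32-bit value) and LSD/MSD (low/high 32 bits of a 64-bit value); the helper is illustrative:

/* Sketch: split a dma_addr_t across mailbox registers the way the
 * GET_LINK_STATUS / GET_LINK_PRIV_STATS paths do. */
static void example_pack_stats_dma(mbx_cmd_t *mcp, dma_addr_t stats_dma)
{
	mcp->mb[2] = MSW(stats_dma);		/* address bits 31..16 */
	mcp->mb[3] = LSW(stats_dma);		/* address bits 15..0  */
	mcp->mb[6] = MSW(MSD(stats_dma));	/* address bits 63..48 */
	mcp->mb[7] = LSW(MSD(stats_dma));	/* address bits 47..32 */
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2;	/* mark them outbound */
}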
2110int 2137int
2111qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats, 2138qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2112 dma_addr_t stats_dma) 2139 dma_addr_t stats_dma)
2113{ 2140{
2114 int rval; 2141 int rval;
@@ -2116,7 +2143,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
2116 mbx_cmd_t *mcp = &mc; 2143 mbx_cmd_t *mcp = &mc;
2117 uint32_t *siter, *diter, dwords; 2144 uint32_t *siter, *diter, dwords;
2118 2145
2119 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2146 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2120 2147
2121 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2148 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2122 mcp->mb[2] = MSW(stats_dma); 2149 mcp->mb[2] = MSW(stats_dma);
@@ -2124,18 +2151,18 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
2124 mcp->mb[6] = MSW(MSD(stats_dma)); 2151 mcp->mb[6] = MSW(MSD(stats_dma));
2125 mcp->mb[7] = LSW(MSD(stats_dma)); 2152 mcp->mb[7] = LSW(MSD(stats_dma));
2126 mcp->mb[8] = sizeof(struct link_statistics) / 4; 2153 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2127 mcp->mb[9] = ha->vp_idx; 2154 mcp->mb[9] = vha->vp_idx;
2128 mcp->mb[10] = 0; 2155 mcp->mb[10] = 0;
2129 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2156 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2130 mcp->in_mb = MBX_2|MBX_1|MBX_0; 2157 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2131 mcp->tov = MBX_TOV_SECONDS; 2158 mcp->tov = MBX_TOV_SECONDS;
2132 mcp->flags = IOCTL_CMD; 2159 mcp->flags = IOCTL_CMD;
2133 rval = qla2x00_mailbox_command(ha, mcp); 2160 rval = qla2x00_mailbox_command(vha, mcp);
2134 2161
2135 if (rval == QLA_SUCCESS) { 2162 if (rval == QLA_SUCCESS) {
2136 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2163 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2137 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2164 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2138 __func__, ha->host_no, mcp->mb[0])); 2165 __func__, vha->host_no, mcp->mb[0]));
2139 rval = QLA_FUNCTION_FAILED; 2166 rval = QLA_FUNCTION_FAILED;
2140 } else { 2167 } else {
2141 /* Copy over data -- firmware data is LE. */ 2168 /* Copy over data -- firmware data is LE. */
@@ -2147,14 +2174,14 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
2147 } else { 2174 } else {
2148 /* Failed. */ 2175 /* Failed. */
2149 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2176 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2150 ha->host_no, rval)); 2177 vha->host_no, rval));
2151 } 2178 }
2152 2179
2153 return rval; 2180 return rval;
2154} 2181}
2155 2182
2156int 2183int
2157qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp) 2184qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
2158{ 2185{
2159 int rval; 2186 int rval;
2160 fc_port_t *fcport; 2187 fc_port_t *fcport;
@@ -2163,18 +2190,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2163 struct abort_entry_24xx *abt; 2190 struct abort_entry_24xx *abt;
2164 dma_addr_t abt_dma; 2191 dma_addr_t abt_dma;
2165 uint32_t handle; 2192 uint32_t handle;
2166 scsi_qla_host_t *pha = to_qla_parent(ha); 2193 struct qla_hw_data *ha = vha->hw;
2167 2194
2168 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2195 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2169 2196
2170 fcport = sp->fcport; 2197 fcport = sp->fcport;
2171 2198
2172 spin_lock_irqsave(&pha->hardware_lock, flags); 2199 spin_lock_irqsave(&ha->hardware_lock, flags);
2173 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2200 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2174 if (pha->outstanding_cmds[handle] == sp) 2201 if (req->outstanding_cmds[handle] == sp)
2175 break; 2202 break;
2176 } 2203 }
2177 spin_unlock_irqrestore(&pha->hardware_lock, flags); 2204 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2178 if (handle == MAX_OUTSTANDING_COMMANDS) { 2205 if (handle == MAX_OUTSTANDING_COMMANDS) {
2179 /* Command not found. */ 2206 /* Command not found. */
2180 return QLA_FUNCTION_FAILED; 2207 return QLA_FUNCTION_FAILED;
@@ -2183,7 +2210,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2183 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 2210 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2184 if (abt == NULL) { 2211 if (abt == NULL) {
2185 DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n", 2212 DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n",
2186 __func__, ha->host_no)); 2213 __func__, vha->host_no));
2187 return QLA_MEMORY_ALLOC_FAILED; 2214 return QLA_MEMORY_ALLOC_FAILED;
2188 } 2215 }
2189 memset(abt, 0, sizeof(struct abort_entry_24xx)); 2216 memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2196,22 +2223,25 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2196 abt->port_id[1] = fcport->d_id.b.area; 2223 abt->port_id[1] = fcport->d_id.b.area;
2197 abt->port_id[2] = fcport->d_id.b.domain; 2224 abt->port_id[2] = fcport->d_id.b.domain;
2198 abt->vp_index = fcport->vp_idx; 2225 abt->vp_index = fcport->vp_idx;
2199 rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0); 2226
2227 abt->req_que_no = cpu_to_le16(req->id);
2228
2229 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2200 if (rval != QLA_SUCCESS) { 2230 if (rval != QLA_SUCCESS) {
2201 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2231 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
2202 __func__, ha->host_no, rval)); 2232 __func__, vha->host_no, rval));
2203 } else if (abt->entry_status != 0) { 2233 } else if (abt->entry_status != 0) {
2204 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2234 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2205 "-- error status (%x).\n", __func__, ha->host_no, 2235 "-- error status (%x).\n", __func__, vha->host_no,
2206 abt->entry_status)); 2236 abt->entry_status));
2207 rval = QLA_FUNCTION_FAILED; 2237 rval = QLA_FUNCTION_FAILED;
2208 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2238 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2209 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2239 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2210 "-- completion status (%x).\n", __func__, ha->host_no, 2240 "-- completion status (%x).\n", __func__, vha->host_no,
2211 le16_to_cpu(abt->nport_handle))); 2241 le16_to_cpu(abt->nport_handle)));
2212 rval = QLA_FUNCTION_FAILED; 2242 rval = QLA_FUNCTION_FAILED;
2213 } else { 2243 } else {
2214 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2244 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2215 } 2245 }
2216 2246
2217 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2247 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
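With per-queue command tracking, qla24xx_abort_command() now searches the request queue's own outstanding_cmds[] table (under the hardware lock in vha->hw) instead of the parent host's array, and stamps the queue id into the abort IOCB via req_que_no. The lookup step in isolation, assuming the driver's qla_def.h types; the helper name is illustrative:

/* Sketch: locate an SRB's handle in a specific request queue
 * (mirrors the lookup performed in qla24xx_abort_command above). */
static uint32_t example_find_handle(struct qla_hw_data *ha,
    struct req_que *req, srb_t *sp)
{
	unsigned long flags;
	uint32_t handle;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return handle;	/* == MAX_OUTSTANDING_COMMANDS if not found */
}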
@@ -2233,16 +2263,21 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2233 int rval, rval2; 2263 int rval, rval2;
2234 struct tsk_mgmt_cmd *tsk; 2264 struct tsk_mgmt_cmd *tsk;
2235 dma_addr_t tsk_dma; 2265 dma_addr_t tsk_dma;
2236 scsi_qla_host_t *ha, *pha; 2266 scsi_qla_host_t *vha;
2267 struct qla_hw_data *ha;
2268 struct req_que *req;
2269 struct rsp_que *rsp;
2237 2270
2238 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 2271 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
2239 2272
2240 ha = fcport->ha; 2273 vha = fcport->vha;
2241 pha = to_qla_parent(ha); 2274 ha = vha->hw;
2242 tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2275 req = ha->req_q_map[0];
2276 rsp = ha->rsp_q_map[0];
2277 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2243 if (tsk == NULL) { 2278 if (tsk == NULL) {
2244 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2279 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
2245 "IOCB.\n", __func__, ha->host_no)); 2280 "IOCB.\n", __func__, vha->host_no));
2246 return QLA_MEMORY_ALLOC_FAILED; 2281 return QLA_MEMORY_ALLOC_FAILED;
2247 } 2282 }
2248 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); 2283 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2262,34 +2297,34 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2262 sizeof(tsk->p.tsk.lun)); 2297 sizeof(tsk->p.tsk.lun));
2263 } 2298 }
2264 2299
2265 rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0); 2300 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2266 if (rval != QLA_SUCCESS) { 2301 if (rval != QLA_SUCCESS) {
2267 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " 2302 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
2268 "(%x).\n", __func__, ha->host_no, name, rval)); 2303 "(%x).\n", __func__, vha->host_no, name, rval));
2269 } else if (tsk->p.sts.entry_status != 0) { 2304 } else if (tsk->p.sts.entry_status != 0) {
2270 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2305 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2271 "-- error status (%x).\n", __func__, ha->host_no, 2306 "-- error status (%x).\n", __func__, vha->host_no,
2272 tsk->p.sts.entry_status)); 2307 tsk->p.sts.entry_status));
2273 rval = QLA_FUNCTION_FAILED; 2308 rval = QLA_FUNCTION_FAILED;
2274 } else if (tsk->p.sts.comp_status != 2309 } else if (tsk->p.sts.comp_status !=
2275 __constant_cpu_to_le16(CS_COMPLETE)) { 2310 __constant_cpu_to_le16(CS_COMPLETE)) {
2276 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2311 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2277 "-- completion status (%x).\n", __func__, 2312 "-- completion status (%x).\n", __func__,
2278 ha->host_no, le16_to_cpu(tsk->p.sts.comp_status))); 2313 vha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
2279 rval = QLA_FUNCTION_FAILED; 2314 rval = QLA_FUNCTION_FAILED;
2280 } 2315 }
2281 2316
2282 /* Issue marker IOCB. */ 2317 /* Issue marker IOCB. */
2283 rval2 = qla2x00_marker(ha, fcport->loop_id, l, 2318 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2284 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); 2319 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2285 if (rval2 != QLA_SUCCESS) { 2320 if (rval2 != QLA_SUCCESS) {
2286 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 2321 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
2287 "(%x).\n", __func__, ha->host_no, rval2)); 2322 "(%x).\n", __func__, vha->host_no, rval2));
2288 } else { 2323 } else {
2289 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2324 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2290 } 2325 }
2291 2326
2292 dma_pool_free(pha->s_dma_pool, tsk, tsk_dma); 2327 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
2293 2328
2294 return rval; 2329 return rval;
2295} 2330}
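The task-management path now reaches everything it needs through the fcport: the owning vha, the adapter data behind vha->hw, and the base request/response queue pair at index 0 of the queue maps, which is then handed to the queue-aware qla2x00_marker(). A hedged sketch of that navigation under the same assumptions (index 0 as the default queue pair, driver headers available; the helper is illustrative):

/* Sketch: derive the base queues for a TMF and issue the marker. */
static int example_tmf_marker(struct fc_port *fcport, unsigned int lun)
{
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	int ret;

	/* The marker IOCB is now queue-aware. */
	ret = qla2x00_marker(vha, req, rsp, fcport->loop_id, lun, MK_SYNC_ID);
	if (ret != QLA_SUCCESS)
		DEBUG2_3_11(printk("%s(%ld): marker failed (%x).\n",
		    __func__, vha->host_no, ret));

	return ret;
}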
@@ -2307,29 +2342,30 @@ qla24xx_lun_reset(struct fc_port *fcport, unsigned int l)
2307} 2342}
2308 2343
2309int 2344int
2310qla2x00_system_error(scsi_qla_host_t *ha) 2345qla2x00_system_error(scsi_qla_host_t *vha)
2311{ 2346{
2312 int rval; 2347 int rval;
2313 mbx_cmd_t mc; 2348 mbx_cmd_t mc;
2314 mbx_cmd_t *mcp = &mc; 2349 mbx_cmd_t *mcp = &mc;
2350 struct qla_hw_data *ha = vha->hw;
2315 2351
2316 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2352 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2317 return QLA_FUNCTION_FAILED; 2353 return QLA_FUNCTION_FAILED;
2318 2354
2319 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2355 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2320 2356
2321 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2357 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2322 mcp->out_mb = MBX_0; 2358 mcp->out_mb = MBX_0;
2323 mcp->in_mb = MBX_0; 2359 mcp->in_mb = MBX_0;
2324 mcp->tov = 5; 2360 mcp->tov = 5;
2325 mcp->flags = 0; 2361 mcp->flags = 0;
2326 rval = qla2x00_mailbox_command(ha, mcp); 2362 rval = qla2x00_mailbox_command(vha, mcp);
2327 2363
2328 if (rval != QLA_SUCCESS) { 2364 if (rval != QLA_SUCCESS) {
2329 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2365 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2330 ha->host_no, rval)); 2366 vha->host_no, rval));
2331 } else { 2367 } else {
2332 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2368 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2333 } 2369 }
2334 2370
2335 return rval; 2371 return rval;
@@ -2342,14 +2378,14 @@ qla2x00_system_error(scsi_qla_host_t *ha)
2342 * Returns 2378 * Returns
2343 */ 2379 */
2344int 2380int
2345qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g, 2381qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2346 uint16_t sw_em_2g, uint16_t sw_em_4g) 2382 uint16_t sw_em_2g, uint16_t sw_em_4g)
2347{ 2383{
2348 int rval; 2384 int rval;
2349 mbx_cmd_t mc; 2385 mbx_cmd_t mc;
2350 mbx_cmd_t *mcp = &mc; 2386 mbx_cmd_t *mcp = &mc;
2351 2387
2352 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2388 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2353 2389
2354 mcp->mb[0] = MBC_SERDES_PARAMS; 2390 mcp->mb[0] = MBC_SERDES_PARAMS;
2355 mcp->mb[1] = BIT_0; 2391 mcp->mb[1] = BIT_0;
@@ -2360,61 +2396,61 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
2360 mcp->in_mb = MBX_0; 2396 mcp->in_mb = MBX_0;
2361 mcp->tov = MBX_TOV_SECONDS; 2397 mcp->tov = MBX_TOV_SECONDS;
2362 mcp->flags = 0; 2398 mcp->flags = 0;
2363 rval = qla2x00_mailbox_command(ha, mcp); 2399 rval = qla2x00_mailbox_command(vha, mcp);
2364 2400
2365 if (rval != QLA_SUCCESS) { 2401 if (rval != QLA_SUCCESS) {
2366 /*EMPTY*/ 2402 /*EMPTY*/
2367 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2403 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2368 ha->host_no, rval, mcp->mb[0])); 2404 vha->host_no, rval, mcp->mb[0]));
2369 } else { 2405 } else {
2370 /*EMPTY*/ 2406 /*EMPTY*/
2371 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2407 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2372 } 2408 }
2373 2409
2374 return rval; 2410 return rval;
2375} 2411}
2376 2412
2377int 2413int
2378qla2x00_stop_firmware(scsi_qla_host_t *ha) 2414qla2x00_stop_firmware(scsi_qla_host_t *vha)
2379{ 2415{
2380 int rval; 2416 int rval;
2381 mbx_cmd_t mc; 2417 mbx_cmd_t mc;
2382 mbx_cmd_t *mcp = &mc; 2418 mbx_cmd_t *mcp = &mc;
2383 2419
2384 if (!IS_FWI2_CAPABLE(ha)) 2420 if (!IS_FWI2_CAPABLE(vha->hw))
2385 return QLA_FUNCTION_FAILED; 2421 return QLA_FUNCTION_FAILED;
2386 2422
2387 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2423 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2388 2424
2389 mcp->mb[0] = MBC_STOP_FIRMWARE; 2425 mcp->mb[0] = MBC_STOP_FIRMWARE;
2390 mcp->out_mb = MBX_0; 2426 mcp->out_mb = MBX_0;
2391 mcp->in_mb = MBX_0; 2427 mcp->in_mb = MBX_0;
2392 mcp->tov = 5; 2428 mcp->tov = 5;
2393 mcp->flags = 0; 2429 mcp->flags = 0;
2394 rval = qla2x00_mailbox_command(ha, mcp); 2430 rval = qla2x00_mailbox_command(vha, mcp);
2395 2431
2396 if (rval != QLA_SUCCESS) { 2432 if (rval != QLA_SUCCESS) {
2397 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2433 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2398 ha->host_no, rval)); 2434 vha->host_no, rval));
2399 } else { 2435 } else {
2400 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2436 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2401 } 2437 }
2402 2438
2403 return rval; 2439 return rval;
2404} 2440}
2405 2441
2406int 2442int
2407qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma, 2443qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2408 uint16_t buffers) 2444 uint16_t buffers)
2409{ 2445{
2410 int rval; 2446 int rval;
2411 mbx_cmd_t mc; 2447 mbx_cmd_t mc;
2412 mbx_cmd_t *mcp = &mc; 2448 mbx_cmd_t *mcp = &mc;
2413 2449
2414 if (!IS_FWI2_CAPABLE(ha)) 2450 if (!IS_FWI2_CAPABLE(vha->hw))
2415 return QLA_FUNCTION_FAILED; 2451 return QLA_FUNCTION_FAILED;
2416 2452
2417 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2453 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2418 2454
2419 mcp->mb[0] = MBC_TRACE_CONTROL; 2455 mcp->mb[0] = MBC_TRACE_CONTROL;
2420 mcp->mb[1] = TC_EFT_ENABLE; 2456 mcp->mb[1] = TC_EFT_ENABLE;
@@ -2428,28 +2464,28 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
2428 mcp->in_mb = MBX_1|MBX_0; 2464 mcp->in_mb = MBX_1|MBX_0;
2429 mcp->tov = MBX_TOV_SECONDS; 2465 mcp->tov = MBX_TOV_SECONDS;
2430 mcp->flags = 0; 2466 mcp->flags = 0;
2431 rval = qla2x00_mailbox_command(ha, mcp); 2467 rval = qla2x00_mailbox_command(vha, mcp);
2432 if (rval != QLA_SUCCESS) { 2468 if (rval != QLA_SUCCESS) {
2433 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2469 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2434 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2470 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2435 } else { 2471 } else {
2436 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2472 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2437 } 2473 }
2438 2474
2439 return rval; 2475 return rval;
2440} 2476}
2441 2477
2442int 2478int
2443qla2x00_disable_eft_trace(scsi_qla_host_t *ha) 2479qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2444{ 2480{
2445 int rval; 2481 int rval;
2446 mbx_cmd_t mc; 2482 mbx_cmd_t mc;
2447 mbx_cmd_t *mcp = &mc; 2483 mbx_cmd_t *mcp = &mc;
2448 2484
2449 if (!IS_FWI2_CAPABLE(ha)) 2485 if (!IS_FWI2_CAPABLE(vha->hw))
2450 return QLA_FUNCTION_FAILED; 2486 return QLA_FUNCTION_FAILED;
2451 2487
2452 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2488 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2453 2489
2454 mcp->mb[0] = MBC_TRACE_CONTROL; 2490 mcp->mb[0] = MBC_TRACE_CONTROL;
2455 mcp->mb[1] = TC_EFT_DISABLE; 2491 mcp->mb[1] = TC_EFT_DISABLE;
@@ -2457,29 +2493,29 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
2457 mcp->in_mb = MBX_1|MBX_0; 2493 mcp->in_mb = MBX_1|MBX_0;
2458 mcp->tov = MBX_TOV_SECONDS; 2494 mcp->tov = MBX_TOV_SECONDS;
2459 mcp->flags = 0; 2495 mcp->flags = 0;
2460 rval = qla2x00_mailbox_command(ha, mcp); 2496 rval = qla2x00_mailbox_command(vha, mcp);
2461 if (rval != QLA_SUCCESS) { 2497 if (rval != QLA_SUCCESS) {
2462 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2498 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2463 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2499 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2464 } else { 2500 } else {
2465 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2501 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2466 } 2502 }
2467 2503
2468 return rval; 2504 return rval;
2469} 2505}
2470 2506
2471int 2507int
2472qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma, 2508qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2473 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 2509 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
2474{ 2510{
2475 int rval; 2511 int rval;
2476 mbx_cmd_t mc; 2512 mbx_cmd_t mc;
2477 mbx_cmd_t *mcp = &mc; 2513 mbx_cmd_t *mcp = &mc;
2478 2514
2479 if (!IS_QLA25XX(ha)) 2515 if (!IS_QLA25XX(vha->hw))
2480 return QLA_FUNCTION_FAILED; 2516 return QLA_FUNCTION_FAILED;
2481 2517
2482 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2518 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2483 2519
2484 mcp->mb[0] = MBC_TRACE_CONTROL; 2520 mcp->mb[0] = MBC_TRACE_CONTROL;
2485 mcp->mb[1] = TC_FCE_ENABLE; 2521 mcp->mb[1] = TC_FCE_ENABLE;
@@ -2497,12 +2533,12 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
2497 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2533 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2498 mcp->tov = MBX_TOV_SECONDS; 2534 mcp->tov = MBX_TOV_SECONDS;
2499 mcp->flags = 0; 2535 mcp->flags = 0;
2500 rval = qla2x00_mailbox_command(ha, mcp); 2536 rval = qla2x00_mailbox_command(vha, mcp);
2501 if (rval != QLA_SUCCESS) { 2537 if (rval != QLA_SUCCESS) {
2502 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2538 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2503 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2539 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2504 } else { 2540 } else {
2505 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2541 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2506 2542
2507 if (mb) 2543 if (mb)
2508 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2544 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2514,16 +2550,16 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
2514} 2550}
2515 2551
2516int 2552int
2517qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd) 2553qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2518{ 2554{
2519 int rval; 2555 int rval;
2520 mbx_cmd_t mc; 2556 mbx_cmd_t mc;
2521 mbx_cmd_t *mcp = &mc; 2557 mbx_cmd_t *mcp = &mc;
2522 2558
2523 if (!IS_FWI2_CAPABLE(ha)) 2559 if (!IS_FWI2_CAPABLE(vha->hw))
2524 return QLA_FUNCTION_FAILED; 2560 return QLA_FUNCTION_FAILED;
2525 2561
2526 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2562 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2527 2563
2528 mcp->mb[0] = MBC_TRACE_CONTROL; 2564 mcp->mb[0] = MBC_TRACE_CONTROL;
2529 mcp->mb[1] = TC_FCE_DISABLE; 2565 mcp->mb[1] = TC_FCE_DISABLE;
@@ -2533,12 +2569,12 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
2533 MBX_1|MBX_0; 2569 MBX_1|MBX_0;
2534 mcp->tov = MBX_TOV_SECONDS; 2570 mcp->tov = MBX_TOV_SECONDS;
2535 mcp->flags = 0; 2571 mcp->flags = 0;
2536 rval = qla2x00_mailbox_command(ha, mcp); 2572 rval = qla2x00_mailbox_command(vha, mcp);
2537 if (rval != QLA_SUCCESS) { 2573 if (rval != QLA_SUCCESS) {
2538 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2574 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2539 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2575 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2540 } else { 2576 } else {
2541 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2577 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2542 2578
2543 if (wr) 2579 if (wr)
2544 *wr = (uint64_t) mcp->mb[5] << 48 | 2580 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2556,17 +2592,17 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
2556} 2592}
2557 2593
2558int 2594int
2559qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr, 2595qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
2560 uint16_t off, uint16_t count) 2596 uint16_t off, uint16_t count)
2561{ 2597{
2562 int rval; 2598 int rval;
2563 mbx_cmd_t mc; 2599 mbx_cmd_t mc;
2564 mbx_cmd_t *mcp = &mc; 2600 mbx_cmd_t *mcp = &mc;
2565 2601
2566 if (!IS_FWI2_CAPABLE(ha)) 2602 if (!IS_FWI2_CAPABLE(vha->hw))
2567 return QLA_FUNCTION_FAILED; 2603 return QLA_FUNCTION_FAILED;
2568 2604
2569 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2605 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2570 2606
2571 mcp->mb[0] = MBC_READ_SFP; 2607 mcp->mb[0] = MBC_READ_SFP;
2572 mcp->mb[1] = addr; 2608 mcp->mb[1] = addr;
@@ -2581,30 +2617,30 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2581 mcp->in_mb = MBX_0; 2617 mcp->in_mb = MBX_0;
2582 mcp->tov = MBX_TOV_SECONDS; 2618 mcp->tov = MBX_TOV_SECONDS;
2583 mcp->flags = 0; 2619 mcp->flags = 0;
2584 rval = qla2x00_mailbox_command(ha, mcp); 2620 rval = qla2x00_mailbox_command(vha, mcp);
2585 2621
2586 if (rval != QLA_SUCCESS) { 2622 if (rval != QLA_SUCCESS) {
2587 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2623 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2588 ha->host_no, rval, mcp->mb[0])); 2624 vha->host_no, rval, mcp->mb[0]));
2589 } else { 2625 } else {
2590 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2626 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2591 } 2627 }
2592 2628
2593 return rval; 2629 return rval;
2594} 2630}
2595 2631
2596int 2632int
2597qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id, 2633qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2598 uint16_t port_speed, uint16_t *mb) 2634 uint16_t port_speed, uint16_t *mb)
2599{ 2635{
2600 int rval; 2636 int rval;
2601 mbx_cmd_t mc; 2637 mbx_cmd_t mc;
2602 mbx_cmd_t *mcp = &mc; 2638 mbx_cmd_t *mcp = &mc;
2603 2639
2604 if (!IS_IIDMA_CAPABLE(ha)) 2640 if (!IS_IIDMA_CAPABLE(vha->hw))
2605 return QLA_FUNCTION_FAILED; 2641 return QLA_FUNCTION_FAILED;
2606 2642
2607 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2643 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2608 2644
2609 mcp->mb[0] = MBC_PORT_PARAMS; 2645 mcp->mb[0] = MBC_PORT_PARAMS;
2610 mcp->mb[1] = loop_id; 2646 mcp->mb[1] = loop_id;
@@ -2615,7 +2651,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2615 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 2651 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
2616 mcp->tov = MBX_TOV_SECONDS; 2652 mcp->tov = MBX_TOV_SECONDS;
2617 mcp->flags = 0; 2653 mcp->flags = 0;
2618 rval = qla2x00_mailbox_command(ha, mcp); 2654 rval = qla2x00_mailbox_command(vha, mcp);
2619 2655
2620 /* Return mailbox statuses. */ 2656 /* Return mailbox statuses. */
2621 if (mb != NULL) { 2657 if (mb != NULL) {
@@ -2628,28 +2664,29 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2628 2664
2629 if (rval != QLA_SUCCESS) { 2665 if (rval != QLA_SUCCESS) {
2630 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2666 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2631 ha->host_no, rval)); 2667 vha->host_no, rval));
2632 } else { 2668 } else {
2633 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2669 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2634 } 2670 }
2635 2671
2636 return rval; 2672 return rval;
2637} 2673}
2638 2674
2639void 2675void
2640qla24xx_report_id_acquisition(scsi_qla_host_t *ha, 2676qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2641 struct vp_rpt_id_entry_24xx *rptid_entry) 2677 struct vp_rpt_id_entry_24xx *rptid_entry)
2642{ 2678{
2643 uint8_t vp_idx; 2679 uint8_t vp_idx;
2644 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); 2680 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2645 scsi_qla_host_t *vha; 2681 struct qla_hw_data *ha = vha->hw;
2682 scsi_qla_host_t *vp;
2646 2683
2647 if (rptid_entry->entry_status != 0) 2684 if (rptid_entry->entry_status != 0)
2648 return; 2685 return;
2649 2686
2650 if (rptid_entry->format == 0) { 2687 if (rptid_entry->format == 0) {
2651 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," 2688 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
2652 " number of VPs acquired %d\n", __func__, ha->host_no, 2689 " number of VPs acquired %d\n", __func__, vha->host_no,
2653 MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count))); 2690 MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
2654 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, 2691 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
2655 rptid_entry->port_id[2], rptid_entry->port_id[1], 2692 rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2658,7 +2695,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
2658 vp_idx = LSB(stat); 2695 vp_idx = LSB(stat);
2659 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled " 2696 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
2660 "- status %d - " 2697 "- status %d - "
2661 "with port id %02x%02x%02x\n",__func__,ha->host_no, 2698 "with port id %02x%02x%02x\n", __func__, vha->host_no,
2662 vp_idx, MSB(stat), 2699 vp_idx, MSB(stat),
2663 rptid_entry->port_id[2], rptid_entry->port_id[1], 2700 rptid_entry->port_id[2], rptid_entry->port_id[1],
2664 rptid_entry->port_id[0])); 2701 rptid_entry->port_id[0]));
@@ -2668,25 +2705,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
2668 if (MSB(stat) == 1) 2705 if (MSB(stat) == 1)
2669 return; 2706 return;
2670 2707
2671 list_for_each_entry(vha, &ha->vp_list, vp_list) 2708 list_for_each_entry(vp, &ha->vp_list, list)
2672 if (vp_idx == vha->vp_idx) 2709 if (vp_idx == vp->vp_idx)
2673 break; 2710 break;
2674 2711 if (!vp)
2675 if (!vha)
2676 return; 2712 return;
2677 2713
2678 vha->d_id.b.domain = rptid_entry->port_id[2]; 2714 vp->d_id.b.domain = rptid_entry->port_id[2];
2679 vha->d_id.b.area = rptid_entry->port_id[1]; 2715 vp->d_id.b.area = rptid_entry->port_id[1];
2680 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 2716 vp->d_id.b.al_pa = rptid_entry->port_id[0];
2681 2717
2682 /* 2718 /*
2683 * Cannot configure here as we are still sitting on the 2719 * Cannot configure here as we are still sitting on the
2684 * response queue. Handle it in dpc context. 2720 * response queue. Handle it in dpc context.
2685 */ 2721 */
2686 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); 2722 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
2687 set_bit(VP_DPC_NEEDED, &ha->dpc_flags); 2723 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
2688 2724
2689 qla2xxx_wake_dpc(ha); 2725 qla2xxx_wake_dpc(vha);
2690 } 2726 }
2691} 2727}
2692 2728
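Virtual ports are now linked directly onto the adapter-wide vp_list through each vha's list member, so a report-ID entry is matched to its VP by walking that list, as above. A lookup helper in the same shape, assuming the caller provides whatever protection the list needs in its context; the helper name is illustrative and the real code performs the walk inline:

/* Sketch: find a virtual port by index on the adapter's vp_list. */
static scsi_qla_host_t *example_find_vp(struct qla_hw_data *ha, uint8_t vp_idx)
{
	scsi_qla_host_t *vp;

	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx == vp_idx)
			return vp;	/* matched VP */
	}
	return NULL;	/* no VP with that index */
}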
@@ -2709,15 +2745,15 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2709 int rval; 2745 int rval;
2710 struct vp_config_entry_24xx *vpmod; 2746 struct vp_config_entry_24xx *vpmod;
2711 dma_addr_t vpmod_dma; 2747 dma_addr_t vpmod_dma;
2712 scsi_qla_host_t *pha; 2748 struct qla_hw_data *ha = vha->hw;
2749 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2713 2750
2714 /* This can be called by the parent */ 2751 /* This can be called by the parent */
2715 pha = to_qla_parent(vha);
2716 2752
2717 vpmod = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 2753 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2718 if (!vpmod) { 2754 if (!vpmod) {
2719 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP " 2755 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
2720 "IOCB.\n", __func__, pha->host_no)); 2756 "IOCB.\n", __func__, vha->host_no));
2721 return QLA_MEMORY_ALLOC_FAILED; 2757 return QLA_MEMORY_ALLOC_FAILED;
2722 } 2758 }
2723 2759
@@ -2732,26 +2768,27 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2732 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 2768 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
2733 vpmod->entry_count = 1; 2769 vpmod->entry_count = 1;
2734 2770
2735 rval = qla2x00_issue_iocb(pha, vpmod, vpmod_dma, 0); 2771 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2736 if (rval != QLA_SUCCESS) { 2772 if (rval != QLA_SUCCESS) {
2737 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB" 2773 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
2738 "(%x).\n", __func__, pha->host_no, rval)); 2774 "(%x).\n", __func__, base_vha->host_no, rval));
2739 } else if (vpmod->comp_status != 0) { 2775 } else if (vpmod->comp_status != 0) {
2740 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2776 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2741 "-- error status (%x).\n", __func__, pha->host_no, 2777 "-- error status (%x).\n", __func__, base_vha->host_no,
2742 vpmod->comp_status)); 2778 vpmod->comp_status));
2743 rval = QLA_FUNCTION_FAILED; 2779 rval = QLA_FUNCTION_FAILED;
2744 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2780 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2745 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2781 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2746 "-- completion status (%x).\n", __func__, pha->host_no, 2782 "-- completion status (%x).\n", __func__, base_vha->host_no,
2747 le16_to_cpu(vpmod->comp_status))); 2783 le16_to_cpu(vpmod->comp_status)));
2748 rval = QLA_FUNCTION_FAILED; 2784 rval = QLA_FUNCTION_FAILED;
2749 } else { 2785 } else {
2750 /* EMPTY */ 2786 /* EMPTY */
2751 DEBUG11(printk("%s(%ld): done.\n", __func__, pha->host_no)); 2787 DEBUG11(printk("%s(%ld): done.\n", __func__,
2788 base_vha->host_no));
2752 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 2789 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
2753 } 2790 }
2754 dma_pool_free(pha->s_dma_pool, vpmod, vpmod_dma); 2791 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
2755 2792
2756 return rval; 2793 return rval;
2757} 2794}
@@ -2778,11 +2815,12 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2778 int map, pos; 2815 int map, pos;
2779 struct vp_ctrl_entry_24xx *vce; 2816 struct vp_ctrl_entry_24xx *vce;
2780 dma_addr_t vce_dma; 2817 dma_addr_t vce_dma;
2781 scsi_qla_host_t *ha = vha->parent; 2818 struct qla_hw_data *ha = vha->hw;
2782 int vp_index = vha->vp_idx; 2819 int vp_index = vha->vp_idx;
2820 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2783 2821
2784 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__, 2822 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
2785 ha->host_no, vp_index)); 2823 vha->host_no, vp_index));
2786 2824
2787 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 2825 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
2788 return QLA_PARAMETER_ERROR; 2826 return QLA_PARAMETER_ERROR;
@@ -2791,7 +2829,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2791 if (!vce) { 2829 if (!vce) {
2792 DEBUG2_3(printk("%s(%ld): " 2830 DEBUG2_3(printk("%s(%ld): "
2793 "failed to allocate VP Control IOCB.\n", __func__, 2831 "failed to allocate VP Control IOCB.\n", __func__,
2794 ha->host_no)); 2832 base_vha->host_no));
2795 return QLA_MEMORY_ALLOC_FAILED; 2833 return QLA_MEMORY_ALLOC_FAILED;
2796 } 2834 }
2797 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); 2835 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -2810,30 +2848,30 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2810 vce->vp_idx_map[map] |= 1 << pos; 2848 vce->vp_idx_map[map] |= 1 << pos;
2811 mutex_unlock(&ha->vport_lock); 2849 mutex_unlock(&ha->vport_lock);
2812 2850
2813 rval = qla2x00_issue_iocb(ha, vce, vce_dma, 0); 2851 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
2814 if (rval != QLA_SUCCESS) { 2852 if (rval != QLA_SUCCESS) {
2815 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB" 2853 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
2816 "(%x).\n", __func__, ha->host_no, rval)); 2854 "(%x).\n", __func__, base_vha->host_no, rval));
2817 printk("%s(%ld): failed to issue VP control IOCB" 2855 printk("%s(%ld): failed to issue VP control IOCB"
2818 "(%x).\n", __func__, ha->host_no, rval); 2856 "(%x).\n", __func__, base_vha->host_no, rval);
2819 } else if (vce->entry_status != 0) { 2857 } else if (vce->entry_status != 0) {
2820 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2858 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2821 "-- error status (%x).\n", __func__, ha->host_no, 2859 "-- error status (%x).\n", __func__, base_vha->host_no,
2822 vce->entry_status)); 2860 vce->entry_status));
2823 printk("%s(%ld): failed to complete IOCB " 2861 printk("%s(%ld): failed to complete IOCB "
2824 "-- error status (%x).\n", __func__, ha->host_no, 2862 "-- error status (%x).\n", __func__, base_vha->host_no,
2825 vce->entry_status); 2863 vce->entry_status);
2826 rval = QLA_FUNCTION_FAILED; 2864 rval = QLA_FUNCTION_FAILED;
2827 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2865 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2828 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2866 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2829 "-- completion status (%x).\n", __func__, ha->host_no, 2867 "-- completion status (%x).\n", __func__, base_vha->host_no,
2830 le16_to_cpu(vce->comp_status))); 2868 le16_to_cpu(vce->comp_status)));
2831 printk("%s(%ld): failed to complete IOCB " 2869 printk("%s(%ld): failed to complete IOCB "
2832 "-- completion status (%x).\n", __func__, ha->host_no, 2870 "-- completion status (%x).\n", __func__, base_vha->host_no,
2833 le16_to_cpu(vce->comp_status)); 2871 le16_to_cpu(vce->comp_status));
2834 rval = QLA_FUNCTION_FAILED; 2872 rval = QLA_FUNCTION_FAILED;
2835 } else { 2873 } else {
2836 DEBUG2(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2874 DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no));
2837 } 2875 }
2838 2876
2839 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 2877 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -2863,7 +2901,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2863 */ 2901 */
2864 2902
2865int 2903int
2866qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format, 2904qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
2867 uint16_t vp_idx) 2905 uint16_t vp_idx)
2868{ 2906{
2869 int rval; 2907 int rval;
@@ -2884,7 +2922,7 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
2884 mcp->in_mb = MBX_0|MBX_1; 2922 mcp->in_mb = MBX_0|MBX_1;
2885 mcp->tov = MBX_TOV_SECONDS; 2923 mcp->tov = MBX_TOV_SECONDS;
2886 mcp->flags = 0; 2924 mcp->flags = 0;
2887 rval = qla2x00_mailbox_command(ha, mcp); 2925 rval = qla2x00_mailbox_command(vha, mcp);
2888 2926
2889 if (rval == QLA_SUCCESS) { 2927 if (rval == QLA_SUCCESS) {
2890 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2928 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -2897,16 +2935,16 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
2897} 2935}
2898 2936
2899int 2937int
2900qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr, 2938qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
2901 uint32_t size) 2939 uint32_t size)
2902{ 2940{
2903 int rval; 2941 int rval;
2904 mbx_cmd_t mc; 2942 mbx_cmd_t mc;
2905 mbx_cmd_t *mcp = &mc; 2943 mbx_cmd_t *mcp = &mc;
2906 2944
2907 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2945 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2908 2946
2909 if (MSW(addr) || IS_FWI2_CAPABLE(ha)) { 2947 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
2910 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 2948 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
2911 mcp->mb[8] = MSW(addr); 2949 mcp->mb[8] = MSW(addr);
2912 mcp->out_mb = MBX_8|MBX_0; 2950 mcp->out_mb = MBX_8|MBX_0;
@@ -2920,7 +2958,7 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
2920 mcp->mb[6] = MSW(MSD(req_dma)); 2958 mcp->mb[6] = MSW(MSD(req_dma));
2921 mcp->mb[7] = LSW(MSD(req_dma)); 2959 mcp->mb[7] = LSW(MSD(req_dma));
2922 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 2960 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
2923 if (IS_FWI2_CAPABLE(ha)) { 2961 if (IS_FWI2_CAPABLE(vha->hw)) {
2924 mcp->mb[4] = MSW(size); 2962 mcp->mb[4] = MSW(size);
2925 mcp->mb[5] = LSW(size); 2963 mcp->mb[5] = LSW(size);
2926 mcp->out_mb |= MBX_5|MBX_4; 2964 mcp->out_mb |= MBX_5|MBX_4;
@@ -2932,13 +2970,13 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
2932 mcp->in_mb = MBX_0; 2970 mcp->in_mb = MBX_0;
2933 mcp->tov = MBX_TOV_SECONDS; 2971 mcp->tov = MBX_TOV_SECONDS;
2934 mcp->flags = 0; 2972 mcp->flags = 0;
2935 rval = qla2x00_mailbox_command(ha, mcp); 2973 rval = qla2x00_mailbox_command(vha, mcp);
2936 2974
2937 if (rval != QLA_SUCCESS) { 2975 if (rval != QLA_SUCCESS) {
2938 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 2976 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
2939 ha->host_no, rval, mcp->mb[0])); 2977 vha->host_no, rval, mcp->mb[0]));
2940 } else { 2978 } else {
2941 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2979 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2942 } 2980 }
2943 2981
2944 return rval; 2982 return rval;
@@ -2954,20 +2992,21 @@ struct cs84xx_mgmt_cmd {
2954}; 2992};
2955 2993
2956int 2994int
2957qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status) 2995qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
2958{ 2996{
2959 int rval, retry; 2997 int rval, retry;
2960 struct cs84xx_mgmt_cmd *mn; 2998 struct cs84xx_mgmt_cmd *mn;
2961 dma_addr_t mn_dma; 2999 dma_addr_t mn_dma;
2962 uint16_t options; 3000 uint16_t options;
2963 unsigned long flags; 3001 unsigned long flags;
3002 struct qla_hw_data *ha = vha->hw;
2964 3003
2965 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 3004 DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2966 3005
2967 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3006 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
2968 if (mn == NULL) { 3007 if (mn == NULL) {
2969 DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX " 3008 DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
2970 "IOCB.\n", __func__, ha->host_no)); 3009 "IOCB.\n", __func__, vha->host_no));
2971 return QLA_MEMORY_ALLOC_FAILED; 3010 return QLA_MEMORY_ALLOC_FAILED;
2972 } 3011 }
2973 3012
@@ -2986,19 +3025,19 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
2986 mn->p.req.options = cpu_to_le16(options); 3025 mn->p.req.options = cpu_to_le16(options);
2987 3026
2988 DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__, 3027 DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
2989 ha->host_no)); 3028 vha->host_no));
2990 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3029 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
2991 sizeof(*mn))); 3030 sizeof(*mn)));
2992 3031
2993 rval = qla2x00_issue_iocb_timeout(ha, mn, mn_dma, 0, 120); 3032 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
2994 if (rval != QLA_SUCCESS) { 3033 if (rval != QLA_SUCCESS) {
2995 DEBUG2_16(printk("%s(%ld): failed to issue Verify " 3034 DEBUG2_16(printk("%s(%ld): failed to issue Verify "
2996 "IOCB (%x).\n", __func__, ha->host_no, rval)); 3035 "IOCB (%x).\n", __func__, vha->host_no, rval));
2997 goto verify_done; 3036 goto verify_done;
2998 } 3037 }
2999 3038
3000 DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__, 3039 DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
3001 ha->host_no)); 3040 vha->host_no));
3002 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3041 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
3003 sizeof(*mn))); 3042 sizeof(*mn)));
3004 3043
@@ -3006,21 +3045,21 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
3006 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3045 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3007 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3046 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3008 DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__, 3047 DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
3009 ha->host_no, status[0], status[1])); 3048 vha->host_no, status[0], status[1]));
3010 3049
3011 if (status[0] != CS_COMPLETE) { 3050 if (status[0] != CS_COMPLETE) {
3012 rval = QLA_FUNCTION_FAILED; 3051 rval = QLA_FUNCTION_FAILED;
3013 if (!(options & VCO_DONT_UPDATE_FW)) { 3052 if (!(options & VCO_DONT_UPDATE_FW)) {
3014 DEBUG2_16(printk("%s(%ld): Firmware update " 3053 DEBUG2_16(printk("%s(%ld): Firmware update "
3015 "failed. Retrying without update " 3054 "failed. Retrying without update "
3016 "firmware.\n", __func__, ha->host_no)); 3055 "firmware.\n", __func__, vha->host_no));
3017 options |= VCO_DONT_UPDATE_FW; 3056 options |= VCO_DONT_UPDATE_FW;
3018 options &= ~VCO_FORCE_UPDATE; 3057 options &= ~VCO_FORCE_UPDATE;
3019 retry = 1; 3058 retry = 1;
3020 } 3059 }
3021 } else { 3060 } else {
3022 DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n", 3061 DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
3023 __func__, ha->host_no, 3062 __func__, vha->host_no,
3024 le32_to_cpu(mn->p.rsp.fw_ver))); 3063 le32_to_cpu(mn->p.rsp.fw_ver)));
3025 3064
3026 /* NOTE: we only update OP firmware. */ 3065 /* NOTE: we only update OP firmware. */
@@ -3037,10 +3076,115 @@ verify_done:
3037 3076
3038 if (rval != QLA_SUCCESS) { 3077 if (rval != QLA_SUCCESS) {
3039 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, 3078 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
3040 ha->host_no, rval)); 3079 vha->host_no, rval));
3041 } else { 3080 } else {
3042 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3081 DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no));
3082 }
3083
3084 return rval;
3085}
3086
3087int
3088qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req,
3089 uint8_t options)
3090{
3091 int rval;
3092 unsigned long flags;
3093 mbx_cmd_t mc;
3094 mbx_cmd_t *mcp = &mc;
3095 struct device_reg_25xxmq __iomem *reg;
3096 struct qla_hw_data *ha = vha->hw;
3097
3098 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3099 mcp->mb[1] = options;
3100 mcp->mb[2] = MSW(LSD(req->dma));
3101 mcp->mb[3] = LSW(LSD(req->dma));
3102 mcp->mb[6] = MSW(MSD(req->dma));
3103 mcp->mb[7] = LSW(MSD(req->dma));
3104 mcp->mb[5] = req->length;
3105 if (req->rsp)
3106 mcp->mb[10] = req->rsp->id;
3107 mcp->mb[12] = req->qos;
3108 mcp->mb[11] = req->vp_idx;
3109 mcp->mb[13] = req->rid;
3110
3111 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3112 QLA_QUE_PAGE * req->id);
3113
3114 mcp->mb[4] = req->id;
3115 /* que in ptr index */
3116 mcp->mb[8] = 0;
3117 /* que out ptr index */
3118 mcp->mb[9] = 0;
3119 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3120 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3121 mcp->in_mb = MBX_0;
3122 mcp->flags = MBX_DMA_OUT;
3123 mcp->tov = 60;
3124
3125 spin_lock_irqsave(&ha->hardware_lock, flags);
3126 if (!(options & BIT_0)) {
3127 WRT_REG_DWORD(&reg->req_q_in, 0);
3128 WRT_REG_DWORD(&reg->req_q_out, 0);
3129 }
3130 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3131
3132 rval = qla2x00_mailbox_command(vha, mcp);
3133 if (rval != QLA_SUCCESS)
3134 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n",
3135 __func__, vha->host_no, rval, mcp->mb[0]));
3136 return rval;
3137}
3138
3139int
3140qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp,
3141 uint8_t options)
3142{
3143 int rval;
3144 unsigned long flags;
3145 mbx_cmd_t mc;
3146 mbx_cmd_t *mcp = &mc;
3147 struct device_reg_25xxmq __iomem *reg;
3148 struct qla_hw_data *ha = vha->hw;
3149
3150 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3151 mcp->mb[1] = options;
3152 mcp->mb[2] = MSW(LSD(rsp->dma));
3153 mcp->mb[3] = LSW(LSD(rsp->dma));
3154 mcp->mb[6] = MSW(MSD(rsp->dma));
3155 mcp->mb[7] = LSW(MSD(rsp->dma));
3156 mcp->mb[5] = rsp->length;
3157 mcp->mb[11] = rsp->vp_idx;
3158 mcp->mb[14] = rsp->msix->vector;
3159 mcp->mb[13] = rsp->rid;
3160
3161 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3162 QLA_QUE_PAGE * rsp->id);
3163
3164 mcp->mb[4] = rsp->id;
3165 /* que in ptr index */
3166 mcp->mb[8] = 0;
3167 /* que out ptr index */
3168 mcp->mb[9] = 0;
3169 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7
3170 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3171 mcp->in_mb = MBX_0;
3172 mcp->flags = MBX_DMA_OUT;
3173 mcp->tov = 60;
3174
3175 spin_lock_irqsave(&ha->hardware_lock, flags);
3176 if (!(options & BIT_0)) {
3177 WRT_REG_DWORD(&reg->rsp_q_out, 0);
3178 WRT_REG_DWORD(&reg->rsp_q_in, 0);
3043 } 3179 }
3044 3180
3181 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3182
3183 rval = qla2x00_mailbox_command(vha, mcp);
3184 if (rval != QLA_SUCCESS)
3185 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x "
3186 "mb0=%x.\n", __func__,
3187 vha->host_no, rval, mcp->mb[0]));
3045 return rval; 3188 return rval;
3046} 3189}
3190
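The two new helpers above issue MBC_INITIALIZE_MULTIQ for one request or response queue; when BIT_0 of options is clear they also zero the queue's in/out pointer registers before sending the mailbox command. A hedged sketch of how a queue-creation path might drive them once the queue structures are filled in; the caller below is hypothetical, not part of this patch:

/* Sketch (hypothetical caller): bring up a new queue pair with the
 * helpers above, assuming req/rsp are already allocated and their
 * ->dma, ->length, ->id, ->rid and ->vp_idx fields are populated. */
static int example_init_queue_pair(struct scsi_qla_host *vha,
    struct req_que *req, struct rsp_que *rsp)
{
	uint8_t options = 0;	/* BIT_0 clear: helpers reset in/out pointers */
	int ret;

	ret = qla25xx_init_rsp_que(vha, rsp, options);
	if (ret != QLA_SUCCESS)
		return ret;

	req->rsp = rsp;		/* mb[10] picks up the response queue id */
	return qla25xx_init_req_que(vha, req, options);
}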
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 93560cd72784..386ffeae5b5a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h"
8 9
9#include <linux/moduleparam.h> 10#include <linux/moduleparam.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -18,7 +19,7 @@
18void 19void
19qla2x00_vp_stop_timer(scsi_qla_host_t *vha) 20qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
20{ 21{
21 if (vha->parent && vha->timer_active) { 22 if (vha->vp_idx && vha->timer_active) {
22 del_timer_sync(&vha->timer); 23 del_timer_sync(&vha->timer);
23 vha->timer_active = 0; 24 vha->timer_active = 0;
24 } 25 }
@@ -28,7 +29,7 @@ static uint32_t
28qla24xx_allocate_vp_id(scsi_qla_host_t *vha) 29qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
29{ 30{
30 uint32_t vp_id; 31 uint32_t vp_id;
31 scsi_qla_host_t *ha = vha->parent; 32 struct qla_hw_data *ha = vha->hw;
32 33
33 /* Find an empty slot and assign an vp_id */ 34 /* Find an empty slot and assign an vp_id */
34 mutex_lock(&ha->vport_lock); 35 mutex_lock(&ha->vport_lock);
@@ -44,7 +45,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
44 ha->num_vhosts++; 45 ha->num_vhosts++;
45 ha->cur_vport_count++; 46 ha->cur_vport_count++;
46 vha->vp_idx = vp_id; 47 vha->vp_idx = vp_id;
47 list_add_tail(&vha->vp_list, &ha->vp_list); 48 list_add_tail(&vha->list, &ha->vp_list);
48 mutex_unlock(&ha->vport_lock); 49 mutex_unlock(&ha->vport_lock);
49 return vp_id; 50 return vp_id;
50} 51}
@@ -53,24 +54,24 @@ void
53qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) 54qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
54{ 55{
55 uint16_t vp_id; 56 uint16_t vp_id;
56 scsi_qla_host_t *ha = vha->parent; 57 struct qla_hw_data *ha = vha->hw;
57 58
58 mutex_lock(&ha->vport_lock); 59 mutex_lock(&ha->vport_lock);
59 vp_id = vha->vp_idx; 60 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 61 ha->num_vhosts--;
61 ha->cur_vport_count--; 62 ha->cur_vport_count--;
62 clear_bit(vp_id, ha->vp_idx_map); 63 clear_bit(vp_id, ha->vp_idx_map);
63 list_del(&vha->vp_list); 64 list_del(&vha->list);
64 mutex_unlock(&ha->vport_lock); 65 mutex_unlock(&ha->vport_lock);
65} 66}
66 67
67static scsi_qla_host_t * 68static scsi_qla_host_t *
68qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name) 69qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
69{ 70{
70 scsi_qla_host_t *vha; 71 scsi_qla_host_t *vha;
71 72
72 /* Locate matching device in database. */ 73 /* Locate matching device in database. */
73 list_for_each_entry(vha, &ha->vp_list, vp_list) { 74 list_for_each_entry(vha, &ha->vp_list, list) {
74 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) 75 if (!memcmp(port_name, vha->port_name, WWN_SIZE))
75 return vha; 76 return vha;
76 } 77 }
@@ -94,16 +95,13 @@ static void
94qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) 95qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
95{ 96{
96 fc_port_t *fcport; 97 fc_port_t *fcport;
97 scsi_qla_host_t *pha = to_qla_parent(vha);
98
99 list_for_each_entry(fcport, &pha->fcports, list) {
100 if (fcport->vp_idx != vha->vp_idx)
101 continue;
102 98
99 list_for_each_entry(fcport, &vha->vp_fcports, list) {
103 DEBUG15(printk("scsi(%ld): Marking port dead, " 100 DEBUG15(printk("scsi(%ld): Marking port dead, "
104 "loop_id=0x%04x :%x\n", 101 "loop_id=0x%04x :%x\n",
105 vha->host_no, fcport->loop_id, fcport->vp_idx)); 102 vha->host_no, fcport->loop_id, fcport->vp_idx));
106 103
104 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
107 qla2x00_mark_device_lost(vha, fcport, 0, 0); 105 qla2x00_mark_device_lost(vha, fcport, 0, 0);
108 atomic_set(&fcport->state, FCS_UNCONFIGURED); 106 atomic_set(&fcport->state, FCS_UNCONFIGURED);
109 } 107 }
@@ -118,7 +116,6 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
118 atomic_set(&vha->loop_state, LOOP_DOWN); 116 atomic_set(&vha->loop_state, LOOP_DOWN);
119 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 117 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
120 118
121 /* Delete all vp's fcports from parent's list */
122 qla2x00_mark_vp_devices_dead(vha); 119 qla2x00_mark_vp_devices_dead(vha);
123 atomic_set(&vha->vp_state, VP_FAILED); 120 atomic_set(&vha->vp_state, VP_FAILED);
124 vha->flags.management_server_logged_in = 0; 121 vha->flags.management_server_logged_in = 0;
@@ -135,11 +132,12 @@ int
135qla24xx_enable_vp(scsi_qla_host_t *vha) 132qla24xx_enable_vp(scsi_qla_host_t *vha)
136{ 133{
137 int ret; 134 int ret;
138 scsi_qla_host_t *ha = vha->parent; 135 struct qla_hw_data *ha = vha->hw;
136 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
139 137
140 /* Check if physical ha port is Up */ 138 /* Check if physical ha port is Up */
141 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 139 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
142 atomic_read(&ha->loop_state) == LOOP_DEAD ) { 140 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
143 vha->vp_err_state = VP_ERR_PORTDWN; 141 vha->vp_err_state = VP_ERR_PORTDWN;
144 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); 142 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
145 goto enable_failed; 143 goto enable_failed;
@@ -177,8 +175,8 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
177 vha->host_no, __func__)); 175 vha->host_no, __func__));
178 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); 176 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
179 if (ret != QLA_SUCCESS) { 177 if (ret != QLA_SUCCESS) {
180 DEBUG15(qla_printk(KERN_ERR, vha, "Failed to enable receiving" 178 DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
181 " of RSCN requests: 0x%x\n", ret)); 179 "receiving of RSCN requests: 0x%x\n", ret));
182 return; 180 return;
183 } else { 181 } else {
184 /* Corresponds to SCR enabled */ 182 /* Corresponds to SCR enabled */
@@ -194,25 +192,14 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
194} 192}
195 193
196void 194void
197qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb) 195qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
198{ 196{
199 int i, vp_idx_matched;
200 scsi_qla_host_t *vha; 197 scsi_qla_host_t *vha;
198 struct qla_hw_data *ha = rsp->hw;
199 int i = 0;
201 200
202 if (ha->parent) 201 list_for_each_entry(vha, &ha->vp_list, list) {
203 return; 202 if (vha->vp_idx) {
204
205 for_each_mapped_vp_idx(ha, i) {
206 vp_idx_matched = 0;
207
208 list_for_each_entry(vha, &ha->vp_list, vp_list) {
209 if (i == vha->vp_idx) {
210 vp_idx_matched = 1;
211 break;
212 }
213 }
214
215 if (vp_idx_matched) {
216 switch (mb[0]) { 203 switch (mb[0]) {
217 case MBA_LIP_OCCURRED: 204 case MBA_LIP_OCCURRED:
218 case MBA_LOOP_UP: 205 case MBA_LOOP_UP:
@@ -223,16 +210,17 @@ qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
223 case MBA_PORT_UPDATE: 210 case MBA_PORT_UPDATE:
224 case MBA_RSCN_UPDATE: 211 case MBA_RSCN_UPDATE:
225 DEBUG15(printk("scsi(%ld)%s: Async_event for" 212 DEBUG15(printk("scsi(%ld)%s: Async_event for"
226 " VP[%d], mb = 0x%x, vha=%p\n", 213 " VP[%d], mb = 0x%x, vha=%p\n",
227 vha->host_no, __func__,i, *mb, vha)); 214 vha->host_no, __func__, i, *mb, vha));
228 qla2x00_async_event(vha, mb); 215 qla2x00_async_event(vha, rsp, mb);
229 break; 216 break;
230 } 217 }
231 } 218 }
219 i++;
232 } 220 }
233} 221}
234 222
235void 223int
236qla2x00_vp_abort_isp(scsi_qla_host_t *vha) 224qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
237{ 225{
238 /* 226 /*
@@ -247,38 +235,56 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
247 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 235 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
248 } 236 }
249 237
238 /* To exclusively reset vport, we need to log it out first.*/
239 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
240 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
241
250 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n", 242 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
251 vha->host_no, vha->vp_idx)); 243 vha->host_no, vha->vp_idx));
252 qla24xx_enable_vp(vha); 244 return qla24xx_enable_vp(vha);
253} 245}
254 246
255static int 247static int
256qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 248qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
257{ 249{
258 scsi_qla_host_t *ha = vha->parent; 250 struct qla_hw_data *ha = vha->hw;
251 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
259 252
260 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 253 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
261 /* VP acquired. complete port configuration */ 254 /* VP acquired. complete port configuration */
262 if (atomic_read(&ha->loop_state) == LOOP_READY) { 255 if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
263 qla24xx_configure_vp(vha); 256 qla24xx_configure_vp(vha);
264 } else { 257 } else {
265 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); 258 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
266 set_bit(VP_DPC_NEEDED, &ha->dpc_flags); 259 set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
267 } 260 }
268 261
269 return 0; 262 return 0;
270 } 263 }
271 264
272 if (test_and_clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 265 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
273 qla2x00_vp_abort_isp(vha); 266 qla2x00_update_fcports(vha);
267 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
268 }
269
270 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
271 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
272 atomic_read(&vha->loop_state) != LOOP_DOWN) {
273
274 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
275 vha->host_no));
276 qla2x00_relogin(vha);
277
278 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
279 vha->host_no));
280 }
274 281
275 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 282 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
276 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) { 283 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
277 clear_bit(RESET_ACTIVE, &vha->dpc_flags); 284 clear_bit(RESET_ACTIVE, &vha->dpc_flags);
278 } 285 }
279 286
280 if (atomic_read(&vha->vp_state) == VP_ACTIVE && 287 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
281 test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
282 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 288 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
283 qla2x00_loop_resync(vha); 289 qla2x00_loop_resync(vha);
284 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 290 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -289,38 +295,30 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
289} 295}
290 296
291void 297void
292qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha) 298qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
293{ 299{
294 int ret; 300 int ret;
295 int i, vp_idx_matched; 301 struct qla_hw_data *ha = vha->hw;
296 scsi_qla_host_t *vha; 302 scsi_qla_host_t *vp;
297 303
298 if (ha->parent) 304 if (vha->vp_idx)
299 return; 305 return;
300 if (list_empty(&ha->vp_list)) 306 if (list_empty(&ha->vp_list))
301 return; 307 return;
302 308
303 clear_bit(VP_DPC_NEEDED, &ha->dpc_flags); 309 clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
304
305 for_each_mapped_vp_idx(ha, i) {
306 vp_idx_matched = 0;
307
308 list_for_each_entry(vha, &ha->vp_list, vp_list) {
309 if (i == vha->vp_idx) {
310 vp_idx_matched = 1;
311 break;
312 }
313 }
314 310
315 if (vp_idx_matched) 311 list_for_each_entry(vp, &ha->vp_list, list) {
316 ret = qla2x00_do_dpc_vp(vha); 312 if (vp->vp_idx)
313 ret = qla2x00_do_dpc_vp(vp);
317 } 314 }
318} 315}
319 316
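qla2x00_do_dpc_vp() above is driven entirely by per-vport flag bits that are tested and cleared atomically (test_and_clear_bit / test_and_set_bit), so each pending piece of work is claimed exactly once. A small C11 sketch of that idiom using stdatomic in place of the kernel bitops (the flag names are made up):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum {
        FLAG_FCPORT_UPDATE = 1u << 0,
        FLAG_RELOGIN       = 1u << 1,
        FLAG_LOOP_RESYNC   = 1u << 2,
};

static atomic_uint dpc_flags;

/* Atomically clear one flag and report whether it was set -- the
 * userspace analogue of test_and_clear_bit(). */
static bool test_and_clear_flag(unsigned int flag)
{
        unsigned int old = atomic_fetch_and(&dpc_flags, ~flag);
        return old & flag;
}

static void set_flag(unsigned int flag)
{
        atomic_fetch_or(&dpc_flags, flag);
}

int main(void)
{
        set_flag(FLAG_RELOGIN);

        /* Only the caller that actually clears the bit performs the work,
         * so the same event is never handled twice. */
        if (test_and_clear_flag(FLAG_RELOGIN))
                printf("relogin work claimed, running it now\n");
        if (test_and_clear_flag(FLAG_RELOGIN))
                printf("this line is not reached\n");
        return 0;
}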
320int 317int
321qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) 318qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
322{ 319{
323 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 320 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
321 struct qla_hw_data *ha = base_vha->hw;
324 scsi_qla_host_t *vha; 322 scsi_qla_host_t *vha;
325 uint8_t port_name[WWN_SIZE]; 323 uint8_t port_name[WWN_SIZE];
326 324
@@ -337,7 +335,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
337 335
338 /* Check up unique WWPN */ 336 /* Check up unique WWPN */
339 u64_to_wwn(fc_vport->port_name, port_name); 337 u64_to_wwn(fc_vport->port_name, port_name);
340 if (!memcmp(port_name, ha->port_name, WWN_SIZE)) 338 if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
341 return VPCERR_BAD_WWN; 339 return VPCERR_BAD_WWN;
342 vha = qla24xx_find_vhost_by_name(ha, port_name); 340 vha = qla24xx_find_vhost_by_name(ha, port_name);
343 if (vha) 341 if (vha)
@@ -346,7 +344,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
346 /* Check up max-npiv-supports */ 344 /* Check up max-npiv-supports */
347 if (ha->num_vhosts > ha->max_npiv_vports) { 345 if (ha->num_vhosts > ha->max_npiv_vports) {
348 DEBUG15(printk("scsi(%ld): num_vhosts %u is bigger than " 346 DEBUG15(printk("scsi(%ld): num_vhosts %u is bigger than "
349 "max_npiv_vports %u.\n", ha->host_no, 347 "max_npiv_vports %u.\n", base_vha->host_no,
350 ha->num_vhosts, ha->max_npiv_vports)); 348 ha->num_vhosts, ha->max_npiv_vports));
351 return VPCERR_UNSUPPORTED; 349 return VPCERR_UNSUPPORTED;
352 } 350 }
@@ -356,59 +354,34 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
356scsi_qla_host_t * 354scsi_qla_host_t *
357qla24xx_create_vhost(struct fc_vport *fc_vport) 355qla24xx_create_vhost(struct fc_vport *fc_vport)
358{ 356{
359 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 357 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
358 struct qla_hw_data *ha = base_vha->hw;
360 scsi_qla_host_t *vha; 359 scsi_qla_host_t *vha;
360 struct scsi_host_template *sht = &qla24xx_driver_template;
361 struct Scsi_Host *host; 361 struct Scsi_Host *host;
362 362
363 host = scsi_host_alloc(&qla24xx_driver_template, 363 vha = qla2x00_create_host(sht, ha);
364 sizeof(scsi_qla_host_t)); 364 if (!vha) {
365 if (!host) { 365 DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
366 printk(KERN_WARNING
367 "qla2xxx: scsi_host_alloc() failed for vport\n");
368 return(NULL); 366 return(NULL);
369 } 367 }
370 368
371 vha = shost_priv(host); 369 host = vha->host;
372
373 /* clone the parent hba */
374 memcpy(vha, ha, sizeof (scsi_qla_host_t));
375
376 fc_vport->dd_data = vha; 370 fc_vport->dd_data = vha;
377
378 vha->node_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
379 if (!vha->node_name)
380 goto create_vhost_failed_1;
381
382 vha->port_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
383 if (!vha->port_name)
384 goto create_vhost_failed_2;
385
386 /* New host info */ 371 /* New host info */
387 u64_to_wwn(fc_vport->node_name, vha->node_name); 372 u64_to_wwn(fc_vport->node_name, vha->node_name);
388 u64_to_wwn(fc_vport->port_name, vha->port_name); 373 u64_to_wwn(fc_vport->port_name, vha->port_name);
389 374
390 vha->host = host;
391 vha->host_no = host->host_no;
392 vha->parent = ha;
393 vha->fc_vport = fc_vport; 375 vha->fc_vport = fc_vport;
394 vha->device_flags = 0; 376 vha->device_flags = 0;
395 vha->vp_idx = qla24xx_allocate_vp_id(vha); 377 vha->vp_idx = qla24xx_allocate_vp_id(vha);
396 if (vha->vp_idx > ha->max_npiv_vports) { 378 if (vha->vp_idx > ha->max_npiv_vports) {
397 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 379 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
398 vha->host_no)); 380 vha->host_no));
399 goto create_vhost_failed_3; 381 goto create_vhost_failed;
400 } 382 }
401 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 383 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
402 384
403 init_completion(&vha->mbx_cmd_comp);
404 complete(&vha->mbx_cmd_comp);
405 init_completion(&vha->mbx_intr_comp);
406
407 INIT_LIST_HEAD(&vha->list);
408 INIT_LIST_HEAD(&vha->fcports);
409 INIT_LIST_HEAD(&vha->vp_fcports);
410 INIT_LIST_HEAD(&vha->work_list);
411
412 vha->dpc_flags = 0L; 385 vha->dpc_flags = 0L;
413 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 386 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
414 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 387 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
@@ -423,7 +396,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
423 396
424 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 397 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
425 398
426 host->can_queue = vha->request_q_length + 128; 399 memset(vha->req_ques, 0, sizeof(vha->req_ques) * QLA_MAX_HOST_QUES);
400 vha->req_ques[0] = ha->req_q_map[0]->id;
401 host->can_queue = ha->req_q_map[0]->length + 128;
427 host->this_id = 255; 402 host->this_id = 255;
428 host->cmd_per_lun = 3; 403 host->cmd_per_lun = 3;
429 host->max_cmd_len = MAX_CMDSZ; 404 host->max_cmd_len = MAX_CMDSZ;
@@ -440,12 +415,341 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
440 415
441 return vha; 416 return vha;
442 417
443create_vhost_failed_3: 418create_vhost_failed:
444 kfree(vha->port_name); 419 return NULL;
420}
445 421
446create_vhost_failed_2: 422static void
447 kfree(vha->node_name); 423qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
424{
425 struct qla_hw_data *ha = vha->hw;
426 uint16_t que_id = req->id;
427
428 dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
429 sizeof(request_t), req->ring, req->dma);
430 req->ring = NULL;
431 req->dma = 0;
432 if (que_id) {
433 ha->req_q_map[que_id] = NULL;
434 mutex_lock(&ha->vport_lock);
435 clear_bit(que_id, ha->req_qid_map);
436 mutex_unlock(&ha->vport_lock);
437 }
438 kfree(req);
439 req = NULL;
440}
448 441
449create_vhost_failed_1: 442static void
450 return NULL; 443qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
444{
445 struct qla_hw_data *ha = vha->hw;
446 uint16_t que_id = rsp->id;
447
448 if (rsp->msix && rsp->msix->have_irq) {
449 free_irq(rsp->msix->vector, rsp);
450 rsp->msix->have_irq = 0;
451 rsp->msix->rsp = NULL;
452 }
453 dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
454 sizeof(response_t), rsp->ring, rsp->dma);
455 rsp->ring = NULL;
456 rsp->dma = 0;
457 if (que_id) {
458 ha->rsp_q_map[que_id] = NULL;
459 mutex_lock(&ha->vport_lock);
460 clear_bit(que_id, ha->rsp_qid_map);
461 mutex_unlock(&ha->vport_lock);
462 }
463 kfree(rsp);
464 rsp = NULL;
465}
466
467int
468qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
469{
470 int ret = -1;
471
472 if (req) {
473 req->options |= BIT_0;
474 ret = qla25xx_init_req_que(vha, req, req->options);
475 }
476 if (ret == QLA_SUCCESS)
477 qla25xx_free_req_que(vha, req);
478
479 return ret;
480}
481
482int
483qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
484{
485 int ret = -1;
486
487 if (rsp) {
488 rsp->options |= BIT_0;
489 ret = qla25xx_init_rsp_que(vha, rsp, rsp->options);
490 }
491 if (ret == QLA_SUCCESS)
492 qla25xx_free_rsp_que(vha, rsp);
493
494 return ret;
495}
496
497int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
498{
499 int ret = 0;
500 struct qla_hw_data *ha = vha->hw;
501 struct req_que *req = ha->req_q_map[que];
502
503 req->options |= BIT_3;
504 req->qos = qos;
505 ret = qla25xx_init_req_que(vha, req, req->options);
506 if (ret != QLA_SUCCESS)
507 DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
508 /* restore options bit */
509 req->options &= ~BIT_3;
510 return ret;
511}
512
513
514/* Delete all queues for a given vhost */
515int
516qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no)
517{
518 int cnt, ret = 0;
519 struct req_que *req = NULL;
520 struct rsp_que *rsp = NULL;
521 struct qla_hw_data *ha = vha->hw;
522
523 if (que_no) {
524 /* Delete request queue */
525 req = ha->req_q_map[que_no];
526 if (req) {
527 rsp = req->rsp;
528 ret = qla25xx_delete_req_que(vha, req);
529 if (ret != QLA_SUCCESS) {
530 qla_printk(KERN_WARNING, ha,
531 "Couldn't delete req que %d\n", req->id);
532 return ret;
533 }
534 /* Delete associated response queue */
535 if (rsp) {
536 ret = qla25xx_delete_rsp_que(vha, rsp);
537 if (ret != QLA_SUCCESS) {
538 qla_printk(KERN_WARNING, ha,
539 "Couldn't delete rsp que %d\n",
540 rsp->id);
541 return ret;
542 }
543 }
544 }
545 } else { /* delete all queues of this host */
546 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) {
547 /* Delete request queues */
548 req = ha->req_q_map[vha->req_ques[cnt]];
549 if (req && req->id) {
550 rsp = req->rsp;
551 ret = qla25xx_delete_req_que(vha, req);
552 if (ret != QLA_SUCCESS) {
553 qla_printk(KERN_WARNING, ha,
554 "Couldn't delete req que %d\n",
555 vha->req_ques[cnt]);
556 return ret;
557 }
558 vha->req_ques[cnt] = ha->req_q_map[0]->id;
559 /* Delete associated response queue */
560 if (rsp && rsp->id) {
561 ret = qla25xx_delete_rsp_que(vha, rsp);
562 if (ret != QLA_SUCCESS) {
563 qla_printk(KERN_WARNING, ha,
564 "Couldn't delete rsp que %d\n",
565 rsp->id);
566 return ret;
567 }
568 }
569 }
570 }
571 }
572 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
573 vha->vp_idx);
574 return ret;
575}
576
577int
578qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
579 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos)
580{
581 int ret = 0;
582 struct req_que *req = NULL;
583 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
584 uint16_t que_id = 0;
585
586 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
587 if (req == NULL) {
588 qla_printk(KERN_WARNING, ha, "could not allocate memory "
589 "for request que\n");
590 goto que_failed;
591 }
592
593 req->length = REQUEST_ENTRY_CNT_24XX;
594 req->ring = dma_alloc_coherent(&ha->pdev->dev,
595 (req->length + 1) * sizeof(request_t),
596 &req->dma, GFP_KERNEL);
597 if (req->ring == NULL) {
598 qla_printk(KERN_WARNING, ha,
599 "Memory Allocation failed - request_ring\n");
600 goto que_failed;
601 }
602
603 mutex_lock(&ha->vport_lock);
604 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
605 if (que_id >= ha->max_queues) {
606 mutex_unlock(&ha->vport_lock);
607 qla_printk(KERN_INFO, ha, "No resources to create "
608 "additional request queue\n");
609 goto que_failed;
610 }
611 set_bit(que_id, ha->req_qid_map);
612 ha->req_q_map[que_id] = req;
613 req->rid = rid;
614 req->vp_idx = vp_idx;
615 req->qos = qos;
616
617 if (ha->rsp_q_map[rsp_que])
618 req->rsp = ha->rsp_q_map[rsp_que];
619 /* Use alternate PCI bus number */
620 if (MSB(req->rid))
621 options |= BIT_4;
622 /* Use alternate PCI devfn */
623 if (LSB(req->rid))
624 options |= BIT_5;
625 req->options = options;
626 req->ring_ptr = req->ring;
627 req->ring_index = 0;
628 req->cnt = req->length;
629 req->id = que_id;
630 mutex_unlock(&ha->vport_lock);
631
632 ret = qla25xx_init_req_que(base_vha, req, options);
633 if (ret != QLA_SUCCESS) {
634 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
635 mutex_lock(&ha->vport_lock);
636 clear_bit(que_id, ha->req_qid_map);
637 mutex_unlock(&ha->vport_lock);
638 goto que_failed;
639 }
640
641 return req->id;
642
643que_failed:
644 qla25xx_free_req_que(base_vha, req);
645 return 0;
646}
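qla25xx_create_req_que() above follows the usual multi-step construction shape: allocate the descriptor, allocate the ring, grab a free queue id from the qid bitmap under vport_lock, publish the queue in the map, and on any failure fall through to a single cleanup path. A stripped-down userspace version of that shape (hypothetical sizes and names, plain malloc instead of dma_alloc_coherent):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 32
#define RING_ENTRIES 128

struct ring_queue {
        uint16_t id;
        uint16_t length;
        void *ring;
};

static uint32_t qid_map = 0x1;                  /* bit 0 reserved for queue 0 */
static struct ring_queue *q_map[MAX_QUEUES];
static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns the new queue id, or 0 on failure (id 0 is never handed out). */
static uint16_t create_ring_queue(void)
{
        struct ring_queue *q;
        uint16_t id;

        q = calloc(1, sizeof(*q));
        if (!q)
                goto fail;

        q->length = RING_ENTRIES;
        q->ring = calloc(q->length + 1, 64);    /* say, 64-byte entries */
        if (!q->ring)
                goto fail;

        pthread_mutex_lock(&vport_lock);
        for (id = 1; id < MAX_QUEUES && (qid_map & (1u << id)); id++)
                ;
        if (id >= MAX_QUEUES) {                 /* no free slot */
                pthread_mutex_unlock(&vport_lock);
                goto fail;
        }
        qid_map |= 1u << id;                    /* claim the id ...          */
        q->id = id;
        q_map[id] = q;                          /* ... and publish the queue */
        pthread_mutex_unlock(&vport_lock);

        return q->id;

fail:                                           /* single unwind path */
        if (q)
                free(q->ring);
        free(q);
        return 0;
}

int main(void)
{
        printf("created queue %u\n", create_ring_queue());
        return 0;
}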
647
648/* create response queue */
649int
650qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
651 uint8_t vp_idx, uint16_t rid)
652{
653 int ret = 0;
654 struct rsp_que *rsp = NULL;
655 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
656 uint16_t que_id = 0;
657
658 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
659 if (rsp == NULL) {
660 qla_printk(KERN_WARNING, ha, "could not allocate memory for"
661 " response que\n");
662 goto que_failed;
663 }
664
665 rsp->length = RESPONSE_ENTRY_CNT_2300;
666 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
667 (rsp->length + 1) * sizeof(response_t),
668 &rsp->dma, GFP_KERNEL);
669 if (rsp->ring == NULL) {
670 qla_printk(KERN_WARNING, ha,
671 "Memory Allocation failed - response_ring\n");
672 goto que_failed;
673 }
674
675 mutex_lock(&ha->vport_lock);
676 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
677 if (que_id >= ha->max_queues) {
678 mutex_unlock(&ha->vport_lock);
679 qla_printk(KERN_INFO, ha, "No resources to create "
680 "additional response queue\n");
681 goto que_failed;
682 }
683 set_bit(que_id, ha->rsp_qid_map);
684
685 if (ha->flags.msix_enabled)
686 rsp->msix = &ha->msix_entries[que_id + 1];
687 else
688 qla_printk(KERN_WARNING, ha, "msix not enabled\n");
689
690 ha->rsp_q_map[que_id] = rsp;
691 rsp->rid = rid;
692 rsp->vp_idx = vp_idx;
693 rsp->hw = ha;
694 /* Use alternate PCI bus number */
695 if (MSB(rsp->rid))
696 options |= BIT_4;
697 /* Use alternate PCI devfn */
698 if (LSB(rsp->rid))
699 options |= BIT_5;
700 rsp->options = options;
701 rsp->ring_ptr = rsp->ring;
702 rsp->ring_index = 0;
703 rsp->id = que_id;
704 mutex_unlock(&ha->vport_lock);
705
706 ret = qla25xx_request_irq(rsp);
707 if (ret)
708 goto que_failed;
709
710 ret = qla25xx_init_rsp_que(base_vha, rsp, options);
711 if (ret != QLA_SUCCESS) {
712 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
713 mutex_lock(&ha->vport_lock);
714 clear_bit(que_id, ha->rsp_qid_map);
715 mutex_unlock(&ha->vport_lock);
716 goto que_failed;
717 }
718
719 qla2x00_init_response_q_entries(rsp);
720
721 return rsp->id;
722
723que_failed:
724 qla25xx_free_rsp_que(base_vha, rsp);
725 return 0;
726}
727
728int
729qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
730{
731 uint16_t options = 0;
732 uint8_t ret = 0;
733 struct qla_hw_data *ha = vha->hw;
734
735 options |= BIT_1;
736 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0);
737 if (!ret) {
738 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
739 return ret;
740 } else
741 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
742
743 options = 0;
744 if (qos & BIT_7)
745 options |= BIT_8;
746 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
747 qos & ~BIT_7);
748 if (ret) {
749 vha->req_ques[0] = ret;
750 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
751 } else
752 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
753
754 return ret;
451} 755}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 35567203ef61..8ea927788b3f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -92,7 +92,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
92 "Enables iIDMA settings " 92 "Enables iIDMA settings "
93 "Default is 1 - perform iIDMA. 0 - no iIDMA."); 93 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
94 94
95 95int ql2xmaxqueues = 1;
96module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
97MODULE_PARM_DESC(ql2xmaxqueues,
98 "Enables MQ settings "
99 "Default is 1 for single queue. Set it to number \
100 of queues in MQ mode.");
96/* 101/*
97 * SCSI host template entry points 102 * SCSI host template entry points
98 */ 103 */
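The new ql2xmaxqueues parameter above is a standard read-only integer module parameter. For reference, a self-contained and purely illustrative module (not part of qla2xxx) showing the same declaration pattern:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_max_queues = 1;
module_param(example_max_queues, int, 0444);    /* readable via sysfs, not writable */
MODULE_PARM_DESC(example_max_queues,
        "Number of queues to create. Default is 1 for single queue mode.");

static int __init example_init(void)
{
        printk(KERN_INFO "example: max_queues=%d\n", example_max_queues);
        return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");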
@@ -183,42 +188,108 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
183 */ 188 */
184 189
185__inline__ void 190__inline__ void
186qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 191qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
187{ 192{
188 init_timer(&ha->timer); 193 init_timer(&vha->timer);
189 ha->timer.expires = jiffies + interval * HZ; 194 vha->timer.expires = jiffies + interval * HZ;
190 ha->timer.data = (unsigned long)ha; 195 vha->timer.data = (unsigned long)vha;
191 ha->timer.function = (void (*)(unsigned long))func; 196 vha->timer.function = (void (*)(unsigned long))func;
192 add_timer(&ha->timer); 197 add_timer(&vha->timer);
193 ha->timer_active = 1; 198 vha->timer_active = 1;
194} 199}
195 200
196static inline void 201static inline void
197qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval) 202qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
198{ 203{
199 mod_timer(&ha->timer, jiffies + interval * HZ); 204 mod_timer(&vha->timer, jiffies + interval * HZ);
200} 205}
201 206
202static __inline__ void 207static __inline__ void
203qla2x00_stop_timer(scsi_qla_host_t *ha) 208qla2x00_stop_timer(scsi_qla_host_t *vha)
204{ 209{
205 del_timer_sync(&ha->timer); 210 del_timer_sync(&vha->timer);
206 ha->timer_active = 0; 211 vha->timer_active = 0;
207} 212}
208 213
209static int qla2x00_do_dpc(void *data); 214static int qla2x00_do_dpc(void *data);
210 215
211static void qla2x00_rst_aen(scsi_qla_host_t *); 216static void qla2x00_rst_aen(scsi_qla_host_t *);
212 217
213static int qla2x00_mem_alloc(scsi_qla_host_t *); 218static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
214static void qla2x00_mem_free(scsi_qla_host_t *ha); 219 struct req_que **, struct rsp_que **);
215static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 220static void qla2x00_mem_free(struct qla_hw_data *);
221static void qla2x00_sp_free_dma(srb_t *);
216 222
217/* -------------------------------------------------------------------------- */ 223/* -------------------------------------------------------------------------- */
224static int qla2x00_alloc_queues(struct qla_hw_data *ha)
225{
226 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues,
227 GFP_KERNEL);
228 if (!ha->req_q_map) {
229 qla_printk(KERN_WARNING, ha,
230 "Unable to allocate memory for request queue ptrs\n");
231 goto fail_req_map;
232 }
233
234 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues,
235 GFP_KERNEL);
236 if (!ha->rsp_q_map) {
237 qla_printk(KERN_WARNING, ha,
238 "Unable to allocate memory for response queue ptrs\n");
239 goto fail_rsp_map;
240 }
241 set_bit(0, ha->rsp_qid_map);
242 set_bit(0, ha->req_qid_map);
243 return 1;
244
245fail_rsp_map:
246 kfree(ha->req_q_map);
247 ha->req_q_map = NULL;
248fail_req_map:
249 return -ENOMEM;
250}
251
252static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
253 struct rsp_que *rsp)
254{
255 if (rsp && rsp->ring)
256 dma_free_coherent(&ha->pdev->dev,
257 (rsp->length + 1) * sizeof(response_t),
258 rsp->ring, rsp->dma);
259
260 kfree(rsp);
261 rsp = NULL;
262 if (req && req->ring)
263 dma_free_coherent(&ha->pdev->dev,
264 (req->length + 1) * sizeof(request_t),
265 req->ring, req->dma);
266
267 kfree(req);
268 req = NULL;
269}
270
271static void qla2x00_free_queues(struct qla_hw_data *ha)
272{
273 struct req_que *req;
274 struct rsp_que *rsp;
275 int cnt;
276
277 for (cnt = 0; cnt < ha->max_queues; cnt++) {
278 rsp = ha->rsp_q_map[cnt];
279 req = ha->req_q_map[cnt];
280 qla2x00_free_que(ha, req, rsp);
281 }
282 kfree(ha->rsp_q_map);
283 ha->rsp_q_map = NULL;
284
285 kfree(ha->req_q_map);
286 ha->req_q_map = NULL;
287}
218 288
219static char * 289static char *
220qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str) 290qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
221{ 291{
292 struct qla_hw_data *ha = vha->hw;
222 static char *pci_bus_modes[] = { 293 static char *pci_bus_modes[] = {
223 "33", "66", "100", "133", 294 "33", "66", "100", "133",
224 }; 295 };
@@ -240,9 +311,10 @@ qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
240} 311}
241 312
242static char * 313static char *
243qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) 314qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
244{ 315{
245 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 316 static char *pci_bus_modes[] = { "33", "66", "100", "133", };
317 struct qla_hw_data *ha = vha->hw;
246 uint32_t pci_bus; 318 uint32_t pci_bus;
247 int pcie_reg; 319 int pcie_reg;
248 320
@@ -290,9 +362,10 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
290} 362}
291 363
292static char * 364static char *
293qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 365qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
294{ 366{
295 char un_str[10]; 367 char un_str[10];
368 struct qla_hw_data *ha = vha->hw;
296 369
297 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 370 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
298 ha->fw_minor_version, 371 ha->fw_minor_version,
@@ -328,8 +401,9 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
328} 401}
329 402
330static char * 403static char *
331qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 404qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
332{ 405{
406 struct qla_hw_data *ha = vha->hw;
333 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 407 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
334 ha->fw_minor_version, 408 ha->fw_minor_version,
335 ha->fw_subminor_version); 409 ha->fw_subminor_version);
@@ -354,18 +428,20 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
354} 428}
355 429
356static inline srb_t * 430static inline srb_t *
357qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport, 431qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
358 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 432 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
359{ 433{
360 srb_t *sp; 434 srb_t *sp;
435 struct qla_hw_data *ha = vha->hw;
361 436
362 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 437 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
363 if (!sp) 438 if (!sp)
364 return sp; 439 return sp;
365 440
366 sp->ha = ha; 441 sp->vha = vha;
367 sp->fcport = fcport; 442 sp->fcport = fcport;
368 sp->cmd = cmd; 443 sp->cmd = cmd;
444 sp->que = ha->req_q_map[0];
369 sp->flags = 0; 445 sp->flags = 0;
370 CMD_SP(cmd) = (void *)sp; 446 CMD_SP(cmd) = (void *)sp;
371 cmd->scsi_done = done; 447 cmd->scsi_done = done;
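qla2x00_get_new_sp() draws its srb_t from a mempool so command setup never hits the general allocator on the I/O path. A toy fixed-size free-list pool in plain C illustrating the idea (no relation to the kernel's mempool API; no locking, since the caller is assumed to serialise, as the driver does via host_lock):

#include <stdio.h>
#include <stdlib.h>

struct srb {                            /* stand-in for the driver's srb_t */
        struct srb *next_free;
        int in_use;
};

#define POOL_SIZE 64

static struct srb pool[POOL_SIZE];
static struct srb *free_list;

static void pool_init(void)
{
        int i;

        for (i = 0; i < POOL_SIZE; i++) {
                pool[i].next_free = free_list;
                free_list = &pool[i];
        }
}

/* O(1) allocation from pre-reserved storage, like mempool_alloc(). */
static struct srb *sp_get(void)
{
        struct srb *sp = free_list;

        if (!sp)
                return NULL;            /* pool exhausted */
        free_list = sp->next_free;
        sp->in_use = 1;
        return sp;
}

static void sp_put(struct srb *sp)
{
        sp->in_use = 0;
        sp->next_free = free_list;
        free_list = sp;
}

int main(void)
{
        struct srb *sp;

        pool_init();
        sp = sp_get();
        printf("got sp %p from the pool\n", (void *)sp);
        sp_put(sp);
        return 0;
}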
@@ -376,9 +452,10 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
376static int 452static int
377qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 453qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
378{ 454{
379 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 455 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
380 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 456 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
381 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 457 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
458 struct qla_hw_data *ha = vha->hw;
382 srb_t *sp; 459 srb_t *sp;
383 int rval; 460 int rval;
384 461
@@ -399,33 +476,33 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
399 476
400 if (atomic_read(&fcport->state) != FCS_ONLINE) { 477 if (atomic_read(&fcport->state) != FCS_ONLINE) {
401 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 478 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
402 atomic_read(&ha->loop_state) == LOOP_DEAD) { 479 atomic_read(&vha->loop_state) == LOOP_DEAD) {
403 cmd->result = DID_NO_CONNECT << 16; 480 cmd->result = DID_NO_CONNECT << 16;
404 goto qc_fail_command; 481 goto qc_fail_command;
405 } 482 }
406 goto qc_target_busy; 483 goto qc_target_busy;
407 } 484 }
408 485
409 spin_unlock_irq(ha->host->host_lock); 486 spin_unlock_irq(vha->host->host_lock);
410 487
411 sp = qla2x00_get_new_sp(ha, fcport, cmd, done); 488 sp = qla2x00_get_new_sp(vha, fcport, cmd, done);
412 if (!sp) 489 if (!sp)
413 goto qc_host_busy_lock; 490 goto qc_host_busy_lock;
414 491
415 rval = qla2x00_start_scsi(sp); 492 rval = ha->isp_ops->start_scsi(sp);
416 if (rval != QLA_SUCCESS) 493 if (rval != QLA_SUCCESS)
417 goto qc_host_busy_free_sp; 494 goto qc_host_busy_free_sp;
418 495
419 spin_lock_irq(ha->host->host_lock); 496 spin_lock_irq(vha->host->host_lock);
420 497
421 return 0; 498 return 0;
422 499
423qc_host_busy_free_sp: 500qc_host_busy_free_sp:
424 qla2x00_sp_free_dma(ha, sp); 501 qla2x00_sp_free_dma(sp);
425 mempool_free(sp, ha->srb_mempool); 502 mempool_free(sp, ha->srb_mempool);
426 503
427qc_host_busy_lock: 504qc_host_busy_lock:
428 spin_lock_irq(ha->host->host_lock); 505 spin_lock_irq(vha->host->host_lock);
429 return SCSI_MLQUEUE_HOST_BUSY; 506 return SCSI_MLQUEUE_HOST_BUSY;
430 507
431qc_target_busy: 508qc_target_busy:
@@ -441,14 +518,15 @@ qc_fail_command:
441static int 518static int
442qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 519qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
443{ 520{
444 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 521 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
445 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 522 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
446 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 523 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
524 struct qla_hw_data *ha = vha->hw;
525 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
447 srb_t *sp; 526 srb_t *sp;
448 int rval; 527 int rval;
449 scsi_qla_host_t *pha = to_qla_parent(ha);
450 528
451 if (unlikely(pci_channel_offline(pha->pdev))) { 529 if (unlikely(pci_channel_offline(ha->pdev))) {
452 cmd->result = DID_REQUEUE << 16; 530 cmd->result = DID_REQUEUE << 16;
453 goto qc24_fail_command; 531 goto qc24_fail_command;
454 } 532 }
@@ -465,33 +543,33 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
465 543
466 if (atomic_read(&fcport->state) != FCS_ONLINE) { 544 if (atomic_read(&fcport->state) != FCS_ONLINE) {
467 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 545 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
468 atomic_read(&pha->loop_state) == LOOP_DEAD) { 546 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
469 cmd->result = DID_NO_CONNECT << 16; 547 cmd->result = DID_NO_CONNECT << 16;
470 goto qc24_fail_command; 548 goto qc24_fail_command;
471 } 549 }
472 goto qc24_target_busy; 550 goto qc24_target_busy;
473 } 551 }
474 552
475 spin_unlock_irq(ha->host->host_lock); 553 spin_unlock_irq(vha->host->host_lock);
476 554
477 sp = qla2x00_get_new_sp(pha, fcport, cmd, done); 555 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
478 if (!sp) 556 if (!sp)
479 goto qc24_host_busy_lock; 557 goto qc24_host_busy_lock;
480 558
481 rval = qla24xx_start_scsi(sp); 559 rval = ha->isp_ops->start_scsi(sp);
482 if (rval != QLA_SUCCESS) 560 if (rval != QLA_SUCCESS)
483 goto qc24_host_busy_free_sp; 561 goto qc24_host_busy_free_sp;
484 562
485 spin_lock_irq(ha->host->host_lock); 563 spin_lock_irq(vha->host->host_lock);
486 564
487 return 0; 565 return 0;
488 566
489qc24_host_busy_free_sp: 567qc24_host_busy_free_sp:
490 qla2x00_sp_free_dma(pha, sp); 568 qla2x00_sp_free_dma(sp);
491 mempool_free(sp, pha->srb_mempool); 569 mempool_free(sp, ha->srb_mempool);
492 570
493qc24_host_busy_lock: 571qc24_host_busy_lock:
494 spin_lock_irq(ha->host->host_lock); 572 spin_lock_irq(vha->host->host_lock);
495 return SCSI_MLQUEUE_HOST_BUSY; 573 return SCSI_MLQUEUE_HOST_BUSY;
496 574
497qc24_target_busy: 575qc24_target_busy:
@@ -510,17 +588,14 @@ qc24_fail_command:
510 * max time. 588 * max time.
511 * 589 *
512 * Input: 590 * Input:
513 * ha = actual ha whose done queue will contain the command
514 * returned by firmware.
515 * cmd = Scsi Command to wait on. 591 * cmd = Scsi Command to wait on.
516 * flag = Abort/Reset(Bus or Device Reset)
517 * 592 *
518 * Return: 593 * Return:
519 * Not Found : 0 594 * Not Found : 0
520 * Found : 1 595 * Found : 1
521 */ 596 */
522static int 597static int
523qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) 598qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
524{ 599{
525#define ABORT_POLLING_PERIOD 1000 600#define ABORT_POLLING_PERIOD 1000
526#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 601#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
@@ -557,21 +632,22 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
557 * Failed (Adapter is offline/disabled) : 1 632 * Failed (Adapter is offline/disabled) : 1
558 */ 633 */
559int 634int
560qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 635qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
561{ 636{
562 int return_status; 637 int return_status;
563 unsigned long wait_online; 638 unsigned long wait_online;
564 scsi_qla_host_t *pha = to_qla_parent(ha); 639 struct qla_hw_data *ha = vha->hw;
640 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
565 641
566 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 642 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
567 while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) || 643 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
568 test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) || 644 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
569 test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) || 645 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
570 pha->dpc_active) && time_before(jiffies, wait_online)) { 646 ha->dpc_active) && time_before(jiffies, wait_online)) {
571 647
572 msleep(1000); 648 msleep(1000);
573 } 649 }
574 if (pha->flags.online) 650 if (base_vha->flags.online)
575 return_status = QLA_SUCCESS; 651 return_status = QLA_SUCCESS;
576 else 652 else
577 return_status = QLA_FUNCTION_FAILED; 653 return_status = QLA_FUNCTION_FAILED;
@@ -596,19 +672,20 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
596 * Failed (LOOP_NOT_READY) : 1 672 * Failed (LOOP_NOT_READY) : 1
597 */ 673 */
598static inline int 674static inline int
599qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) 675qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
600{ 676{
601 int return_status = QLA_SUCCESS; 677 int return_status = QLA_SUCCESS;
602 unsigned long loop_timeout ; 678 unsigned long loop_timeout ;
603 scsi_qla_host_t *pha = to_qla_parent(ha); 679 struct qla_hw_data *ha = vha->hw;
680 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
604 681
605 /* wait for 5 min at the max for loop to be ready */ 682 /* wait for 5 min at the max for loop to be ready */
606 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ); 683 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
607 684
608 while ((!atomic_read(&pha->loop_down_timer) && 685 while ((!atomic_read(&base_vha->loop_down_timer) &&
609 atomic_read(&pha->loop_state) == LOOP_DOWN) || 686 atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
610 atomic_read(&pha->loop_state) != LOOP_READY) { 687 atomic_read(&base_vha->loop_state) != LOOP_READY) {
611 if (atomic_read(&pha->loop_state) == LOOP_DEAD) { 688 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
612 return_status = QLA_FUNCTION_FAILED; 689 return_status = QLA_FUNCTION_FAILED;
613 break; 690 break;
614 } 691 }
@@ -624,35 +701,42 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
624void 701void
625qla2x00_abort_fcport_cmds(fc_port_t *fcport) 702qla2x00_abort_fcport_cmds(fc_port_t *fcport)
626{ 703{
627 int cnt; 704 int cnt, que, id;
628 unsigned long flags; 705 unsigned long flags;
629 srb_t *sp; 706 srb_t *sp;
630 scsi_qla_host_t *ha = fcport->ha; 707 scsi_qla_host_t *vha = fcport->vha;
631 scsi_qla_host_t *pha = to_qla_parent(ha); 708 struct qla_hw_data *ha = vha->hw;
709 struct req_que *req;
632 710
633 spin_lock_irqsave(&pha->hardware_lock, flags); 711 spin_lock_irqsave(&ha->hardware_lock, flags);
634 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 712 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
635 sp = pha->outstanding_cmds[cnt]; 713 id = vha->req_ques[que];
636 if (!sp) 714 req = ha->req_q_map[id];
637 continue; 715 if (!req)
638 if (sp->fcport != fcport)
639 continue; 716 continue;
717 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
718 sp = req->outstanding_cmds[cnt];
719 if (!sp)
720 continue;
721 if (sp->fcport != fcport)
722 continue;
640 723
641 spin_unlock_irqrestore(&pha->hardware_lock, flags); 724 spin_unlock_irqrestore(&ha->hardware_lock, flags);
642 if (ha->isp_ops->abort_command(ha, sp)) { 725 if (ha->isp_ops->abort_command(vha, sp, req)) {
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed -- %lx\n", sp->cmd->serial_number));
645 } else {
646 if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
647 QLA_SUCCESS)
648 DEBUG2(qla_printk(KERN_WARNING, ha, 726 DEBUG2(qla_printk(KERN_WARNING, ha,
649 "Abort failed while waiting -- %lx\n", 727 "Abort failed -- %lx\n",
650 sp->cmd->serial_number)); 728 sp->cmd->serial_number));
651 729 } else {
730 if (qla2x00_eh_wait_on_command(sp->cmd) !=
731 QLA_SUCCESS)
732 DEBUG2(qla_printk(KERN_WARNING, ha,
733 "Abort failed while waiting -- %lx\n",
734 sp->cmd->serial_number));
735 }
736 spin_lock_irqsave(&ha->hardware_lock, flags);
652 } 737 }
653 spin_lock_irqsave(&pha->hardware_lock, flags);
654 } 738 }
655 spin_unlock_irqrestore(&pha->hardware_lock, flags); 739 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656} 740}
657 741
658static void 742static void
@@ -690,14 +774,16 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
690static int 774static int
691qla2xxx_eh_abort(struct scsi_cmnd *cmd) 775qla2xxx_eh_abort(struct scsi_cmnd *cmd)
692{ 776{
693 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 777 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
694 srb_t *sp; 778 srb_t *sp;
695 int ret, i; 779 int ret, i;
696 unsigned int id, lun; 780 unsigned int id, lun;
697 unsigned long serial; 781 unsigned long serial;
698 unsigned long flags; 782 unsigned long flags;
699 int wait = 0; 783 int wait = 0;
700 scsi_qla_host_t *pha = to_qla_parent(ha); 784 struct qla_hw_data *ha = vha->hw;
785 struct req_que *req;
786 srb_t *spt;
701 787
702 qla2x00_block_error_handler(cmd); 788 qla2x00_block_error_handler(cmd);
703 789
@@ -709,11 +795,15 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 id = cmd->device->id; 795 id = cmd->device->id;
710 lun = cmd->device->lun; 796 lun = cmd->device->lun;
711 serial = cmd->serial_number; 797 serial = cmd->serial_number;
798 spt = (srb_t *) CMD_SP(cmd);
799 if (!spt)
800 return SUCCESS;
801 req = spt->que;
712 802
713 /* Check active list for the command. */ 803 /* Check active list for the command. */
714 spin_lock_irqsave(&pha->hardware_lock, flags); 804 spin_lock_irqsave(&ha->hardware_lock, flags);
715 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 805 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
716 sp = pha->outstanding_cmds[i]; 806 sp = req->outstanding_cmds[i];
717 807
718 if (sp == NULL) 808 if (sp == NULL)
719 continue; 809 continue;
@@ -721,38 +811,36 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
721 if (sp->cmd != cmd) 811 if (sp->cmd != cmd)
722 continue; 812 continue;
723 813
724 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 814 DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
725 __func__, ha->host_no, sp, serial)); 815 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
726 816
727 spin_unlock_irqrestore(&pha->hardware_lock, flags); 817 spin_unlock_irqrestore(&ha->hardware_lock, flags);
728 if (ha->isp_ops->abort_command(ha, sp)) { 818 if (ha->isp_ops->abort_command(vha, sp, req)) {
729 DEBUG2(printk("%s(%ld): abort_command " 819 DEBUG2(printk("%s(%ld): abort_command "
730 "mbx failed.\n", __func__, ha->host_no)); 820 "mbx failed.\n", __func__, vha->host_no));
731 ret = FAILED;
732 } else { 821 } else {
733 DEBUG3(printk("%s(%ld): abort_command " 822 DEBUG3(printk("%s(%ld): abort_command "
734 "mbx success.\n", __func__, ha->host_no)); 823 "mbx success.\n", __func__, vha->host_no));
735 wait = 1; 824 wait = 1;
736 } 825 }
737 spin_lock_irqsave(&pha->hardware_lock, flags); 826 spin_lock_irqsave(&ha->hardware_lock, flags);
738
739 break; 827 break;
740 } 828 }
741 spin_unlock_irqrestore(&pha->hardware_lock, flags); 829 spin_unlock_irqrestore(&ha->hardware_lock, flags);
742 830
743 /* Wait for the command to be returned. */ 831 /* Wait for the command to be returned. */
744 if (wait) { 832 if (wait) {
745 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 833 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
746 qla_printk(KERN_ERR, ha, 834 qla_printk(KERN_ERR, ha,
747 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 835 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
748 "%x.\n", ha->host_no, id, lun, serial, ret); 836 "%x.\n", vha->host_no, id, lun, serial, ret);
749 ret = FAILED; 837 ret = FAILED;
750 } 838 }
751 } 839 }
752 840
753 qla_printk(KERN_INFO, ha, 841 qla_printk(KERN_INFO, ha,
754 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 842 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
755 ha->host_no, id, lun, wait, serial, ret); 843 vha->host_no, id, lun, wait, serial, ret);
756 844
757 return ret; 845 return ret;
758} 846}
@@ -764,23 +852,27 @@ enum nexus_wait_type {
764}; 852};
765 853
766static int 854static int
767qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, 855qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
768 unsigned int l, enum nexus_wait_type type) 856 unsigned int l, srb_t *sp, enum nexus_wait_type type)
769{ 857{
770 int cnt, match, status; 858 int cnt, match, status;
771 srb_t *sp;
772 unsigned long flags; 859 unsigned long flags;
773 scsi_qla_host_t *pha = to_qla_parent(ha); 860 struct qla_hw_data *ha = vha->hw;
861 struct req_que *req;
774 862
775 status = QLA_SUCCESS; 863 status = QLA_SUCCESS;
776 spin_lock_irqsave(&pha->hardware_lock, flags); 864 if (!sp)
777 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; 865 return status;
778 cnt++) { 866
779 sp = pha->outstanding_cmds[cnt]; 867 spin_lock_irqsave(&ha->hardware_lock, flags);
868 req = sp->que;
869 for (cnt = 1; status == QLA_SUCCESS &&
870 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
871 sp = req->outstanding_cmds[cnt];
780 if (!sp) 872 if (!sp)
781 continue; 873 continue;
782 874
783 if (ha->vp_idx != sp->fcport->ha->vp_idx) 875 if (vha->vp_idx != sp->fcport->vha->vp_idx)
784 continue; 876 continue;
785 match = 0; 877 match = 0;
786 switch (type) { 878 switch (type) {
@@ -792,17 +884,17 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
792 break; 884 break;
793 case WAIT_LUN: 885 case WAIT_LUN:
794 match = (sp->cmd->device->id == t && 886 match = (sp->cmd->device->id == t &&
795 sp->cmd->device->lun == l); 887 sp->cmd->device->lun == l);
796 break; 888 break;
797 } 889 }
798 if (!match) 890 if (!match)
799 continue; 891 continue;
800 892
801 spin_unlock_irqrestore(&pha->hardware_lock, flags); 893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
802 status = qla2x00_eh_wait_on_command(ha, sp->cmd); 894 status = qla2x00_eh_wait_on_command(sp->cmd);
803 spin_lock_irqsave(&pha->hardware_lock, flags); 895 spin_lock_irqsave(&ha->hardware_lock, flags);
804 } 896 }
805 spin_unlock_irqrestore(&pha->hardware_lock, flags); 897 spin_unlock_irqrestore(&ha->hardware_lock, flags);
806 898
807 return status; 899 return status;
808} 900}
@@ -818,7 +910,7 @@ static int
818__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 910__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
819 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 911 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
820{ 912{
821 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 913 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
822 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 914 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
823 int err; 915 int err;
824 916
@@ -827,31 +919,31 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
827 if (!fcport) 919 if (!fcport)
828 return FAILED; 920 return FAILED;
829 921
830 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 922 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
831 ha->host_no, cmd->device->id, cmd->device->lun, name); 923 vha->host_no, cmd->device->id, cmd->device->lun, name);
832 924
833 err = 0; 925 err = 0;
834 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 926 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
835 goto eh_reset_failed; 927 goto eh_reset_failed;
836 err = 1; 928 err = 1;
837 if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS) 929 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
838 goto eh_reset_failed; 930 goto eh_reset_failed;
839 err = 2; 931 err = 2;
840 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 932 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
841 goto eh_reset_failed; 933 goto eh_reset_failed;
842 err = 3; 934 err = 3;
843 if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id, 935 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
844 cmd->device->lun, type) != QLA_SUCCESS) 936 cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
845 goto eh_reset_failed; 937 goto eh_reset_failed;
846 938
847 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 939 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
848 ha->host_no, cmd->device->id, cmd->device->lun, name); 940 vha->host_no, cmd->device->id, cmd->device->lun, name);
849 941
850 return SUCCESS; 942 return SUCCESS;
851 943
852 eh_reset_failed: 944 eh_reset_failed:
853 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n", 945 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
854 ha->host_no, cmd->device->id, cmd->device->lun, name, 946 , vha->host_no, cmd->device->id, cmd->device->lun, name,
855 reset_errors[err]); 947 reset_errors[err]);
856 return FAILED; 948 return FAILED;
857} 949}
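__qla2xxx_eh_generic_reset() above records how far it got with a small err counter and reports the failing stage through the reset_errors[] string table. A tiny sketch of that staged-failure reporting style (the stage names and stub functions are invented for the example):

#include <stdio.h>

static const char *reset_errors[] = {
        "HBA not online",
        "loop not ready",
        "device reset failed",
        "commands still pending",
};

static int wait_for_hba_online(void) { return 1; }
static int wait_for_loop_ready(void) { return 1; }
static int do_reset(void)            { return 0; }   /* pretend this step fails */
static int wait_for_pending(void)    { return 1; }

static int generic_reset(void)
{
        int err = 0;

        if (!wait_for_hba_online())
                goto failed;
        err = 1;
        if (!wait_for_loop_ready())
                goto failed;
        err = 2;
        if (!do_reset())
                goto failed;
        err = 3;
        if (!wait_for_pending())
                goto failed;

        printf("RESET SUCCEEDED\n");
        return 0;

failed:
        printf("RESET FAILED: %s\n", reset_errors[err]);
        return -1;
}

int main(void)
{
        return generic_reset() ? 1 : 0;
}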
@@ -859,7 +951,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
859static int 951static int
860qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 952qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
861{ 953{
862 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 954 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
955 struct qla_hw_data *ha = vha->hw;
863 956
864 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, 957 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
865 ha->isp_ops->lun_reset); 958 ha->isp_ops->lun_reset);
@@ -868,7 +961,8 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
868static int 961static int
869qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 962qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
870{ 963{
871 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 964 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
965 struct qla_hw_data *ha = vha->hw;
872 966
873 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, 967 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
874 ha->isp_ops->target_reset); 968 ha->isp_ops->target_reset);
@@ -892,12 +986,12 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
892static int 986static int
893qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 987qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
894{ 988{
895 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 989 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
896 scsi_qla_host_t *pha = to_qla_parent(ha);
897 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 990 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
898 int ret = FAILED; 991 int ret = FAILED;
899 unsigned int id, lun; 992 unsigned int id, lun;
900 unsigned long serial; 993 unsigned long serial;
994 srb_t *sp = (srb_t *) CMD_SP(cmd);
901 995
902 qla2x00_block_error_handler(cmd); 996 qla2x00_block_error_handler(cmd);
903 997
@@ -908,28 +1002,28 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
908 if (!fcport) 1002 if (!fcport)
909 return ret; 1003 return ret;
910 1004
911 qla_printk(KERN_INFO, ha, 1005 qla_printk(KERN_INFO, vha->hw,
912 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun); 1006 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
913 1007
914 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 1008 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
915 DEBUG2(printk("%s failed:board disabled\n",__func__)); 1009 DEBUG2(printk("%s failed:board disabled\n",__func__));
916 goto eh_bus_reset_done; 1010 goto eh_bus_reset_done;
917 } 1011 }
918 1012
919 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 1013 if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
920 if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 1014 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
921 ret = SUCCESS; 1015 ret = SUCCESS;
922 } 1016 }
923 if (ret == FAILED) 1017 if (ret == FAILED)
924 goto eh_bus_reset_done; 1018 goto eh_bus_reset_done;
925 1019
926 /* Flush outstanding commands. */ 1020 /* Flush outstanding commands. */
927 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) != 1021 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
928 QLA_SUCCESS) 1022 QLA_SUCCESS)
929 ret = FAILED; 1023 ret = FAILED;
930 1024
931eh_bus_reset_done: 1025eh_bus_reset_done:
932 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1026 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
933 (ret == FAILED) ? "failed" : "succeeded"); 1027 (ret == FAILED) ? "failed" : "succeeded");
934 1028
935 return ret; 1029 return ret;
@@ -953,12 +1047,14 @@ eh_bus_reset_done:
953static int 1047static int
954qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 1048qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
955{ 1049{
956 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 1050 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
957 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 1051 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1052 struct qla_hw_data *ha = vha->hw;
958 int ret = FAILED; 1053 int ret = FAILED;
959 unsigned int id, lun; 1054 unsigned int id, lun;
960 unsigned long serial; 1055 unsigned long serial;
961 scsi_qla_host_t *pha = to_qla_parent(ha); 1056 srb_t *sp = (srb_t *) CMD_SP(cmd);
1057 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
962 1058
963 qla2x00_block_error_handler(cmd); 1059 qla2x00_block_error_handler(cmd);
964 1060
@@ -970,9 +1066,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
970 return ret; 1066 return ret;
971 1067
972 qla_printk(KERN_INFO, ha, 1068 qla_printk(KERN_INFO, ha,
973 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun); 1069 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
974 1070
975 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 1071 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
976 goto eh_host_reset_lock; 1072 goto eh_host_reset_lock;
977 1073
978 /* 1074 /*
@@ -983,26 +1079,28 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
983 * devices as lost kicking of the port_down_timer 1079 * devices as lost kicking of the port_down_timer
984 * while dpc is stuck for the mailbox to complete. 1080 * while dpc is stuck for the mailbox to complete.
985 */ 1081 */
986 qla2x00_wait_for_loop_ready(ha); 1082 qla2x00_wait_for_loop_ready(vha);
987 set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 1083 if (vha != base_vha) {
988 if (qla2x00_abort_isp(pha)) { 1084 if (qla2x00_vp_abort_isp(vha))
989 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
990 /* failed. schedule dpc to try */
991 set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
992
993 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
994 goto eh_host_reset_lock; 1085 goto eh_host_reset_lock;
1086 } else {
1087 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1088 if (qla2x00_abort_isp(base_vha)) {
1089 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1090 /* failed. schedule dpc to try */
1091 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1092
1093 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1094 goto eh_host_reset_lock;
1095 }
1096 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
995 } 1097 }
996 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
997 1098
998 /* Waiting for our command in done_queue to be returned to OS.*/ 1099 /* Waiting for command to be returned to OS.*/
999 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) == 1100 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
1000 QLA_SUCCESS) 1101 QLA_SUCCESS)
1001 ret = SUCCESS; 1102 ret = SUCCESS;
1002 1103
1003 if (ha->parent)
1004 qla2x00_vp_abort_isp(ha);
1005
1006eh_host_reset_lock: 1104eh_host_reset_lock:
1007 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1105 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1008 (ret == FAILED) ? "failed" : "succeeded"); 1106 (ret == FAILED) ? "failed" : "succeeded");
@@ -1021,35 +1119,36 @@ eh_host_reset_lock:
1021* 0 = success 1119* 0 = success
1022*/ 1120*/
1023int 1121int
1024qla2x00_loop_reset(scsi_qla_host_t *ha) 1122qla2x00_loop_reset(scsi_qla_host_t *vha)
1025{ 1123{
1026 int ret; 1124 int ret;
1027 struct fc_port *fcport; 1125 struct fc_port *fcport;
1126 struct qla_hw_data *ha = vha->hw;
1028 1127
1029 if (ha->flags.enable_lip_full_login) { 1128 if (ha->flags.enable_lip_full_login && !vha->vp_idx) {
1030 ret = qla2x00_full_login_lip(ha); 1129 ret = qla2x00_full_login_lip(vha);
1031 if (ret != QLA_SUCCESS) { 1130 if (ret != QLA_SUCCESS) {
1032 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1131 DEBUG2_3(printk("%s(%ld): failed: "
1033 "full_login_lip=%d.\n", __func__, ha->host_no, 1132 "full_login_lip=%d.\n", __func__, vha->host_no,
1034 ret)); 1133 ret));
1035 } 1134 }
1036 atomic_set(&ha->loop_state, LOOP_DOWN); 1135 atomic_set(&vha->loop_state, LOOP_DOWN);
1037 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 1136 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1038 qla2x00_mark_all_devices_lost(ha, 0); 1137 qla2x00_mark_all_devices_lost(vha, 0);
1039 qla2x00_wait_for_loop_ready(ha); 1138 qla2x00_wait_for_loop_ready(vha);
1040 } 1139 }
1041 1140
1042 if (ha->flags.enable_lip_reset) { 1141 if (ha->flags.enable_lip_reset && !vha->vp_idx) {
1043 ret = qla2x00_lip_reset(ha); 1142 ret = qla2x00_lip_reset(vha);
1044 if (ret != QLA_SUCCESS) { 1143 if (ret != QLA_SUCCESS) {
1045 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1144 DEBUG2_3(printk("%s(%ld): failed: "
1046 "lip_reset=%d.\n", __func__, ha->host_no, ret)); 1145 "lip_reset=%d.\n", __func__, vha->host_no, ret));
1047 } 1146 } else
1048 qla2x00_wait_for_loop_ready(ha); 1147 qla2x00_wait_for_loop_ready(vha);
1049 } 1148 }
1050 1149
1051 if (ha->flags.enable_target_reset) { 1150 if (ha->flags.enable_target_reset) {
1052 list_for_each_entry(fcport, &ha->fcports, list) { 1151 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1053 if (fcport->port_type != FCT_TARGET) 1152 if (fcport->port_type != FCT_TARGET)
1054 continue; 1153 continue;
1055 1154
@@ -1057,31 +1156,37 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
1057 if (ret != QLA_SUCCESS) { 1156 if (ret != QLA_SUCCESS) {
1058 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1157 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1059 "target_reset=%d d_id=%x.\n", __func__, 1158 "target_reset=%d d_id=%x.\n", __func__,
1060 ha->host_no, ret, fcport->d_id.b24)); 1159 vha->host_no, ret, fcport->d_id.b24));
1061 } 1160 }
1062 } 1161 }
1063 } 1162 }
1064
1065 /* Issue marker command only when we are going to start the I/O */ 1163 /* Issue marker command only when we are going to start the I/O */
1066 ha->marker_needed = 1; 1164 vha->marker_needed = 1;
1067 1165
1068 return QLA_SUCCESS; 1166 return QLA_SUCCESS;
1069} 1167}
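
qla2x00_loop_reset() above walks a staged sequence: full-login LIP and LIP reset only on the physical port (vp_idx 0), then optional per-target resets, then marker_needed so a marker IOCB precedes fresh I/O. The sketch below models that gating with plain flags; the names and stage bodies are placeholders, not the driver's code.

#include <stdio.h>
#include <stdbool.h>

struct hba_caps { bool full_login_lip, lip_reset, target_reset; };

static void do_stage(const char *name) { printf("stage: %s\n", name); }

static int loop_reset(const struct hba_caps *caps, int vp_idx, int ntargets)
{
    if (caps->full_login_lip && vp_idx == 0)
        do_stage("full login LIP");
    if (caps->lip_reset && vp_idx == 0)
        do_stage("LIP reset");
    if (caps->target_reset)
        for (int t = 0; t < ntargets; t++)
            do_stage("target reset");

    /* The driver then sets marker_needed so a marker IOCB is issued
     * before new I/O is started. */
    return 0;
}

int main(void)
{
    struct hba_caps caps = { true, true, true };

    return loop_reset(&caps, 0, 2);
}
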
1070 1168
1071void 1169void
1072qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) 1170qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1073{ 1171{
1074 int cnt; 1172 int que, cnt;
1075 unsigned long flags; 1173 unsigned long flags;
1076 srb_t *sp; 1174 srb_t *sp;
1175 struct qla_hw_data *ha = vha->hw;
1176 struct req_que *req;
1077 1177
1078 spin_lock_irqsave(&ha->hardware_lock, flags); 1178 spin_lock_irqsave(&ha->hardware_lock, flags);
1079 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1179 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
1080 sp = ha->outstanding_cmds[cnt]; 1180 req = ha->req_q_map[vha->req_ques[que]];
1081 if (sp) { 1181 if (!req)
1082 ha->outstanding_cmds[cnt] = NULL; 1182 continue;
1083 sp->cmd->result = res; 1183 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1084 qla2x00_sp_compl(ha, sp); 1184 sp = req->outstanding_cmds[cnt];
1185 if (sp && sp->vha == vha) {
1186 req->outstanding_cmds[cnt] = NULL;
1187 sp->cmd->result = res;
1188 qla2x00_sp_compl(ha, sp);
1189 }
1085 } 1190 }
1086 } 1191 }
1087 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1192 spin_unlock_irqrestore(&ha->hardware_lock, flags);
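
qla2x00_abort_all_cmds() above now sweeps every request queue's outstanding-command array under the hardware lock and completes only the entries owned by the given vha. A self-contained model of that per-queue sweep, with invented types and a pthread mutex standing in for the hardware lock:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_QUEUES 4
#define MAX_CMDS   8

struct cmd { int owner; int result; };

struct hw {
    pthread_mutex_t lock;                        /* hardware_lock stand-in */
    struct cmd *queues[MAX_QUEUES][MAX_CMDS];    /* NULL == free slot      */
};

static void complete_cmd(struct cmd *c, int res)
{
    c->result = res;                             /* models qla2x00_sp_compl */
}

static void abort_all_cmds(struct hw *hw, int owner, int res)
{
    pthread_mutex_lock(&hw->lock);
    for (int q = 0; q < MAX_QUEUES; q++) {
        for (int i = 0; i < MAX_CMDS; i++) {
            struct cmd *c = hw->queues[q][i];

            if (c && c->owner == owner) {
                hw->queues[q][i] = NULL;         /* detach before completing */
                complete_cmd(c, res);
            }
        }
    }
    pthread_mutex_unlock(&hw->lock);
}

int main(void)
{
    static struct hw hw = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct cmd c = { .owner = 0 };

    hw.queues[0][1] = &c;
    abort_all_cmds(&hw, 0, -1);
    printf("completed with %d, slot now %p\n", c.result, (void *)hw.queues[0][1]);
    return 0;
}
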
@@ -1103,13 +1208,15 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
1103static int 1208static int
1104qla2xxx_slave_configure(struct scsi_device *sdev) 1209qla2xxx_slave_configure(struct scsi_device *sdev)
1105{ 1210{
1106 scsi_qla_host_t *ha = shost_priv(sdev->host); 1211 scsi_qla_host_t *vha = shost_priv(sdev->host);
1212 struct qla_hw_data *ha = vha->hw;
1107 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1213 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1214 struct req_que *req = ha->req_q_map[0];
1108 1215
1109 if (sdev->tagged_supported) 1216 if (sdev->tagged_supported)
1110 scsi_activate_tcq(sdev, ha->max_q_depth); 1217 scsi_activate_tcq(sdev, req->max_q_depth);
1111 else 1218 else
1112 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1219 scsi_deactivate_tcq(sdev, req->max_q_depth);
1113 1220
1114 rport->dev_loss_tmo = ha->port_down_retry_count; 1221 rport->dev_loss_tmo = ha->port_down_retry_count;
1115 1222
@@ -1152,8 +1259,9 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1152 * supported addressing method. 1259 * supported addressing method.
1153 */ 1260 */
1154static void 1261static void
1155qla2x00_config_dma_addressing(scsi_qla_host_t *ha) 1262qla2x00_config_dma_addressing(scsi_qla_host_t *vha)
1156{ 1263{
1264 struct qla_hw_data *ha = vha->hw;
1157 /* Assume a 32bit DMA mask. */ 1265 /* Assume a 32bit DMA mask. */
1158 ha->flags.enable_64bit_addressing = 0; 1266 ha->flags.enable_64bit_addressing = 0;
1159 1267
@@ -1174,7 +1282,7 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1174} 1282}
1175 1283
1176static void 1284static void
1177qla2x00_enable_intrs(scsi_qla_host_t *ha) 1285qla2x00_enable_intrs(struct qla_hw_data *ha)
1178{ 1286{
1179 unsigned long flags = 0; 1287 unsigned long flags = 0;
1180 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1288 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1189,7 +1297,7 @@ qla2x00_enable_intrs(scsi_qla_host_t *ha)
1189} 1297}
1190 1298
1191static void 1299static void
1192qla2x00_disable_intrs(scsi_qla_host_t *ha) 1300qla2x00_disable_intrs(struct qla_hw_data *ha)
1193{ 1301{
1194 unsigned long flags = 0; 1302 unsigned long flags = 0;
1195 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1303 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1203,7 +1311,7 @@ qla2x00_disable_intrs(scsi_qla_host_t *ha)
1203} 1311}
1204 1312
1205static void 1313static void
1206qla24xx_enable_intrs(scsi_qla_host_t *ha) 1314qla24xx_enable_intrs(struct qla_hw_data *ha)
1207{ 1315{
1208 unsigned long flags = 0; 1316 unsigned long flags = 0;
1209 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1317 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1216,7 +1324,7 @@ qla24xx_enable_intrs(scsi_qla_host_t *ha)
1216} 1324}
1217 1325
1218static void 1326static void
1219qla24xx_disable_intrs(scsi_qla_host_t *ha) 1327qla24xx_disable_intrs(struct qla_hw_data *ha)
1220{ 1328{
1221 unsigned long flags = 0; 1329 unsigned long flags = 0;
1222 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1330 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1260,6 +1368,10 @@ static struct isp_operations qla2100_isp_ops = {
1260 .read_optrom = qla2x00_read_optrom_data, 1368 .read_optrom = qla2x00_read_optrom_data,
1261 .write_optrom = qla2x00_write_optrom_data, 1369 .write_optrom = qla2x00_write_optrom_data,
1262 .get_flash_version = qla2x00_get_flash_version, 1370 .get_flash_version = qla2x00_get_flash_version,
1371 .start_scsi = qla2x00_start_scsi,
1372 .wrt_req_reg = NULL,
1373 .wrt_rsp_reg = NULL,
1374 .rd_req_reg = NULL,
1263}; 1375};
1264 1376
1265static struct isp_operations qla2300_isp_ops = { 1377static struct isp_operations qla2300_isp_ops = {
@@ -1294,6 +1406,10 @@ static struct isp_operations qla2300_isp_ops = {
1294 .read_optrom = qla2x00_read_optrom_data, 1406 .read_optrom = qla2x00_read_optrom_data,
1295 .write_optrom = qla2x00_write_optrom_data, 1407 .write_optrom = qla2x00_write_optrom_data,
1296 .get_flash_version = qla2x00_get_flash_version, 1408 .get_flash_version = qla2x00_get_flash_version,
1409 .start_scsi = qla2x00_start_scsi,
1410 .wrt_req_reg = NULL,
1411 .wrt_rsp_reg = NULL,
1412 .rd_req_reg = NULL,
1297}; 1413};
1298 1414
1299static struct isp_operations qla24xx_isp_ops = { 1415static struct isp_operations qla24xx_isp_ops = {
@@ -1328,6 +1444,10 @@ static struct isp_operations qla24xx_isp_ops = {
1328 .read_optrom = qla24xx_read_optrom_data, 1444 .read_optrom = qla24xx_read_optrom_data,
1329 .write_optrom = qla24xx_write_optrom_data, 1445 .write_optrom = qla24xx_write_optrom_data,
1330 .get_flash_version = qla24xx_get_flash_version, 1446 .get_flash_version = qla24xx_get_flash_version,
1447 .start_scsi = qla24xx_start_scsi,
1448 .wrt_req_reg = qla24xx_wrt_req_reg,
1449 .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
1450 .rd_req_reg = qla24xx_rd_req_reg,
1331}; 1451};
1332 1452
1333static struct isp_operations qla25xx_isp_ops = { 1453static struct isp_operations qla25xx_isp_ops = {
@@ -1362,10 +1482,14 @@ static struct isp_operations qla25xx_isp_ops = {
1362 .read_optrom = qla25xx_read_optrom_data, 1482 .read_optrom = qla25xx_read_optrom_data,
1363 .write_optrom = qla24xx_write_optrom_data, 1483 .write_optrom = qla24xx_write_optrom_data,
1364 .get_flash_version = qla24xx_get_flash_version, 1484 .get_flash_version = qla24xx_get_flash_version,
1485 .start_scsi = qla24xx_start_scsi,
1486 .wrt_req_reg = qla24xx_wrt_req_reg,
1487 .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
1488 .rd_req_reg = qla24xx_rd_req_reg,
1365}; 1489};
1366 1490
1367static inline void 1491static inline void
1368qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1492qla2x00_set_isp_flags(struct qla_hw_data *ha)
1369{ 1493{
1370 ha->device_type = DT_EXTENDED_IDS; 1494 ha->device_type = DT_EXTENDED_IDS;
1371 switch (ha->pdev->device) { 1495 switch (ha->pdev->device) {
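
The isp_operations tables above gain per-chip start_scsi and request/response register hooks; the oldest chips leave the register hooks NULL, so any caller has to test the pointer before dispatching. A small standalone example of that optional-hook ops-table pattern (the hook names here are illustrative, not the driver's):

#include <stdio.h>
#include <stddef.h>

struct chip_ops {
    int  (*start_io)(void);          /* always provided                 */
    void (*write_req_index)(int);    /* optional: NULL on older chips   */
};

static int generic_start_io(void) { puts("start io"); return 0; }
static void mq_write_req_index(int v) { printf("req index = %d\n", v); }

static const struct chip_ops old_chip = {
    .start_io = generic_start_io,    /* register hook left NULL         */
};

static const struct chip_ops new_chip = {
    .start_io        = generic_start_io,
    .write_req_index = mq_write_req_index,
};

static void ring_doorbell(const struct chip_ops *ops, int idx)
{
    if (ops->write_req_index)        /* guard the optional hook         */
        ops->write_req_index(idx);
}

int main(void)
{
    ring_doorbell(&old_chip, 1);     /* silently skipped                */
    ring_doorbell(&new_chip, 1);
    return new_chip.start_io();
}
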
@@ -1447,9 +1571,10 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1447} 1571}
1448 1572
1449static int 1573static int
1450qla2x00_iospace_config(scsi_qla_host_t *ha) 1574qla2x00_iospace_config(struct qla_hw_data *ha)
1451{ 1575{
1452 resource_size_t pio; 1576 resource_size_t pio;
1577 uint16_t msix;
1453 1578
1454 if (pci_request_selected_regions(ha->pdev, ha->bars, 1579 if (pci_request_selected_regions(ha->pdev, ha->bars,
1455 QLA2XXX_DRIVER_NAME)) { 1580 QLA2XXX_DRIVER_NAME)) {
@@ -1502,6 +1627,30 @@ skip_pio:
1502 goto iospace_error_exit; 1627 goto iospace_error_exit;
1503 } 1628 }
1504 1629
1630 /* Determine queue resources */
1631 ha->max_queues = 1;
1632 if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha))
1633 goto mqiobase_exit;
1634 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1635 pci_resource_len(ha->pdev, 3));
1636 if (ha->mqiobase) {
1637 /* Read MSIX vector size of the board */
1638 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1639 ha->msix_count = msix;
1640 /* Max queues are bounded by available msix vectors */
1641 /* queue 0 uses two msix vectors */
1642 if (ha->msix_count - 1 < ql2xmaxqueues)
1643 ha->max_queues = ha->msix_count - 1;
1644 else if (ql2xmaxqueues > QLA_MQ_SIZE)
1645 ha->max_queues = QLA_MQ_SIZE;
1646 else
1647 ha->max_queues = ql2xmaxqueues;
1648 qla_printk(KERN_INFO, ha,
1649 "MSI-X vector count: %d\n", msix);
1650 }
1651
1652mqiobase_exit:
1653 ha->msix_count = ha->max_queues + 1;
1505 return (0); 1654 return (0);
1506 1655
1507iospace_error_exit: 1656iospace_error_exit:
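
The iospace hunk above sizes the queue set from the MSI-X capability: queue 0 consumes two vectors, so the extra-queue count is clamped by msix_count - 1, by the ql2xmaxqueues module parameter, and by the driver's hard limit, and msix_count is finally set to max_queues + 1. A compact model of that clamp; QLA_MQ_SIZE and the guard for tiny vector counts are chosen here for illustration.

#include <stdio.h>

#define QLA_MQ_SIZE 4            /* illustrative cap, not the real value */

static int max_queues(int msix_vectors, int requested)
{
    if (requested <= 1 || msix_vectors < 2)
        return 1;                        /* single-queue fallback       */
    if (msix_vectors - 1 < requested)
        return msix_vectors - 1;         /* queue 0 burns two vectors   */
    if (requested > QLA_MQ_SIZE)
        return QLA_MQ_SIZE;              /* bounded by the driver limit */
    return requested;
}

int main(void)
{
    int queues = max_queues(8, 16);

    printf("max_queues=%d msix_count=%d\n", queues, queues + 1);
    return 0;
}
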
@@ -1511,25 +1660,25 @@ iospace_error_exit:
1511static void 1660static void
1512qla2xxx_scan_start(struct Scsi_Host *shost) 1661qla2xxx_scan_start(struct Scsi_Host *shost)
1513{ 1662{
1514 scsi_qla_host_t *ha = shost_priv(shost); 1663 scsi_qla_host_t *vha = shost_priv(shost);
1515 1664
1516 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1665 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1517 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1666 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1518 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1667 set_bit(RSCN_UPDATE, &vha->dpc_flags);
1519 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 1668 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1520} 1669}
1521 1670
1522static int 1671static int
1523qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 1672qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1524{ 1673{
1525 scsi_qla_host_t *ha = shost_priv(shost); 1674 scsi_qla_host_t *vha = shost_priv(shost);
1526 1675
1527 if (!ha->host) 1676 if (!vha->host)
1528 return 1; 1677 return 1;
1529 if (time > ha->loop_reset_delay * HZ) 1678 if (time > vha->hw->loop_reset_delay * HZ)
1530 return 1; 1679 return 1;
1531 1680
1532 return atomic_read(&ha->loop_state) == LOOP_READY; 1681 return atomic_read(&vha->loop_state) == LOOP_READY;
1533} 1682}
1534 1683
1535/* 1684/*
@@ -1540,11 +1689,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1540{ 1689{
1541 int ret = -ENODEV; 1690 int ret = -ENODEV;
1542 struct Scsi_Host *host; 1691 struct Scsi_Host *host;
1543 scsi_qla_host_t *ha; 1692 scsi_qla_host_t *base_vha = NULL;
1693 struct qla_hw_data *ha;
1544 char pci_info[30]; 1694 char pci_info[30];
1545 char fw_str[30]; 1695 char fw_str[30];
1546 struct scsi_host_template *sht; 1696 struct scsi_host_template *sht;
1547 int bars, mem_only = 0; 1697 int bars, max_id, mem_only = 0;
1698 uint16_t req_length = 0, rsp_length = 0;
1699 struct req_que *req = NULL;
1700 struct rsp_que *rsp = NULL;
1548 1701
1549 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 1702 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1550 sht = &qla2x00_driver_template; 1703 sht = &qla2x00_driver_template;
@@ -1570,33 +1723,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1570 /* This may fail but that's ok */ 1723 /* This may fail but that's ok */
1571 pci_enable_pcie_error_reporting(pdev); 1724 pci_enable_pcie_error_reporting(pdev);
1572 1725
1573 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1726 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1574 if (host == NULL) { 1727 if (!ha) {
1575 printk(KERN_WARNING 1728 DEBUG(printk("Unable to allocate memory for ha\n"));
1576 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 1729 goto probe_out;
1577 goto probe_disable_device;
1578 } 1730 }
1731 ha->pdev = pdev;
1579 1732
1580 /* Clear our data area */ 1733 /* Clear our data area */
1581 ha = shost_priv(host);
1582 memset(ha, 0, sizeof(scsi_qla_host_t));
1583
1584 ha->pdev = pdev;
1585 ha->host = host;
1586 ha->host_no = host->host_no;
1587 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
1588 ha->parent = NULL;
1589 ha->bars = bars; 1734 ha->bars = bars;
1590 ha->mem_only = mem_only; 1735 ha->mem_only = mem_only;
1591 spin_lock_init(&ha->hardware_lock); 1736 spin_lock_init(&ha->hardware_lock);
1592 1737
1593 /* Set ISP-type information. */ 1738 /* Set ISP-type information. */
1594 qla2x00_set_isp_flags(ha); 1739 qla2x00_set_isp_flags(ha);
1595
1596 /* Configure PCI I/O space */ 1740 /* Configure PCI I/O space */
1597 ret = qla2x00_iospace_config(ha); 1741 ret = qla2x00_iospace_config(ha);
1598 if (ret) 1742 if (ret)
1599 goto probe_failed; 1743 goto probe_hw_failed;
1600 1744
1601 qla_printk(KERN_INFO, ha, 1745 qla_printk(KERN_INFO, ha,
1602 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1746 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
@@ -1604,95 +1748,137 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1604 1748
1605 ha->prev_topology = 0; 1749 ha->prev_topology = 0;
1606 ha->init_cb_size = sizeof(init_cb_t); 1750 ha->init_cb_size = sizeof(init_cb_t);
1607 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
1608 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1751 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1609 ha->optrom_size = OPTROM_SIZE_2300; 1752 ha->optrom_size = OPTROM_SIZE_2300;
1610 1753
1611 ha->max_q_depth = MAX_Q_DEPTH;
1612 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1613 ha->max_q_depth = ql2xmaxqdepth;
1614
1615 /* Assign ISP specific operations. */ 1754 /* Assign ISP specific operations. */
1755 max_id = MAX_TARGETS_2200;
1616 if (IS_QLA2100(ha)) { 1756 if (IS_QLA2100(ha)) {
1617 host->max_id = MAX_TARGETS_2100; 1757 max_id = MAX_TARGETS_2100;
1618 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1758 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
1619 ha->request_q_length = REQUEST_ENTRY_CNT_2100; 1759 req_length = REQUEST_ENTRY_CNT_2100;
1620 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1760 rsp_length = RESPONSE_ENTRY_CNT_2100;
1621 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1761 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1622 host->sg_tablesize = 32;
1623 ha->gid_list_info_size = 4; 1762 ha->gid_list_info_size = 4;
1624 ha->isp_ops = &qla2100_isp_ops; 1763 ha->isp_ops = &qla2100_isp_ops;
1625 } else if (IS_QLA2200(ha)) { 1764 } else if (IS_QLA2200(ha)) {
1626 host->max_id = MAX_TARGETS_2200;
1627 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1765 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1628 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1766 req_length = REQUEST_ENTRY_CNT_2200;
1629 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1767 rsp_length = RESPONSE_ENTRY_CNT_2100;
1630 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1768 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1631 ha->gid_list_info_size = 4; 1769 ha->gid_list_info_size = 4;
1632 ha->isp_ops = &qla2100_isp_ops; 1770 ha->isp_ops = &qla2100_isp_ops;
1633 } else if (IS_QLA23XX(ha)) { 1771 } else if (IS_QLA23XX(ha)) {
1634 host->max_id = MAX_TARGETS_2200;
1635 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1772 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1636 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1773 req_length = REQUEST_ENTRY_CNT_2200;
1637 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1774 rsp_length = RESPONSE_ENTRY_CNT_2300;
1638 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1775 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1639 ha->gid_list_info_size = 6; 1776 ha->gid_list_info_size = 6;
1640 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1777 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1641 ha->optrom_size = OPTROM_SIZE_2322; 1778 ha->optrom_size = OPTROM_SIZE_2322;
1642 ha->isp_ops = &qla2300_isp_ops; 1779 ha->isp_ops = &qla2300_isp_ops;
1643 } else if (IS_QLA24XX_TYPE(ha)) { 1780 } else if (IS_QLA24XX_TYPE(ha)) {
1644 host->max_id = MAX_TARGETS_2200;
1645 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1781 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1646 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1782 req_length = REQUEST_ENTRY_CNT_24XX;
1647 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1783 rsp_length = RESPONSE_ENTRY_CNT_2300;
1648 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1784 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1649 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1785 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1650 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1651 ha->gid_list_info_size = 8; 1786 ha->gid_list_info_size = 8;
1652 ha->optrom_size = OPTROM_SIZE_24XX; 1787 ha->optrom_size = OPTROM_SIZE_24XX;
1788 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
1653 ha->isp_ops = &qla24xx_isp_ops; 1789 ha->isp_ops = &qla24xx_isp_ops;
1654 } else if (IS_QLA25XX(ha)) { 1790 } else if (IS_QLA25XX(ha)) {
1655 host->max_id = MAX_TARGETS_2200;
1656 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1791 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1657 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1792 req_length = REQUEST_ENTRY_CNT_24XX;
1658 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1793 rsp_length = RESPONSE_ENTRY_CNT_2300;
1659 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1794 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1660 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1795 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1661 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1662 ha->gid_list_info_size = 8; 1796 ha->gid_list_info_size = 8;
1663 ha->optrom_size = OPTROM_SIZE_25XX; 1797 ha->optrom_size = OPTROM_SIZE_25XX;
1798 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1664 ha->isp_ops = &qla25xx_isp_ops; 1799 ha->isp_ops = &qla25xx_isp_ops;
1665 } 1800 }
1666 host->can_queue = ha->request_q_length + 128;
1667 1801
1668 mutex_init(&ha->vport_lock); 1802 mutex_init(&ha->vport_lock);
1669 init_completion(&ha->mbx_cmd_comp); 1803 init_completion(&ha->mbx_cmd_comp);
1670 complete(&ha->mbx_cmd_comp); 1804 complete(&ha->mbx_cmd_comp);
1671 init_completion(&ha->mbx_intr_comp); 1805 init_completion(&ha->mbx_intr_comp);
1672 1806
1673 INIT_LIST_HEAD(&ha->list);
1674 INIT_LIST_HEAD(&ha->fcports);
1675 INIT_LIST_HEAD(&ha->vp_list);
1676 INIT_LIST_HEAD(&ha->work_list);
1677
1678 set_bit(0, (unsigned long *) ha->vp_idx_map); 1807 set_bit(0, (unsigned long *) ha->vp_idx_map);
1679 1808
1680 qla2x00_config_dma_addressing(ha); 1809 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
1681 if (qla2x00_mem_alloc(ha)) { 1810 if (!ret) {
1682 qla_printk(KERN_WARNING, ha, 1811 qla_printk(KERN_WARNING, ha,
1683 "[ERROR] Failed to allocate memory for adapter\n"); 1812 "[ERROR] Failed to allocate memory for adapter\n");
1684 1813
1814 goto probe_hw_failed;
1815 }
1816
1817 req->max_q_depth = MAX_Q_DEPTH;
1818 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1819 req->max_q_depth = ql2xmaxqdepth;
1820
1821
1822 base_vha = qla2x00_create_host(sht, ha);
1823 if (!base_vha) {
1824 qla_printk(KERN_WARNING, ha,
1825 "[ERROR] Failed to allocate memory for scsi_host\n");
1826
1685 ret = -ENOMEM; 1827 ret = -ENOMEM;
1828 goto probe_hw_failed;
1829 }
1830
1831 pci_set_drvdata(pdev, base_vha);
1832
1833 qla2x00_config_dma_addressing(base_vha);
1834
1835 host = base_vha->host;
1836 base_vha->req_ques[0] = req->id;
1837 host->can_queue = req->length + 128;
1838 if (IS_QLA2XXX_MIDTYPE(ha))
1839 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
1840 else
1841 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
1842 base_vha->vp_idx;
1843 if (IS_QLA2100(ha))
1844 host->sg_tablesize = 32;
1845 host->max_id = max_id;
1846 host->this_id = 255;
1847 host->cmd_per_lun = 3;
1848 host->unique_id = host->host_no;
1849 host->max_cmd_len = MAX_CMDSZ;
1850 host->max_channel = MAX_BUSES - 1;
1851 host->max_lun = MAX_LUNS;
1852 host->transportt = qla2xxx_transport_template;
1853
1854 /* Set up the irqs */
1855 ret = qla2x00_request_irqs(ha, rsp);
1856 if (ret)
1857 goto probe_failed;
1858
1859 /* Alloc arrays of request and response ring ptrs */
1860 if (!qla2x00_alloc_queues(ha)) {
1861 qla_printk(KERN_WARNING, ha,
1862 "[ERROR] Failed to allocate memory for queue"
1863 " pointers\n");
1686 goto probe_failed; 1864 goto probe_failed;
1687 } 1865 }
1866 ha->rsp_q_map[0] = rsp;
1867 ha->req_q_map[0] = req;
1688 1868
1689 if (qla2x00_initialize_adapter(ha)) { 1869 if (ha->mqenable) {
1870 ha->isp_ops->wrt_req_reg = qla25xx_wrt_req_reg;
1871 ha->isp_ops->wrt_rsp_reg = qla25xx_wrt_rsp_reg;
1872 ha->isp_ops->rd_req_reg = qla25xx_rd_req_reg;
1873 }
1874
1875 if (qla2x00_initialize_adapter(base_vha)) {
1690 qla_printk(KERN_WARNING, ha, 1876 qla_printk(KERN_WARNING, ha,
1691 "Failed to initialize adapter\n"); 1877 "Failed to initialize adapter\n");
1692 1878
1693 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 1879 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
1694 "Adapter flags %x.\n", 1880 "Adapter flags %x.\n",
1695 ha->host_no, ha->device_flags)); 1881 base_vha->host_no, base_vha->device_flags));
1696 1882
1697 ret = -ENODEV; 1883 ret = -ENODEV;
1698 goto probe_failed; 1884 goto probe_failed;
@@ -1702,7 +1888,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1702 * Startup the kernel thread for this host adapter 1888 * Startup the kernel thread for this host adapter
1703 */ 1889 */
1704 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 1890 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
1705 "%s_dpc", ha->host_str); 1891 "%s_dpc", base_vha->host_str);
1706 if (IS_ERR(ha->dpc_thread)) { 1892 if (IS_ERR(ha->dpc_thread)) {
1707 qla_printk(KERN_WARNING, ha, 1893 qla_printk(KERN_WARNING, ha,
1708 "Unable to start DPC thread!\n"); 1894 "Unable to start DPC thread!\n");
@@ -1710,28 +1896,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1710 goto probe_failed; 1896 goto probe_failed;
1711 } 1897 }
1712 1898
1713 host->this_id = 255; 1899 list_add_tail(&base_vha->list, &ha->vp_list);
1714 host->cmd_per_lun = 3; 1900 base_vha->host->irq = ha->pdev->irq;
1715 host->unique_id = host->host_no;
1716 host->max_cmd_len = MAX_CMDSZ;
1717 host->max_channel = MAX_BUSES - 1;
1718 host->max_lun = MAX_LUNS;
1719 host->transportt = qla2xxx_transport_template;
1720
1721 ret = qla2x00_request_irqs(ha);
1722 if (ret)
1723 goto probe_failed;
1724 1901
1725 /* Initialize the timer */ 1902 /* Initialize the timer */
1726 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); 1903 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
1727 1904
1728 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1905 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1729 ha->host_no, ha)); 1906 base_vha->host_no, ha));
1730 1907
1731 pci_set_drvdata(pdev, ha); 1908 base_vha->flags.init_done = 1;
1732 1909 base_vha->flags.online = 1;
1733 ha->flags.init_done = 1;
1734 ha->flags.online = 1;
1735 1910
1736 ret = scsi_add_host(host, &pdev->dev); 1911 ret = scsi_add_host(host, &pdev->dev);
1737 if (ret) 1912 if (ret)
@@ -1741,76 +1916,98 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1741 1916
1742 scsi_scan_host(host); 1917 scsi_scan_host(host);
1743 1918
1744 qla2x00_alloc_sysfs_attr(ha); 1919 qla2x00_alloc_sysfs_attr(base_vha);
1745 1920
1746 qla2x00_init_host_attr(ha); 1921 qla2x00_init_host_attr(base_vha);
1747 1922
1748 qla2x00_dfs_setup(ha); 1923 qla2x00_dfs_setup(base_vha);
1749 1924
1750 qla_printk(KERN_INFO, ha, "\n" 1925 qla_printk(KERN_INFO, ha, "\n"
1751 " QLogic Fibre Channel HBA Driver: %s\n" 1926 " QLogic Fibre Channel HBA Driver: %s\n"
1752 " QLogic %s - %s\n" 1927 " QLogic %s - %s\n"
1753 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1928 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
1754 qla2x00_version_str, ha->model_number, 1929 qla2x00_version_str, ha->model_number,
1755 ha->model_desc ? ha->model_desc: "", pdev->device, 1930 ha->model_desc ? ha->model_desc : "", pdev->device,
1756 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev), 1931 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
1757 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, 1932 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
1758 ha->isp_ops->fw_version_str(ha, fw_str)); 1933 ha->isp_ops->fw_version_str(base_vha, fw_str));
1759 1934
1760 return 0; 1935 return 0;
1761 1936
1762probe_failed: 1937probe_failed:
1763 qla2x00_free_device(ha); 1938 qla2x00_free_que(ha, req, rsp);
1939 qla2x00_free_device(base_vha);
1764 1940
1765 scsi_host_put(host); 1941 scsi_host_put(base_vha->host);
1766 1942
1767probe_disable_device: 1943probe_hw_failed:
1768 pci_disable_device(pdev); 1944 if (ha->iobase)
1945 iounmap(ha->iobase);
1946
1947 pci_release_selected_regions(ha->pdev, ha->bars);
1948 kfree(ha);
1949 ha = NULL;
1769 1950
1770probe_out: 1951probe_out:
1952 pci_disable_device(pdev);
1771 return ret; 1953 return ret;
1772} 1954}
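
qla2x00_probe_one() above unwinds with ordered labels: probe_failed releases the queues and host, falls back to probe_hw_failed for the iobase and PCI regions, and probe_out disables the device last. The sketch below reproduces that reverse-order goto ladder with plain allocations instead of PCI/DMA resources.

#include <stdlib.h>
#include <stdio.h>

static int probe_one(void)
{
    char *hw = NULL, *host = NULL, *queues = NULL;
    int ret = -1;

    hw = malloc(64);                 /* stand-in for qla_hw_data + iospace */
    if (!hw)
        goto probe_out;
    host = malloc(64);               /* stand-in for the scsi host / vha   */
    if (!host)
        goto probe_hw_failed;
    queues = malloc(64);             /* stand-in for req/rsp queue arrays  */
    if (!queues)
        goto probe_failed;

    /* A real probe keeps these until remove(); freed here so the
     * sketch itself does not leak. */
    free(queues);
    free(host);
    free(hw);
    return 0;

probe_failed:                        /* host-level setup failed            */
    free(host);
probe_hw_failed:                     /* hardware-level setup failed        */
    free(hw);
probe_out:
    return ret;
}

int main(void)
{
    return probe_one() ? EXIT_FAILURE : EXIT_SUCCESS;
}
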
1773 1955
1774static void 1956static void
1775qla2x00_remove_one(struct pci_dev *pdev) 1957qla2x00_remove_one(struct pci_dev *pdev)
1776{ 1958{
1777 scsi_qla_host_t *ha, *vha, *temp; 1959 scsi_qla_host_t *base_vha, *vha, *temp;
1960 struct qla_hw_data *ha;
1961
1962 base_vha = pci_get_drvdata(pdev);
1963 ha = base_vha->hw;
1964
1965 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
1966 if (vha && vha->fc_vport)
1967 fc_vport_terminate(vha->fc_vport);
1968 }
1778 1969
1779 ha = pci_get_drvdata(pdev); 1970 set_bit(UNLOADING, &base_vha->dpc_flags);
1780 1971
1781 list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list) 1972 qla2x00_dfs_remove(base_vha);
1782 fc_vport_terminate(vha->fc_vport);
1783 1973
1784 set_bit(UNLOADING, &ha->dpc_flags); 1974 qla84xx_put_chip(base_vha);
1785 1975
1786 qla2x00_dfs_remove(ha); 1976 qla2x00_free_sysfs_attr(base_vha);
1787 1977
1788 qla84xx_put_chip(ha); 1978 fc_remove_host(base_vha->host);
1789 1979
1790 qla2x00_free_sysfs_attr(ha); 1980 scsi_remove_host(base_vha->host);
1791 1981
1792 fc_remove_host(ha->host); 1982 qla2x00_free_device(base_vha);
1793 1983
1794 scsi_remove_host(ha->host); 1984 scsi_host_put(base_vha->host);
1795 1985
1796 qla2x00_free_device(ha); 1986 if (ha->iobase)
1987 iounmap(ha->iobase);
1797 1988
1798 scsi_host_put(ha->host); 1989 if (ha->mqiobase)
1990 iounmap(ha->mqiobase);
1991
1992 pci_release_selected_regions(ha->pdev, ha->bars);
1993 kfree(ha);
1994 ha = NULL;
1799 1995
1800 pci_disable_device(pdev); 1996 pci_disable_device(pdev);
1801 pci_set_drvdata(pdev, NULL); 1997 pci_set_drvdata(pdev, NULL);
1802} 1998}
1803 1999
1804static void 2000static void
1805qla2x00_free_device(scsi_qla_host_t *ha) 2001qla2x00_free_device(scsi_qla_host_t *vha)
1806{ 2002{
1807 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16); 2003 struct qla_hw_data *ha = vha->hw;
2004 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
1808 2005
1809 /* Disable timer */ 2006 /* Disable timer */
1810 if (ha->timer_active) 2007 if (vha->timer_active)
1811 qla2x00_stop_timer(ha); 2008 qla2x00_stop_timer(vha);
1812 2009
1813 ha->flags.online = 0; 2010 vha->flags.online = 0;
1814 2011
1815 /* Kill the kernel thread for this host */ 2012 /* Kill the kernel thread for this host */
1816 if (ha->dpc_thread) { 2013 if (ha->dpc_thread) {
@@ -1825,45 +2022,41 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1825 } 2022 }
1826 2023
1827 if (ha->flags.fce_enabled) 2024 if (ha->flags.fce_enabled)
1828 qla2x00_disable_fce_trace(ha, NULL, NULL); 2025 qla2x00_disable_fce_trace(vha, NULL, NULL);
1829 2026
1830 if (ha->eft) 2027 if (ha->eft)
1831 qla2x00_disable_eft_trace(ha); 2028 qla2x00_disable_eft_trace(vha);
1832 2029
1833 /* Stop currently executing firmware. */ 2030 /* Stop currently executing firmware. */
1834 qla2x00_try_to_stop_firmware(ha); 2031 qla2x00_try_to_stop_firmware(vha);
1835 2032
1836 /* turn-off interrupts on the card */ 2033 /* turn-off interrupts on the card */
1837 if (ha->interrupts_on) 2034 if (ha->interrupts_on)
1838 ha->isp_ops->disable_intrs(ha); 2035 ha->isp_ops->disable_intrs(ha);
1839 2036
1840 qla2x00_mem_free(ha); 2037 qla2x00_free_irqs(vha);
1841 2038
1842 qla2x00_free_irqs(ha); 2039 qla2x00_mem_free(ha);
1843 2040
1844 /* release io space registers */ 2041 qla2x00_free_queues(ha);
1845 if (ha->iobase)
1846 iounmap(ha->iobase);
1847 pci_release_selected_regions(ha->pdev, ha->bars);
1848} 2042}
1849 2043
1850static inline void 2044static inline void
1851qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 2045qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
1852 int defer) 2046 int defer)
1853{ 2047{
1854 struct fc_rport *rport; 2048 struct fc_rport *rport;
1855 scsi_qla_host_t *pha = to_qla_parent(ha);
1856 2049
1857 if (!fcport->rport) 2050 if (!fcport->rport)
1858 return; 2051 return;
1859 2052
1860 rport = fcport->rport; 2053 rport = fcport->rport;
1861 if (defer) { 2054 if (defer) {
1862 spin_lock_irq(ha->host->host_lock); 2055 spin_lock_irq(vha->host->host_lock);
1863 fcport->drport = rport; 2056 fcport->drport = rport;
1864 spin_unlock_irq(ha->host->host_lock); 2057 spin_unlock_irq(vha->host->host_lock);
1865 set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags); 2058 set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
1866 qla2xxx_wake_dpc(pha); 2059 qla2xxx_wake_dpc(vha);
1867 } else 2060 } else
1868 fc_remote_port_delete(rport); 2061 fc_remote_port_delete(rport);
1869} 2062}
@@ -1877,13 +2070,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1877 * 2070 *
1878 * Context: 2071 * Context:
1879 */ 2072 */
1880void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 2073void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
1881 int do_login, int defer) 2074 int do_login, int defer)
1882{ 2075{
1883 if (atomic_read(&fcport->state) == FCS_ONLINE && 2076 if (atomic_read(&fcport->state) == FCS_ONLINE &&
1884 ha->vp_idx == fcport->vp_idx) 2077 vha->vp_idx == fcport->vp_idx) {
1885 qla2x00_schedule_rport_del(ha, fcport, defer); 2078 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1886 2079 qla2x00_schedule_rport_del(vha, fcport, defer);
2080 }
1887 /* 2081 /*
1888 * We may need to retry the login, so don't change the state of the 2082 * We may need to retry the login, so don't change the state of the
1889 * port but do the retries. 2083 * port but do the retries.
@@ -1895,13 +2089,13 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1895 return; 2089 return;
1896 2090
1897 if (fcport->login_retry == 0) { 2091 if (fcport->login_retry == 0) {
1898 fcport->login_retry = ha->login_retry_count; 2092 fcport->login_retry = vha->hw->login_retry_count;
1899 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 2093 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1900 2094
1901 DEBUG(printk("scsi(%ld): Port login retry: " 2095 DEBUG(printk("scsi(%ld): Port login retry: "
1902 "%02x%02x%02x%02x%02x%02x%02x%02x, " 2096 "%02x%02x%02x%02x%02x%02x%02x%02x, "
1903 "id = 0x%04x retry cnt=%d\n", 2097 "id = 0x%04x retry cnt=%d\n",
1904 ha->host_no, 2098 vha->host_no,
1905 fcport->port_name[0], 2099 fcport->port_name[0],
1906 fcport->port_name[1], 2100 fcport->port_name[1],
1907 fcport->port_name[2], 2101 fcport->port_name[2],
@@ -1929,13 +2123,12 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1929 * Context: 2123 * Context:
1930 */ 2124 */
1931void 2125void
1932qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer) 2126qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
1933{ 2127{
1934 fc_port_t *fcport; 2128 fc_port_t *fcport;
1935 scsi_qla_host_t *pha = to_qla_parent(ha);
1936 2129
1937 list_for_each_entry(fcport, &pha->fcports, list) { 2130 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1938 if (ha->vp_idx != fcport->vp_idx) 2131 if (vha->vp_idx != fcport->vp_idx)
1939 continue; 2132 continue;
1940 /* 2133 /*
1941 * No point in marking the device as lost, if the device is 2134 * No point in marking the device as lost, if the device is
@@ -1943,9 +2136,11 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1943 */ 2136 */
1944 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 2137 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
1945 continue; 2138 continue;
1946 if (atomic_read(&fcport->state) == FCS_ONLINE) 2139 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1947 qla2x00_schedule_rport_del(ha, fcport, defer); 2140 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1948 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2141 qla2x00_schedule_rport_del(vha, fcport, defer);
2142 } else
2143 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1949 } 2144 }
1950} 2145}
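
qla2x00_mark_all_devices_lost() above now flips a port to FCS_DEVICE_LOST before the deferred rport removal is scheduled, so anything racing with the removal already observes the lost state. A tiny model of that ordering, using a C11 atomic as a stand-in for the driver's atomic port state:

#include <stdatomic.h>
#include <stdio.h>

enum { FCS_ONLINE, FCS_DEVICE_LOST, FCS_DEVICE_DEAD };

struct fcport { atomic_int state; };

/* Deferred removal; a worker thread would pick this up later. */
static void schedule_rport_del(struct fcport *p) { (void)p; }

static void mark_device_lost(struct fcport *p)
{
    int s = atomic_load(&p->state);

    if (s == FCS_DEVICE_DEAD)
        return;                                   /* already being torn down */
    atomic_store(&p->state, FCS_DEVICE_LOST);     /* mark first...           */
    if (s == FCS_ONLINE)
        schedule_rport_del(p);                    /* ...then defer removal   */
}

int main(void)
{
    struct fcport p = { FCS_ONLINE };

    mark_device_lost(&p);
    printf("state=%d\n", atomic_load(&p.state));
    return 0;
}
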
1951 2146
@@ -1958,105 +2153,153 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1958* !0 = failure. 2153* !0 = failure.
1959*/ 2154*/
1960static int 2155static int
1961qla2x00_mem_alloc(scsi_qla_host_t *ha) 2156qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2157 struct req_que **req, struct rsp_que **rsp)
1962{ 2158{
1963 char name[16]; 2159 char name[16];
1964 2160
1965 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 2161 ha->init_cb_size = sizeof(init_cb_t);
1966 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma, 2162 if (IS_QLA2XXX_MIDTYPE(ha))
1967 GFP_KERNEL); 2163 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1968 if (!ha->request_ring)
1969 goto fail;
1970
1971 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
1972 (ha->response_q_length + 1) * sizeof(response_t),
1973 &ha->response_dma, GFP_KERNEL);
1974 if (!ha->response_ring)
1975 goto fail_free_request_ring;
1976
1977 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1978 &ha->gid_list_dma, GFP_KERNEL);
1979 if (!ha->gid_list)
1980 goto fail_free_response_ring;
1981 2164
1982 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 2165 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
1983 &ha->init_cb_dma, GFP_KERNEL); 2166 &ha->init_cb_dma, GFP_KERNEL);
1984 if (!ha->init_cb) 2167 if (!ha->init_cb)
1985 goto fail_free_gid_list; 2168 goto fail;
1986 2169
1987 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 2170 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1988 ha->host_no); 2171 &ha->gid_list_dma, GFP_KERNEL);
1989 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2172 if (!ha->gid_list)
1990 DMA_POOL_SIZE, 8, 0);
1991 if (!ha->s_dma_pool)
1992 goto fail_free_init_cb; 2173 goto fail_free_init_cb;
1993 2174
1994 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 2175 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
1995 if (!ha->srb_mempool) 2176 if (!ha->srb_mempool)
1996 goto fail_free_s_dma_pool; 2177 goto fail_free_gid_list;
1997 2178
1998 /* Get memory for cached NVRAM */ 2179 /* Get memory for cached NVRAM */
1999 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 2180 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2000 if (!ha->nvram) 2181 if (!ha->nvram)
2001 goto fail_free_srb_mempool; 2182 goto fail_free_srb_mempool;
2002 2183
2184 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2185 ha->pdev->device);
2186 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2187 DMA_POOL_SIZE, 8, 0);
2188 if (!ha->s_dma_pool)
2189 goto fail_free_nvram;
2190
2003 /* Allocate memory for SNS commands */ 2191 /* Allocate memory for SNS commands */
2004 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2192 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2005 /* Get consistent memory allocated for SNS commands */ 2193 /* Get consistent memory allocated for SNS commands */
2006 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 2194 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2007 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2195 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2008 if (!ha->sns_cmd) 2196 if (!ha->sns_cmd)
2009 goto fail_free_nvram; 2197 goto fail_dma_pool;
2010 } else { 2198 } else {
2011 /* Get consistent memory allocated for MS IOCB */ 2199 /* Get consistent memory allocated for MS IOCB */
2012 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2200 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2013 &ha->ms_iocb_dma); 2201 &ha->ms_iocb_dma);
2014 if (!ha->ms_iocb) 2202 if (!ha->ms_iocb)
2015 goto fail_free_nvram; 2203 goto fail_dma_pool;
2016 2204 /* Get consistent memory allocated for CT SNS commands */
2017 /* Get consistent memory allocated for CT SNS commands */
2018 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 2205 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2019 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2206 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2020 if (!ha->ct_sns) 2207 if (!ha->ct_sns)
2021 goto fail_free_ms_iocb; 2208 goto fail_free_ms_iocb;
2022 } 2209 }
2023 2210
2024 return 0; 2211 /* Allocate memory for request ring */
2212 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2213 if (!*req) {
2214 DEBUG(printk("Unable to allocate memory for req\n"));
2215 goto fail_req;
2216 }
2217 (*req)->length = req_len;
2218 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2219 ((*req)->length + 1) * sizeof(request_t),
2220 &(*req)->dma, GFP_KERNEL);
2221 if (!(*req)->ring) {
2222 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2223 goto fail_req_ring;
2224 }
2225 /* Allocate memory for response ring */
2226 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2227 if (!*rsp) {
2228 qla_printk(KERN_WARNING, ha,
2229 "Unable to allocate memory for rsp\n");
2230 goto fail_rsp;
2231 }
2232 (*rsp)->hw = ha;
2233 (*rsp)->length = rsp_len;
2234 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2235 ((*rsp)->length + 1) * sizeof(response_t),
2236 &(*rsp)->dma, GFP_KERNEL);
2237 if (!(*rsp)->ring) {
2238 qla_printk(KERN_WARNING, ha,
2239 "Unable to allocate memory for rsp_ring\n");
2240 goto fail_rsp_ring;
2241 }
2242 (*req)->rsp = *rsp;
2243 (*rsp)->req = *req;
2244 /* Allocate memory for NVRAM data for vports */
2245 if (ha->nvram_npiv_size) {
2246 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2247 ha->nvram_npiv_size, GFP_KERNEL);
2248 if (!ha->npiv_info) {
2249 qla_printk(KERN_WARNING, ha,
2250 "Unable to allocate memory for npiv info\n");
2251 goto fail_npiv_info;
2252 }
2253 } else
2254 ha->npiv_info = NULL;
2025 2255
2256 INIT_LIST_HEAD(&ha->vp_list);
2257 return 1;
2258
2259fail_npiv_info:
2260 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2261 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2262 (*rsp)->ring = NULL;
2263 (*rsp)->dma = 0;
2264fail_rsp_ring:
2265 kfree(*rsp);
2266fail_rsp:
2267 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2268 sizeof(request_t), (*req)->ring, (*req)->dma);
2269 (*req)->ring = NULL;
2270 (*req)->dma = 0;
2271fail_req_ring:
2272 kfree(*req);
2273fail_req:
2274 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2275 ha->ct_sns, ha->ct_sns_dma);
2276 ha->ct_sns = NULL;
2277 ha->ct_sns_dma = 0;
2026fail_free_ms_iocb: 2278fail_free_ms_iocb:
2027 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2279 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2028 ha->ms_iocb = NULL; 2280 ha->ms_iocb = NULL;
2029 ha->ms_iocb_dma = 0; 2281 ha->ms_iocb_dma = 0;
2282fail_dma_pool:
2283 dma_pool_destroy(ha->s_dma_pool);
2284 ha->s_dma_pool = NULL;
2030fail_free_nvram: 2285fail_free_nvram:
2031 kfree(ha->nvram); 2286 kfree(ha->nvram);
2032 ha->nvram = NULL; 2287 ha->nvram = NULL;
2033fail_free_srb_mempool: 2288fail_free_srb_mempool:
2034 mempool_destroy(ha->srb_mempool); 2289 mempool_destroy(ha->srb_mempool);
2035 ha->srb_mempool = NULL; 2290 ha->srb_mempool = NULL;
2036fail_free_s_dma_pool:
2037 dma_pool_destroy(ha->s_dma_pool);
2038 ha->s_dma_pool = NULL;
2039fail_free_init_cb:
2040 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2041 ha->init_cb_dma);
2042 ha->init_cb = NULL;
2043 ha->init_cb_dma = 0;
2044fail_free_gid_list: 2291fail_free_gid_list:
2045 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2292 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2046 ha->gid_list_dma); 2293 ha->gid_list_dma);
2047 ha->gid_list = NULL; 2294 ha->gid_list = NULL;
2048 ha->gid_list_dma = 0; 2295 ha->gid_list_dma = 0;
2049fail_free_response_ring: 2296fail_free_init_cb:
2050 dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) * 2297 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2051 sizeof(response_t), ha->response_ring, ha->response_dma); 2298 ha->init_cb_dma);
2052 ha->response_ring = NULL; 2299 ha->init_cb = NULL;
2053 ha->response_dma = 0; 2300 ha->init_cb_dma = 0;
2054fail_free_request_ring:
2055 dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
2056 sizeof(request_t), ha->request_ring, ha->request_dma);
2057 ha->request_ring = NULL;
2058 ha->request_dma = 0;
2059fail: 2301fail:
2302 DEBUG(printk("%s: Memory allocation failure\n", __func__));
2060 return -ENOMEM; 2303 return -ENOMEM;
2061} 2304}
2062 2305
@@ -2068,32 +2311,29 @@ fail:
2068* ha = adapter block pointer. 2311* ha = adapter block pointer.
2069*/ 2312*/
2070static void 2313static void
2071qla2x00_mem_free(scsi_qla_host_t *ha) 2314qla2x00_mem_free(struct qla_hw_data *ha)
2072{ 2315{
2073 struct list_head *fcpl, *fcptemp;
2074 fc_port_t *fcport;
2075
2076 if (ha->srb_mempool) 2316 if (ha->srb_mempool)
2077 mempool_destroy(ha->srb_mempool); 2317 mempool_destroy(ha->srb_mempool);
2078 2318
2079 if (ha->fce) 2319 if (ha->fce)
2080 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2320 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2081 ha->fce_dma); 2321 ha->fce_dma);
2082 2322
2083 if (ha->fw_dump) { 2323 if (ha->fw_dump) {
2084 if (ha->eft) 2324 if (ha->eft)
2085 dma_free_coherent(&ha->pdev->dev, 2325 dma_free_coherent(&ha->pdev->dev,
2086 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 2326 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2087 vfree(ha->fw_dump); 2327 vfree(ha->fw_dump);
2088 } 2328 }
2089 2329
2090 if (ha->sns_cmd) 2330 if (ha->sns_cmd)
2091 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2331 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2092 ha->sns_cmd, ha->sns_cmd_dma); 2332 ha->sns_cmd, ha->sns_cmd_dma);
2093 2333
2094 if (ha->ct_sns) 2334 if (ha->ct_sns)
2095 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 2335 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2096 ha->ct_sns, ha->ct_sns_dma); 2336 ha->ct_sns, ha->ct_sns_dma);
2097 2337
2098 if (ha->sfp_data) 2338 if (ha->sfp_data)
2099 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 2339 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
@@ -2104,23 +2344,18 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2104 if (ha->s_dma_pool) 2344 if (ha->s_dma_pool)
2105 dma_pool_destroy(ha->s_dma_pool); 2345 dma_pool_destroy(ha->s_dma_pool);
2106 2346
2107 if (ha->init_cb)
2108 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2109 ha->init_cb, ha->init_cb_dma);
2110 2347
2111 if (ha->gid_list) 2348 if (ha->gid_list)
2112 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2349 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2113 ha->gid_list_dma); 2350 ha->gid_list_dma);
2114 2351
2115 if (ha->response_ring)
2116 dma_free_coherent(&ha->pdev->dev,
2117 (ha->response_q_length + 1) * sizeof(response_t),
2118 ha->response_ring, ha->response_dma);
2119 2352
2120 if (ha->request_ring) 2353 if (ha->init_cb)
2121 dma_free_coherent(&ha->pdev->dev, 2354 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2122 (ha->request_q_length + 1) * sizeof(request_t), 2355 ha->init_cb, ha->init_cb_dma);
2123 ha->request_ring, ha->request_dma); 2356 vfree(ha->optrom_buffer);
2357 kfree(ha->nvram);
2358 kfree(ha->npiv_info);
2124 2359
2125 ha->srb_mempool = NULL; 2360 ha->srb_mempool = NULL;
2126 ha->eft = NULL; 2361 ha->eft = NULL;
@@ -2139,30 +2374,45 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2139 ha->gid_list = NULL; 2374 ha->gid_list = NULL;
2140 ha->gid_list_dma = 0; 2375 ha->gid_list_dma = 0;
2141 2376
2142 ha->response_ring = NULL; 2377 ha->fw_dump = NULL;
2143 ha->response_dma = 0; 2378 ha->fw_dumped = 0;
2144 ha->request_ring = NULL; 2379 ha->fw_dump_reading = 0;
2145 ha->request_dma = 0; 2380}
2146 2381
2147 list_for_each_safe(fcpl, fcptemp, &ha->fcports) { 2382struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2148 fcport = list_entry(fcpl, fc_port_t, list); 2383 struct qla_hw_data *ha)
2384{
2385 struct Scsi_Host *host;
2386 struct scsi_qla_host *vha = NULL;
2149 2387
2150 /* fc ports */ 2388 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2151 list_del_init(&fcport->list); 2389 if (host == NULL) {
2152 kfree(fcport); 2390 printk(KERN_WARNING
2391 "qla2xxx: Couldn't allocate host from scsi layer!\n");
2392 goto fail;
2153 } 2393 }
2154 INIT_LIST_HEAD(&ha->fcports);
2155 2394
2156 ha->fw_dump = NULL; 2395 /* Clear our data area */
2157 ha->fw_dumped = 0; 2396 vha = shost_priv(host);
2158 ha->fw_dump_reading = 0; 2397 memset(vha, 0, sizeof(scsi_qla_host_t));
2159 2398
2160 vfree(ha->optrom_buffer); 2399 vha->host = host;
2161 kfree(ha->nvram); 2400 vha->host_no = host->host_no;
2401 vha->hw = ha;
2402
2403 INIT_LIST_HEAD(&vha->vp_fcports);
2404 INIT_LIST_HEAD(&vha->work_list);
2405 INIT_LIST_HEAD(&vha->list);
2406
2407 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2408 return vha;
2409
2410fail:
2411 return vha;
2162} 2412}
2163 2413
2164static struct qla_work_evt * 2414static struct qla_work_evt *
2165qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type, 2415qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2166 int locked) 2416 int locked)
2167{ 2417{
2168 struct qla_work_evt *e; 2418 struct qla_work_evt *e;
@@ -2179,42 +2429,42 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
2179} 2429}
2180 2430
2181static int 2431static int
2182qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) 2432qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
2183{ 2433{
2184 unsigned long uninitialized_var(flags); 2434 unsigned long uninitialized_var(flags);
2185 scsi_qla_host_t *pha = to_qla_parent(ha); 2435 struct qla_hw_data *ha = vha->hw;
2186 2436
2187 if (!locked) 2437 if (!locked)
2188 spin_lock_irqsave(&pha->hardware_lock, flags); 2438 spin_lock_irqsave(&ha->hardware_lock, flags);
2189 list_add_tail(&e->list, &ha->work_list); 2439 list_add_tail(&e->list, &vha->work_list);
2190 qla2xxx_wake_dpc(ha); 2440 qla2xxx_wake_dpc(vha);
2191 if (!locked) 2441 if (!locked)
2192 spin_unlock_irqrestore(&pha->hardware_lock, flags); 2442 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2193 return QLA_SUCCESS; 2443 return QLA_SUCCESS;
2194} 2444}
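
qla2x00_post_work() above queues an event on the vha's work list under the hardware lock and wakes the DPC thread, which later drains the list in qla2x00_do_work(), dropping the lock while each event is handled. A generic userspace model of that producer/consumer hand-off (LIFO here for brevity, where the driver queues FIFO; the lock and wake primitive are pthread stand-ins):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct evt { int code; struct evt *next; };

struct host {
    pthread_mutex_t lock;          /* stands in for the hardware lock  */
    pthread_cond_t  wake;          /* stands in for qla2xxx_wake_dpc() */
    struct evt *work_list;
};

static int post_work(struct host *h, int code)
{
    struct evt *e = malloc(sizeof(*e));

    if (!e)
        return -1;
    e->code = code;

    pthread_mutex_lock(&h->lock);
    e->next = h->work_list;        /* queue the event                  */
    h->work_list = e;
    pthread_cond_signal(&h->wake); /* wake the worker                  */
    pthread_mutex_unlock(&h->lock);
    return 0;
}

static void do_work(struct host *h)
{
    pthread_mutex_lock(&h->lock);
    while (h->work_list) {
        struct evt *e = h->work_list;

        h->work_list = e->next;
        pthread_mutex_unlock(&h->lock);   /* drop lock while handling  */
        printf("event %d\n", e->code);
        free(e);
        pthread_mutex_lock(&h->lock);
    }
    pthread_mutex_unlock(&h->lock);
}

int main(void)
{
    static struct host h = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, NULL
    };

    post_work(&h, 1);
    post_work(&h, 2);
    do_work(&h);
    return 0;
}
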
2195 2445
2196int 2446int
2197qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code, 2447qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2198 u32 data) 2448 u32 data)
2199{ 2449{
2200 struct qla_work_evt *e; 2450 struct qla_work_evt *e;
2201 2451
2202 e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1); 2452 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
2203 if (!e) 2453 if (!e)
2204 return QLA_FUNCTION_FAILED; 2454 return QLA_FUNCTION_FAILED;
2205 2455
2206 e->u.aen.code = code; 2456 e->u.aen.code = code;
2207 e->u.aen.data = data; 2457 e->u.aen.data = data;
2208 return qla2x00_post_work(ha, e, 1); 2458 return qla2x00_post_work(vha, e, 1);
2209} 2459}
2210 2460
2211int 2461int
2212qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1, 2462qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
2213 uint16_t d2, uint16_t d3) 2463 uint16_t d2, uint16_t d3)
2214{ 2464{
2215 struct qla_work_evt *e; 2465 struct qla_work_evt *e;
2216 2466
2217 e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1); 2467 e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
2218 if (!e) 2468 if (!e)
2219 return QLA_FUNCTION_FAILED; 2469 return QLA_FUNCTION_FAILED;
2220 2470
@@ -2222,36 +2472,95 @@ qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
2222 e->u.hwe.d1 = d1; 2472 e->u.hwe.d1 = d1;
2223 e->u.hwe.d2 = d2; 2473 e->u.hwe.d2 = d2;
2224 e->u.hwe.d3 = d3; 2474 e->u.hwe.d3 = d3;
2225 return qla2x00_post_work(ha, e, 1); 2475 return qla2x00_post_work(vha, e, 1);
2226} 2476}
2227 2477
2228static void 2478static void
2229qla2x00_do_work(struct scsi_qla_host *ha) 2479qla2x00_do_work(struct scsi_qla_host *vha)
2230{ 2480{
2231 struct qla_work_evt *e; 2481 struct qla_work_evt *e;
2232 scsi_qla_host_t *pha = to_qla_parent(ha); 2482 struct qla_hw_data *ha = vha->hw;
2233 2483
2234 spin_lock_irq(&pha->hardware_lock); 2484 spin_lock_irq(&ha->hardware_lock);
2235 while (!list_empty(&ha->work_list)) { 2485 while (!list_empty(&vha->work_list)) {
2236 e = list_entry(ha->work_list.next, struct qla_work_evt, list); 2486 e = list_entry(vha->work_list.next, struct qla_work_evt, list);
2237 list_del_init(&e->list); 2487 list_del_init(&e->list);
2238 spin_unlock_irq(&pha->hardware_lock); 2488 spin_unlock_irq(&ha->hardware_lock);
2239 2489
2240 switch (e->type) { 2490 switch (e->type) {
2241 case QLA_EVT_AEN: 2491 case QLA_EVT_AEN:
2242 fc_host_post_event(ha->host, fc_get_event_number(), 2492 fc_host_post_event(vha->host, fc_get_event_number(),
2243 e->u.aen.code, e->u.aen.data); 2493 e->u.aen.code, e->u.aen.data);
2244 break; 2494 break;
2245 case QLA_EVT_HWE_LOG: 2495 case QLA_EVT_HWE_LOG:
2246 qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1, 2496 qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
2247 e->u.hwe.d2, e->u.hwe.d3); 2497 e->u.hwe.d2, e->u.hwe.d3);
2248 break; 2498 break;
2249 } 2499 }
2250 if (e->flags & QLA_EVT_FLAG_FREE) 2500 if (e->flags & QLA_EVT_FLAG_FREE)
2251 kfree(e); 2501 kfree(e);
2252 spin_lock_irq(&pha->hardware_lock); 2502 spin_lock_irq(&ha->hardware_lock);
2503 }
2504 spin_unlock_irq(&ha->hardware_lock);
2505}
2506/* Relogins all the fcports of a vport
2507 * Context: dpc thread
2508 */
2509void qla2x00_relogin(struct scsi_qla_host *vha)
2510{
2511 fc_port_t *fcport;
2512 uint8_t status;
2513 uint16_t next_loopid = 0;
2514 struct qla_hw_data *ha = vha->hw;
2515
2516 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2517 /*
2518 * If the port is not ONLINE then try to login
2519 * to it if we haven't run out of retries.
2520 */
2521 if (atomic_read(&fcport->state) !=
2522 FCS_ONLINE && fcport->login_retry) {
2523
2524 if (fcport->flags & FCF_FABRIC_DEVICE) {
2525 if (fcport->flags & FCF_TAPE_PRESENT)
2526 ha->isp_ops->fabric_logout(vha,
2527 fcport->loop_id,
2528 fcport->d_id.b.domain,
2529 fcport->d_id.b.area,
2530 fcport->d_id.b.al_pa);
2531
2532 status = qla2x00_fabric_login(vha, fcport,
2533 &next_loopid);
2534 } else
2535 status = qla2x00_local_device_login(vha,
2536 fcport);
2537
2538 fcport->login_retry--;
2539 if (status == QLA_SUCCESS) {
2540 fcport->old_loop_id = fcport->loop_id;
2541
2542 DEBUG(printk("scsi(%ld): port login OK: logged "
2543 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
2544
2545 qla2x00_update_fcport(vha, fcport);
2546
2547 } else if (status == 1) {
2548 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2549 /* retry the login again */
2550 DEBUG(printk("scsi(%ld): Retrying"
2551 " %d login again loop_id 0x%x\n",
2552 vha->host_no, fcport->login_retry,
2553 fcport->loop_id));
2554 } else {
2555 fcport->login_retry = 0;
2556 }
2557
2558 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2559 fcport->loop_id = FC_NO_LOOP_ID;
2560 }
2561 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2562 break;
2253 } 2563 }
2254 spin_unlock_irq(&pha->hardware_lock);
2255} 2564}
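
The new qla2x00_relogin() helper above gives each offline fcport a bounded number of login attempts: a transient failure re-arms RELOGIN_NEEDED for another DPC pass, and a port whose retry budget runs out falls back to FC_NO_LOOP_ID. A standalone sketch of that retry loop, with invented status codes and fields:

#include <stdio.h>
#include <stdbool.h>

enum { LOGIN_OK, LOGIN_RETRY, LOGIN_FATAL };

struct port { bool online; int retries; int id; };

/* Placeholder for the fabric/local login attempt. */
static int try_login(struct port *p) { (void)p; return LOGIN_OK; }

static void relogin_all(struct port *ports, int n, bool *need_another_pass)
{
    *need_another_pass = false;
    for (int i = 0; i < n; i++) {
        struct port *p = &ports[i];

        if (p->online || p->retries == 0)
            continue;
        p->retries--;
        switch (try_login(p)) {
        case LOGIN_OK:
            p->online = true;
            break;
        case LOGIN_RETRY:
            *need_another_pass = true;   /* like re-setting RELOGIN_NEEDED */
            break;
        default:
            p->retries = 0;              /* give up on this port           */
        }
        if (p->retries == 0 && !p->online)
            p->id = -1;                  /* like FC_NO_LOOP_ID             */
    }
}

int main(void)
{
    struct port ports[2] = { { false, 3, 5 }, { true, 0, 6 } };
    bool again;

    relogin_all(ports, 2, &again);
    printf("port0 online=%d again=%d\n", ports[0].online, again);
    return 0;
}
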
2256 2565
2257/************************************************************************** 2566/**************************************************************************
@@ -2271,15 +2580,11 @@ static int
2271qla2x00_do_dpc(void *data) 2580qla2x00_do_dpc(void *data)
2272{ 2581{
2273 int rval; 2582 int rval;
2274 scsi_qla_host_t *ha; 2583 scsi_qla_host_t *base_vha;
2275 fc_port_t *fcport; 2584 struct qla_hw_data *ha;
2276 uint8_t status;
2277 uint16_t next_loopid;
2278 struct scsi_qla_host *vha;
2279 int i;
2280 2585
2281 2586 ha = (struct qla_hw_data *)data;
2282 ha = (scsi_qla_host_t *)data; 2587 base_vha = pci_get_drvdata(ha->pdev);
2283 2588
2284 set_user_nice(current, -20); 2589 set_user_nice(current, -20);
2285 2590
@@ -2293,10 +2598,10 @@ qla2x00_do_dpc(void *data)
2293 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 2598 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
2294 2599
2295 /* Initialization not yet finished. Don't do anything yet. */ 2600 /* Initialization not yet finished. Don't do anything yet. */
2296 if (!ha->flags.init_done) 2601 if (!base_vha->flags.init_done)
2297 continue; 2602 continue;
2298 2603
2299 DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no)); 2604 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2300 2605
2301 ha->dpc_active = 1; 2606 ha->dpc_active = 1;
2302 2607
@@ -2305,149 +2610,98 @@ qla2x00_do_dpc(void *data)
2305 continue; 2610 continue;
2306 } 2611 }
2307 2612
2308 qla2x00_do_work(ha); 2613 qla2x00_do_work(base_vha);
2309 2614
2310 if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { 2615 if (test_and_clear_bit(ISP_ABORT_NEEDED,
2616 &base_vha->dpc_flags)) {
2311 2617
2312 DEBUG(printk("scsi(%ld): dpc: sched " 2618 DEBUG(printk("scsi(%ld): dpc: sched "
2313 "qla2x00_abort_isp ha = %p\n", 2619 "qla2x00_abort_isp ha = %p\n",
2314 ha->host_no, ha)); 2620 base_vha->host_no, ha));
2315 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 2621 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
2316 &ha->dpc_flags))) { 2622 &base_vha->dpc_flags))) {
2317 2623
2318 if (qla2x00_abort_isp(ha)) { 2624 if (qla2x00_abort_isp(base_vha)) {
2319 /* failed. retry later */ 2625 /* failed. retry later */
2320 set_bit(ISP_ABORT_NEEDED, 2626 set_bit(ISP_ABORT_NEEDED,
2321 &ha->dpc_flags); 2627 &base_vha->dpc_flags);
2322 }
2323 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
2324 }
2325
2326 for_each_mapped_vp_idx(ha, i) {
2327 list_for_each_entry(vha, &ha->vp_list,
2328 vp_list) {
2329 if (i == vha->vp_idx) {
2330 set_bit(ISP_ABORT_NEEDED,
2331 &vha->dpc_flags);
2332 break;
2333 }
2334 } 2628 }
2629 clear_bit(ABORT_ISP_ACTIVE,
2630 &base_vha->dpc_flags);
2335 } 2631 }
2336 2632
2337 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 2633 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2338 ha->host_no)); 2634 base_vha->host_no));
2339 } 2635 }
2340 2636
2341 if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) { 2637 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
2342 qla2x00_update_fcports(ha); 2638 qla2x00_update_fcports(base_vha);
2343 clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 2639 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2344 } 2640 }
2345 2641
2346 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && 2642 if (test_and_clear_bit(RESET_MARKER_NEEDED,
2347 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) { 2643 &base_vha->dpc_flags) &&
2644 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
2348 2645
2349 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 2646 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
2350 ha->host_no)); 2647 base_vha->host_no));
2351 2648
2352 qla2x00_rst_aen(ha); 2649 qla2x00_rst_aen(base_vha);
2353 clear_bit(RESET_ACTIVE, &ha->dpc_flags); 2650 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
2354 } 2651 }
2355 2652
2356 /* Retry each device up to login retry count */ 2653 /* Retry each device up to login retry count */
2357 if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2654 if ((test_and_clear_bit(RELOGIN_NEEDED,
2358 !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) && 2655 &base_vha->dpc_flags)) &&
2359 atomic_read(&ha->loop_state) != LOOP_DOWN) { 2656 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
2657 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
2360 2658
2361 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 2659 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
2362 ha->host_no)); 2660 base_vha->host_no));
2363 2661 qla2x00_relogin(base_vha);
2364 next_loopid = 0; 2662
2365 list_for_each_entry(fcport, &ha->fcports, list) {
2366 /*
2367 * If the port is not ONLINE then try to login
2368 * to it if we haven't run out of retries.
2369 */
2370 if (atomic_read(&fcport->state) != FCS_ONLINE &&
2371 fcport->login_retry) {
2372
2373 if (fcport->flags & FCF_FABRIC_DEVICE) {
2374 if (fcport->flags &
2375 FCF_TAPE_PRESENT)
2376 ha->isp_ops->fabric_logout(
2377 ha, fcport->loop_id,
2378 fcport->d_id.b.domain,
2379 fcport->d_id.b.area,
2380 fcport->d_id.b.al_pa);
2381 status = qla2x00_fabric_login(
2382 ha, fcport, &next_loopid);
2383 } else
2384 status =
2385 qla2x00_local_device_login(
2386 ha, fcport);
2387
2388 fcport->login_retry--;
2389 if (status == QLA_SUCCESS) {
2390 fcport->old_loop_id = fcport->loop_id;
2391
2392 DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
2393 ha->host_no, fcport->loop_id));
2394
2395 qla2x00_update_fcport(ha,
2396 fcport);
2397 } else if (status == 1) {
2398 set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
2399 /* retry the login again */
2400 DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
2401 ha->host_no,
2402 fcport->login_retry, fcport->loop_id));
2403 } else {
2404 fcport->login_retry = 0;
2405 }
2406 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2407 fcport->loop_id = FC_NO_LOOP_ID;
2408 }
2409 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2410 break;
2411 }
2412 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 2663 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
2413 ha->host_no)); 2664 base_vha->host_no));
2414 } 2665 }
2415 2666
2416 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2667 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
2668 &base_vha->dpc_flags)) {
2417 2669
2418 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 2670 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
2419 ha->host_no)); 2671 base_vha->host_no));
2420 2672
2421 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 2673 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
2422 &ha->dpc_flags))) { 2674 &base_vha->dpc_flags))) {
2423 2675
2424 rval = qla2x00_loop_resync(ha); 2676 rval = qla2x00_loop_resync(base_vha);
2425 2677
2426 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); 2678 clear_bit(LOOP_RESYNC_ACTIVE,
2679 &base_vha->dpc_flags);
2427 } 2680 }
2428 2681
2429 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 2682 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
2430 ha->host_no)); 2683 base_vha->host_no));
2431 } 2684 }
2432 2685
2433 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) && 2686 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
2434 atomic_read(&ha->loop_state) == LOOP_READY) { 2687 atomic_read(&base_vha->loop_state) == LOOP_READY) {
2435 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 2688 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
2436 qla2xxx_flash_npiv_conf(ha); 2689 qla2xxx_flash_npiv_conf(base_vha);
2437 } 2690 }
2438 2691
2439 if (!ha->interrupts_on) 2692 if (!ha->interrupts_on)
2440 ha->isp_ops->enable_intrs(ha); 2693 ha->isp_ops->enable_intrs(ha);
2441 2694
2442 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2695 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
2443 ha->isp_ops->beacon_blink(ha); 2696 &base_vha->dpc_flags))
2697 ha->isp_ops->beacon_blink(base_vha);
2444 2698
2445 qla2x00_do_dpc_all_vps(ha); 2699 qla2x00_do_dpc_all_vps(base_vha);
2446 2700
2447 ha->dpc_active = 0; 2701 ha->dpc_active = 0;
2448 } /* End of while(1) */ 2702 } /* End of while(1) */
2449 2703
2450 DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no)); 2704 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
2451 2705
2452 /* 2706 /*
2453 * Make sure that nobody tries to wake us up again. 2707 * Make sure that nobody tries to wake us up again.
@@ -2458,11 +2712,12 @@ qla2x00_do_dpc(void *data)
2458} 2712}
2459 2713
2460void 2714void
2461qla2xxx_wake_dpc(scsi_qla_host_t *ha) 2715qla2xxx_wake_dpc(struct scsi_qla_host *vha)
2462{ 2716{
2717 struct qla_hw_data *ha = vha->hw;
2463 struct task_struct *t = ha->dpc_thread; 2718 struct task_struct *t = ha->dpc_thread;
2464 2719
2465 if (!test_bit(UNLOADING, &ha->dpc_flags) && t) 2720 if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
2466 wake_up_process(t); 2721 wake_up_process(t);
2467} 2722}
2468 2723
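The DPC handler in the hunks above is a dedicated kernel thread: it renices itself, sleeps until qla2xxx_wake_dpc() calls wake_up_process() on it, and then consumes whichever dpc_flags bits other contexts have set. A minimal sketch of that sleep/wake pattern, using only generic kthread and bitops calls (the demo_* names and the single DEMO_WORK_NEEDED bit are invented for illustration, not driver code):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/printk.h>

#define DEMO_WORK_NEEDED	0	/* bit number in the flag word */

struct demo_hw {
	unsigned long flags;
	struct task_struct *worker;	/* started with kthread_run(demo_dpc, hw, "demo_dpc") */
};

static int demo_dpc(void *data)
{
	struct demo_hw *hw = data;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(DEMO_WORK_NEEDED, &hw->flags))
			schedule();		/* sleep until demo_wake() */
		__set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(DEMO_WORK_NEEDED, &hw->flags))
			pr_info("demo: handling deferred work\n");
	}
	return 0;
}

static void demo_wake(struct demo_hw *hw)
{
	set_bit(DEMO_WORK_NEEDED, &hw->flags);
	if (hw->worker)
		wake_up_process(hw->worker);	/* same call qla2xxx_wake_dpc() makes */
}

Because the flag word persists across wakeups, a request that races with the test is not lost; the bit is simply picked up on the next pass through the loop.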
@@ -2474,26 +2729,26 @@ qla2xxx_wake_dpc(scsi_qla_host_t *ha)
2474* ha = adapter block pointer. 2729* ha = adapter block pointer.
2475*/ 2730*/
2476static void 2731static void
2477qla2x00_rst_aen(scsi_qla_host_t *ha) 2732qla2x00_rst_aen(scsi_qla_host_t *vha)
2478{ 2733{
2479 if (ha->flags.online && !ha->flags.reset_active && 2734 if (vha->flags.online && !vha->flags.reset_active &&
2480 !atomic_read(&ha->loop_down_timer) && 2735 !atomic_read(&vha->loop_down_timer) &&
2481 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 2736 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
2482 do { 2737 do {
2483 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 2738 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2484 2739
2485 /* 2740 /*
2486 * Issue marker command only when we are going to start 2741 * Issue marker command only when we are going to start
2487 * the I/O. 2742 * the I/O.
2488 */ 2743 */
2489 ha->marker_needed = 1; 2744 vha->marker_needed = 1;
2490 } while (!atomic_read(&ha->loop_down_timer) && 2745 } while (!atomic_read(&vha->loop_down_timer) &&
2491 (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags))); 2746 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
2492 } 2747 }
2493} 2748}
2494 2749
2495static void 2750static void
2496qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp) 2751qla2x00_sp_free_dma(srb_t *sp)
2497{ 2752{
2498 struct scsi_cmnd *cmd = sp->cmd; 2753 struct scsi_cmnd *cmd = sp->cmd;
2499 2754
@@ -2505,11 +2760,11 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
2505} 2760}
2506 2761
2507void 2762void
2508qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp) 2763qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
2509{ 2764{
2510 struct scsi_cmnd *cmd = sp->cmd; 2765 struct scsi_cmnd *cmd = sp->cmd;
2511 2766
2512 qla2x00_sp_free_dma(ha, sp); 2767 qla2x00_sp_free_dma(sp);
2513 2768
2514 mempool_free(sp, ha->srb_mempool); 2769 mempool_free(sp, ha->srb_mempool);
2515 2770
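qla2x00_sp_compl() above returns each completed srb to ha->srb_mempool with mempool_free(). A hedged sketch of that mempool pattern (the demo_srb cache and pool names are invented; only the mempool/slab calls themselves are the real kernel API):

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_srb { int tag; };

static struct kmem_cache *demo_srb_cache;
static mempool_t *demo_srb_pool;

static int demo_pool_init(void)
{
	demo_srb_cache = kmem_cache_create("demo_srb",
			sizeof(struct demo_srb), 0, 0, NULL);
	if (!demo_srb_cache)
		return -ENOMEM;

	/* Keep at least 8 pre-allocated objects in reserve. */
	demo_srb_pool = mempool_create_slab_pool(8, demo_srb_cache);
	if (!demo_srb_pool) {
		kmem_cache_destroy(demo_srb_cache);
		return -ENOMEM;
	}
	return 0;
}

static void demo_complete(struct demo_srb *sp)
{
	mempool_free(sp, demo_srb_pool);	/* as in qla2x00_sp_compl() */
}

The reserve guarantees that completions can always recycle an object even under memory pressure, which is why the driver frees srbs back to the pool rather than kfree()ing them.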
@@ -2525,7 +2780,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
2525* Context: Interrupt 2780* Context: Interrupt
2526***************************************************************************/ 2781***************************************************************************/
2527void 2782void
2528qla2x00_timer(scsi_qla_host_t *ha) 2783qla2x00_timer(scsi_qla_host_t *vha)
2529{ 2784{
2530 unsigned long cpu_flags = 0; 2785 unsigned long cpu_flags = 0;
2531 fc_port_t *fcport; 2786 fc_port_t *fcport;
@@ -2533,8 +2788,8 @@ qla2x00_timer(scsi_qla_host_t *ha)
2533 int index; 2788 int index;
2534 srb_t *sp; 2789 srb_t *sp;
2535 int t; 2790 int t;
2536 scsi_qla_host_t *pha = to_qla_parent(ha); 2791 struct qla_hw_data *ha = vha->hw;
2537 2792 struct req_que *req;
2538 /* 2793 /*
2539 * Ports - Port down timer. 2794 * Ports - Port down timer.
2540 * 2795 *
@@ -2543,7 +2798,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2543 * the port is marked DEAD. 2798 * the port is marked DEAD.
2544 */ 2799 */
2545 t = 0; 2800 t = 0;
2546 list_for_each_entry(fcport, &ha->fcports, list) { 2801 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2547 if (fcport->port_type != FCT_TARGET) 2802 if (fcport->port_type != FCT_TARGET)
2548 continue; 2803 continue;
2549 2804
@@ -2557,7 +2812,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2557 2812
2558 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: " 2813 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
2559 "%d remaining\n", 2814 "%d remaining\n",
2560 ha->host_no, 2815 vha->host_no,
2561 t, atomic_read(&fcport->port_down_timer))); 2816 t, atomic_read(&fcport->port_down_timer)));
2562 } 2817 }
2563 t++; 2818 t++;
@@ -2565,30 +2820,32 @@ qla2x00_timer(scsi_qla_host_t *ha)
2565 2820
2566 2821
2567 /* Loop down handler. */ 2822 /* Loop down handler. */
2568 if (atomic_read(&ha->loop_down_timer) > 0 && 2823 if (atomic_read(&vha->loop_down_timer) > 0 &&
2569 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) { 2824 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
2825 && vha->flags.online) {
2570 2826
2571 if (atomic_read(&ha->loop_down_timer) == 2827 if (atomic_read(&vha->loop_down_timer) ==
2572 ha->loop_down_abort_time) { 2828 vha->loop_down_abort_time) {
2573 2829
2574 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 2830 DEBUG(printk("scsi(%ld): Loop Down - aborting the "
2575 "queues before time expire\n", 2831 "queues before time expire\n",
2576 ha->host_no)); 2832 vha->host_no));
2577 2833
2578 if (!IS_QLA2100(ha) && ha->link_down_timeout) 2834 if (!IS_QLA2100(ha) && vha->link_down_timeout)
2579 atomic_set(&ha->loop_state, LOOP_DEAD); 2835 atomic_set(&vha->loop_state, LOOP_DEAD);
2580 2836
2581 /* Schedule an ISP abort to return any tape commands. */ 2837 /* Schedule an ISP abort to return any tape commands. */
2582 /* NPIV - scan physical port only */ 2838 /* NPIV - scan physical port only */
2583 if (!ha->parent) { 2839 if (!vha->vp_idx) {
2584 spin_lock_irqsave(&ha->hardware_lock, 2840 spin_lock_irqsave(&ha->hardware_lock,
2585 cpu_flags); 2841 cpu_flags);
2842 req = ha->req_q_map[0];
2586 for (index = 1; 2843 for (index = 1;
2587 index < MAX_OUTSTANDING_COMMANDS; 2844 index < MAX_OUTSTANDING_COMMANDS;
2588 index++) { 2845 index++) {
2589 fc_port_t *sfcp; 2846 fc_port_t *sfcp;
2590 2847
2591 sp = ha->outstanding_cmds[index]; 2848 sp = req->outstanding_cmds[index];
2592 if (!sp) 2849 if (!sp)
2593 continue; 2850 continue;
2594 sfcp = sp->fcport; 2851 sfcp = sp->fcport;
@@ -2596,63 +2853,63 @@ qla2x00_timer(scsi_qla_host_t *ha)
2596 continue; 2853 continue;
2597 2854
2598 set_bit(ISP_ABORT_NEEDED, 2855 set_bit(ISP_ABORT_NEEDED,
2599 &ha->dpc_flags); 2856 &vha->dpc_flags);
2600 break; 2857 break;
2601 } 2858 }
2602 spin_unlock_irqrestore(&ha->hardware_lock, 2859 spin_unlock_irqrestore(&ha->hardware_lock,
2603 cpu_flags); 2860 cpu_flags);
2604 } 2861 }
2605 set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags); 2862 set_bit(ABORT_QUEUES_NEEDED, &vha->dpc_flags);
2606 start_dpc++; 2863 start_dpc++;
2607 } 2864 }
2608 2865
2609 /* if the loop has been down for 4 minutes, reinit adapter */ 2866 /* if the loop has been down for 4 minutes, reinit adapter */
2610 if (atomic_dec_and_test(&ha->loop_down_timer) != 0) { 2867 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
2611 DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - " 2868 DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
2612 "restarting queues.\n", 2869 "restarting queues.\n",
2613 ha->host_no)); 2870 vha->host_no));
2614 2871
2615 set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags); 2872 set_bit(RESTART_QUEUES_NEEDED, &vha->dpc_flags);
2616 start_dpc++; 2873 start_dpc++;
2617 2874
2618 if (!(ha->device_flags & DFLG_NO_CABLE) && 2875 if (!(vha->device_flags & DFLG_NO_CABLE) &&
2619 !ha->parent) { 2876 !vha->vp_idx) {
2620 DEBUG(printk("scsi(%ld): Loop down - " 2877 DEBUG(printk("scsi(%ld): Loop down - "
2621 "aborting ISP.\n", 2878 "aborting ISP.\n",
2622 ha->host_no)); 2879 vha->host_no));
2623 qla_printk(KERN_WARNING, ha, 2880 qla_printk(KERN_WARNING, ha,
2624 "Loop down - aborting ISP.\n"); 2881 "Loop down - aborting ISP.\n");
2625 2882
2626 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2883 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2627 } 2884 }
2628 } 2885 }
2629 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 2886 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
2630 ha->host_no, 2887 vha->host_no,
2631 atomic_read(&ha->loop_down_timer))); 2888 atomic_read(&vha->loop_down_timer)));
2632 } 2889 }
2633 2890
2634 /* Check if beacon LED needs to be blinked */ 2891 /* Check if beacon LED needs to be blinked */
2635 if (ha->beacon_blink_led == 1) { 2892 if (ha->beacon_blink_led == 1) {
2636 set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags); 2893 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
2637 start_dpc++; 2894 start_dpc++;
2638 } 2895 }
2639 2896
2640 /* Process any deferred work. */ 2897 /* Process any deferred work. */
2641 if (!list_empty(&ha->work_list)) 2898 if (!list_empty(&vha->work_list))
2642 start_dpc++; 2899 start_dpc++;
2643 2900
2644 /* Schedule the DPC routine if needed */ 2901 /* Schedule the DPC routine if needed */
2645 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2902 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2646 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2903 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
2647 test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) || 2904 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
2648 start_dpc || 2905 start_dpc ||
2649 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2906 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
2650 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || 2907 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
2651 test_bit(VP_DPC_NEEDED, &ha->dpc_flags) || 2908 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
2652 test_bit(RELOGIN_NEEDED, &ha->dpc_flags))) 2909 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
2653 qla2xxx_wake_dpc(pha); 2910 qla2xxx_wake_dpc(vha);
2654 2911
2655 qla2x00_restart_timer(ha, WATCH_INTERVAL); 2912 qla2x00_restart_timer(vha, WATCH_INTERVAL);
2656} 2913}
2657 2914
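The loop-down handling in qla2x00_timer() above counts down vha->loop_down_timer with atomic_read()/atomic_dec_and_test() so the once-per-second timer, the DPC thread and the interrupt paths can all touch it without extra locking. A minimal sketch of the same countdown shape (demo_* names invented):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t demo_loop_down_timer = ATOMIC_INIT(0);

/* Arm the countdown when the loop goes down. */
static void demo_mark_loop_down(int seconds)
{
	atomic_set(&demo_loop_down_timer, seconds);
}

/*
 * Called once per timer tick.  atomic_dec_and_test() returns true only
 * on the transition to zero, so the "reinit adapter" path fires exactly
 * once even though several contexts may read the counter.
 */
static bool demo_loop_down_tick(void)
{
	if (atomic_read(&demo_loop_down_timer) <= 0)
		return false;
	return atomic_dec_and_test(&demo_loop_down_timer);
}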
2658/* Firmware interface routines. */ 2915/* Firmware interface routines. */
@@ -2684,8 +2941,9 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2684}; 2941};
2685 2942
2686struct fw_blob * 2943struct fw_blob *
2687qla2x00_request_firmware(scsi_qla_host_t *ha) 2944qla2x00_request_firmware(scsi_qla_host_t *vha)
2688{ 2945{
2946 struct qla_hw_data *ha = vha->hw;
2689 struct fw_blob *blob; 2947 struct fw_blob *blob;
2690 2948
2691 blob = NULL; 2949 blob = NULL;
@@ -2709,7 +2967,7 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
2709 2967
2710 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 2968 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
2711 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 2969 DEBUG2(printk("scsi(%ld): Failed to load firmware image "
2712 "(%s).\n", ha->host_no, blob->name)); 2970 "(%s).\n", vha->host_no, blob->name));
2713 blob->fw = NULL; 2971 blob->fw = NULL;
2714 blob = NULL; 2972 blob = NULL;
2715 goto out; 2973 goto out;
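qla2x00_request_firmware() above ultimately comes down to a request_firmware() call against the adapter's PCI device. A minimal sketch of that API in isolation ("ql2400_fw.bin" is one of the stock qla24xx image names; demo_load_fw and the surrounding logic are invented):

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/printk.h>

static int demo_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int rc;

	/* The firmware loader resolves the name (e.g. under /lib/firmware). */
	rc = request_firmware(&fw, "ql2400_fw.bin", dev);
	if (rc)
		return rc;

	pr_info("demo: firmware image is %zu bytes at %p\n", fw->size, fw->data);

	/* ... copy or DMA fw->data to the adapter here ... */

	release_firmware(fw);
	return 0;
}

The driver caches the loaded blob instead of releasing it immediately, but the request/release pairing is the same.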
@@ -2754,7 +3012,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
2754 int risc_paused = 0; 3012 int risc_paused = 0;
2755 uint32_t stat; 3013 uint32_t stat;
2756 unsigned long flags; 3014 unsigned long flags;
2757 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3015 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3016 struct qla_hw_data *ha = base_vha->hw;
2758 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3017 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2759 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 3018 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2760 3019
@@ -2777,7 +3036,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
2777 if (risc_paused) { 3036 if (risc_paused) {
2778 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 3037 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
2779 "Dumping firmware!\n"); 3038 "Dumping firmware!\n");
2780 ha->isp_ops->fw_dump(ha, 0); 3039 ha->isp_ops->fw_dump(base_vha, 0);
2781 3040
2782 return PCI_ERS_RESULT_NEED_RESET; 3041 return PCI_ERS_RESULT_NEED_RESET;
2783 } else 3042 } else
@@ -2788,7 +3047,8 @@ static pci_ers_result_t
2788qla2xxx_pci_slot_reset(struct pci_dev *pdev) 3047qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2789{ 3048{
2790 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 3049 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
2791 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3050 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3051 struct qla_hw_data *ha = base_vha->hw;
2792 int rc; 3052 int rc;
2793 3053
2794 if (ha->mem_only) 3054 if (ha->mem_only)
@@ -2804,13 +3064,13 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2804 } 3064 }
2805 pci_set_master(pdev); 3065 pci_set_master(pdev);
2806 3066
2807 if (ha->isp_ops->pci_config(ha)) 3067 if (ha->isp_ops->pci_config(base_vha))
2808 return ret; 3068 return ret;
2809 3069
2810 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 3070 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
2811 if (qla2x00_abort_isp(ha)== QLA_SUCCESS) 3071 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
2812 ret = PCI_ERS_RESULT_RECOVERED; 3072 ret = PCI_ERS_RESULT_RECOVERED;
2813 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 3073 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
2814 3074
2815 return ret; 3075 return ret;
2816} 3076}
@@ -2818,10 +3078,11 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2818static void 3078static void
2819qla2xxx_pci_resume(struct pci_dev *pdev) 3079qla2xxx_pci_resume(struct pci_dev *pdev)
2820{ 3080{
2821 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3081 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3082 struct qla_hw_data *ha = base_vha->hw;
2822 int ret; 3083 int ret;
2823 3084
2824 ret = qla2x00_wait_for_hba_online(ha); 3085 ret = qla2x00_wait_for_hba_online(base_vha);
2825 if (ret != QLA_SUCCESS) { 3086 if (ret != QLA_SUCCESS) {
2826 qla_printk(KERN_ERR, ha, 3087 qla_printk(KERN_ERR, ha,
2827 "the device failed to resume I/O " 3088 "the device failed to resume I/O "
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index e4af678eb2d6..c538ee1b1a31 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -10,10 +10,6 @@
10#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
11#include <asm/uaccess.h> 11#include <asm/uaccess.h>
12 12
13static uint16_t qla2x00_nvram_request(scsi_qla_host_t *, uint32_t);
14static void qla2x00_nv_deselect(scsi_qla_host_t *);
15static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
16
17/* 13/*
18 * NVRAM support routines 14 * NVRAM support routines
19 */ 15 */
@@ -23,7 +19,7 @@ static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
23 * @ha: HA context 19 * @ha: HA context
24 */ 20 */
25static void 21static void
26qla2x00_lock_nvram_access(scsi_qla_host_t *ha) 22qla2x00_lock_nvram_access(struct qla_hw_data *ha)
27{ 23{
28 uint16_t data; 24 uint16_t data;
29 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 25 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -56,7 +52,7 @@ qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
56 * @ha: HA context 52 * @ha: HA context
57 */ 53 */
58static void 54static void
59qla2x00_unlock_nvram_access(scsi_qla_host_t *ha) 55qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
60{ 56{
61 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 57 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
62 58
@@ -67,6 +63,84 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
67} 63}
68 64
69/** 65/**
66 * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
67 * @ha: HA context
68 * @data: Serial interface selector
69 */
70static void
71qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
72{
73 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
74
75 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
76 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
77 NVRAM_DELAY();
78 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
79 NVR_WRT_ENABLE);
80 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
81 NVRAM_DELAY();
82 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
83 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
84 NVRAM_DELAY();
85}
86
87/**
88 * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
89 * NVRAM.
90 * @ha: HA context
91 * @nv_cmd: NVRAM command
92 *
93 * Bit definitions for NVRAM command:
94 *
95 * Bit 26 = start bit
96 * Bit 25, 24 = opcode
97 * Bit 23-16 = address
98 * Bit 15-0 = write data
99 *
100 * Returns the word read from nvram @addr.
101 */
102static uint16_t
103qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
104{
105 uint8_t cnt;
106 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
107 uint16_t data = 0;
108 uint16_t reg_data;
109
110 /* Send command to NVRAM. */
111 nv_cmd <<= 5;
112 for (cnt = 0; cnt < 11; cnt++) {
113 if (nv_cmd & BIT_31)
114 qla2x00_nv_write(ha, NVR_DATA_OUT);
115 else
116 qla2x00_nv_write(ha, 0);
117 nv_cmd <<= 1;
118 }
119
120 /* Read data from NVRAM. */
121 for (cnt = 0; cnt < 16; cnt++) {
122 WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
123 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
124 NVRAM_DELAY();
125 data <<= 1;
126 reg_data = RD_REG_WORD(&reg->nvram);
127 if (reg_data & NVR_DATA_IN)
128 data |= BIT_0;
129 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
130 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
131 NVRAM_DELAY();
132 }
133
134 /* Deselect chip. */
135 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
136 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
137 NVRAM_DELAY();
138
139 return data;
140}
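A worked example of the command layout documented above (bit 26 start, bits 25-24 opcode, bits 23-16 address, bits 15-0 data). This is plain host-side C, independent of the driver; the "read" opcode value of binary 10 is the usual Microwire/93Cxx encoding and is an assumption here, and the macro the driver actually uses for it is not shown in these hunks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr   = 0x2A;                  /* NVRAM word address      */
	uint32_t nv_cmd = (1u << 26)             /* start bit               */
	                | (2u << 24)             /* opcode: read (0b10)     */
	                | (addr << 16);          /* address in bits 23-16   */

	/*
	 * qla2x00_nvram_request() then shifts the command left by 5 so the
	 * start bit lands in bit 31, and clocks out 11 bits MSB-first.
	 */
	printf("nv_cmd = 0x%08x, shifted = 0x%08x\n",
	       (unsigned)nv_cmd, (unsigned)(nv_cmd << 5));
	return 0;
}

For address 0x2A this yields nv_cmd = 0x062A0000 and a shifted value of 0xC5400000, whose top 11 bits are the serial command stream.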
141
142
143/**
70 * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the 144 * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the
71 * request routine to get the word from NVRAM. 145 * request routine to get the word from NVRAM.
72 * @ha: HA context 146 * @ha: HA context
@@ -75,7 +149,7 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
75 * Returns the word read from nvram @addr. 149 * Returns the word read from nvram @addr.
76 */ 150 */
77static uint16_t 151static uint16_t
78qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr) 152qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr)
79{ 153{
80 uint16_t data; 154 uint16_t data;
81 uint32_t nv_cmd; 155 uint32_t nv_cmd;
@@ -88,13 +162,27 @@ qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
88} 162}
89 163
90/** 164/**
165 * qla2x00_nv_deselect() - Deselect NVRAM operations.
166 * @ha: HA context
167 */
168static void
169qla2x00_nv_deselect(struct qla_hw_data *ha)
170{
171 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
172
173 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
174 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
175 NVRAM_DELAY();
176}
177
178/**
91 * qla2x00_write_nvram_word() - Write NVRAM data. 179 * qla2x00_write_nvram_word() - Write NVRAM data.
92 * @ha: HA context 180 * @ha: HA context
93 * @addr: Address in NVRAM to write 181 * @addr: Address in NVRAM to write
94 * @data: word to program 182 * @data: word to program
95 */ 183 */
96static void 184static void
97qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data) 185qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
98{ 186{
99 int count; 187 int count;
100 uint16_t word; 188 uint16_t word;
@@ -132,7 +220,7 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
132 do { 220 do {
133 if (!--wait_cnt) { 221 if (!--wait_cnt) {
134 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 222 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
135 __func__, ha->host_no)); 223 __func__, vha->host_no));
136 break; 224 break;
137 } 225 }
138 NVRAM_DELAY(); 226 NVRAM_DELAY();
@@ -150,8 +238,8 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
150} 238}
151 239
152static int 240static int
153qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data, 241qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
154 uint32_t tmo) 242 uint16_t data, uint32_t tmo)
155{ 243{
156 int ret, count; 244 int ret, count;
157 uint16_t word; 245 uint16_t word;
@@ -209,102 +297,11 @@ qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data,
209} 297}
210 298
211/** 299/**
212 * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
213 * NVRAM.
214 * @ha: HA context
215 * @nv_cmd: NVRAM command
216 *
217 * Bit definitions for NVRAM command:
218 *
219 * Bit 26 = start bit
220 * Bit 25, 24 = opcode
221 * Bit 23-16 = address
222 * Bit 15-0 = write data
223 *
224 * Returns the word read from nvram @addr.
225 */
226static uint16_t
227qla2x00_nvram_request(scsi_qla_host_t *ha, uint32_t nv_cmd)
228{
229 uint8_t cnt;
230 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
231 uint16_t data = 0;
232 uint16_t reg_data;
233
234 /* Send command to NVRAM. */
235 nv_cmd <<= 5;
236 for (cnt = 0; cnt < 11; cnt++) {
237 if (nv_cmd & BIT_31)
238 qla2x00_nv_write(ha, NVR_DATA_OUT);
239 else
240 qla2x00_nv_write(ha, 0);
241 nv_cmd <<= 1;
242 }
243
244 /* Read data from NVRAM. */
245 for (cnt = 0; cnt < 16; cnt++) {
246 WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
247 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
248 NVRAM_DELAY();
249 data <<= 1;
250 reg_data = RD_REG_WORD(&reg->nvram);
251 if (reg_data & NVR_DATA_IN)
252 data |= BIT_0;
253 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
254 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
255 NVRAM_DELAY();
256 }
257
258 /* Deselect chip. */
259 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
260 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
261 NVRAM_DELAY();
262
263 return (data);
264}
265
266/**
267 * qla2x00_nv_write() - Clean NVRAM operations.
268 * @ha: HA context
269 */
270static void
271qla2x00_nv_deselect(scsi_qla_host_t *ha)
272{
273 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
274
275 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
276 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
277 NVRAM_DELAY();
278}
279
280/**
281 * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
282 * @ha: HA context
283 * @data: Serial interface selector
284 */
285static void
286qla2x00_nv_write(scsi_qla_host_t *ha, uint16_t data)
287{
288 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
289
290 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
291 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
292 NVRAM_DELAY();
293 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT| NVR_CLOCK |
294 NVR_WRT_ENABLE);
295 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
296 NVRAM_DELAY();
297 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
298 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
299 NVRAM_DELAY();
300}
301
302/**
303 * qla2x00_clear_nvram_protection() - 300 * qla2x00_clear_nvram_protection() -
304 * @ha: HA context 301 * @ha: HA context
305 */ 302 */
306static int 303static int
307qla2x00_clear_nvram_protection(scsi_qla_host_t *ha) 304qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
308{ 305{
309 int ret, stat; 306 int ret, stat;
310 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 307 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -352,9 +349,8 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
352 wait_cnt = NVR_WAIT_CNT; 349 wait_cnt = NVR_WAIT_CNT;
353 do { 350 do {
354 if (!--wait_cnt) { 351 if (!--wait_cnt) {
355 DEBUG9_10(printk("%s(%ld): NVRAM didn't go " 352 DEBUG9_10(qla_printk(
356 "ready...\n", __func__, 353 "NVRAM didn't go ready...\n"));
357 ha->host_no));
358 break; 354 break;
359 } 355 }
360 NVRAM_DELAY(); 356 NVRAM_DELAY();
@@ -370,7 +366,7 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
370} 366}
371 367
372static void 368static void
373qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat) 369qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
374{ 370{
375 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 371 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
376 uint32_t word, wait_cnt; 372 uint32_t word, wait_cnt;
@@ -412,8 +408,7 @@ qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat)
412 wait_cnt = NVR_WAIT_CNT; 408 wait_cnt = NVR_WAIT_CNT;
413 do { 409 do {
414 if (!--wait_cnt) { 410 if (!--wait_cnt) {
415 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 411 DEBUG9_10(qla_printk("NVRAM didn't go ready...\n"));
416 __func__, ha->host_no));
417 break; 412 break;
418 } 413 }
419 NVRAM_DELAY(); 414 NVRAM_DELAY();
@@ -454,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
454} 449}
455 450
456static uint32_t 451static uint32_t
457qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr) 452qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
458{ 453{
459 int rval; 454 int rval;
460 uint32_t cnt, data; 455 uint32_t cnt, data;
@@ -482,21 +477,20 @@ qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
482} 477}
483 478
484uint32_t * 479uint32_t *
485qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, 480qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
486 uint32_t dwords) 481 uint32_t dwords)
487{ 482{
488 uint32_t i; 483 uint32_t i;
489
490 /* Dword reads to flash. */ 484 /* Dword reads to flash. */
491 for (i = 0; i < dwords; i++, faddr++) 485 for (i = 0; i < dwords; i++, faddr++)
492 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 486 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
493 flash_data_to_access_addr(faddr))); 487 flash_data_to_access_addr(faddr)));
494 488
495 return dwptr; 489 return dwptr;
496} 490}
497 491
498static int 492static int
499qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data) 493qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
500{ 494{
501 int rval; 495 int rval;
502 uint32_t cnt; 496 uint32_t cnt;
@@ -519,7 +513,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
519} 513}
520 514
521static void 515static void
522qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 516qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
523 uint8_t *flash_id) 517 uint8_t *flash_id)
524{ 518{
525 uint32_t ids; 519 uint32_t ids;
@@ -544,7 +538,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
544} 538}
545 539
546static int 540static int
547qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start) 541qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
548{ 542{
549 const char *loc, *locations[] = { "DEF", "PCI" }; 543 const char *loc, *locations[] = { "DEF", "PCI" };
550 uint32_t pcihdr, pcids; 544 uint32_t pcihdr, pcids;
@@ -552,6 +546,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
552 uint8_t *buf, *bcode, last_image; 546 uint8_t *buf, *bcode, last_image;
553 uint16_t cnt, chksum, *wptr; 547 uint16_t cnt, chksum, *wptr;
554 struct qla_flt_location *fltl; 548 struct qla_flt_location *fltl;
549 struct qla_hw_data *ha = vha->hw;
550 struct req_que *req = ha->req_q_map[0];
555 551
556 /* 552 /*
557 * FLT-location structure resides after the last PCI region. 553 * FLT-location structure resides after the last PCI region.
@@ -563,20 +559,20 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
563 FA_FLASH_LAYOUT_ADDR; 559 FA_FLASH_LAYOUT_ADDR;
564 560
565 /* Begin with first PCI expansion ROM header. */ 561 /* Begin with first PCI expansion ROM header. */
566 buf = (uint8_t *)ha->request_ring; 562 buf = (uint8_t *)req->ring;
567 dcode = (uint32_t *)ha->request_ring; 563 dcode = (uint32_t *)req->ring;
568 pcihdr = 0; 564 pcihdr = 0;
569 last_image = 1; 565 last_image = 1;
570 do { 566 do {
571 /* Verify PCI expansion ROM header. */ 567 /* Verify PCI expansion ROM header. */
572 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20); 568 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
573 bcode = buf + (pcihdr % 4); 569 bcode = buf + (pcihdr % 4);
574 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) 570 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
575 goto end; 571 goto end;
576 572
577 /* Locate PCI data structure. */ 573 /* Locate PCI data structure. */
578 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); 574 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
579 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20); 575 qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
580 bcode = buf + (pcihdr % 4); 576 bcode = buf + (pcihdr % 4);
581 577
582 /* Validate signature of PCI data structure. */ 578 /* Validate signature of PCI data structure. */
@@ -591,14 +587,14 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
591 } while (!last_image); 587 } while (!last_image);
592 588
593 /* Now verify FLT-location structure. */ 589 /* Now verify FLT-location structure. */
594 fltl = (struct qla_flt_location *)ha->request_ring; 590 fltl = (struct qla_flt_location *)req->ring;
595 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 591 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
596 sizeof(struct qla_flt_location) >> 2); 592 sizeof(struct qla_flt_location) >> 2);
597 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' || 593 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
598 fltl->sig[2] != 'L' || fltl->sig[3] != 'T') 594 fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
599 goto end; 595 goto end;
600 596
601 wptr = (uint16_t *)ha->request_ring; 597 wptr = (uint16_t *)req->ring;
602 cnt = sizeof(struct qla_flt_location) >> 1; 598 cnt = sizeof(struct qla_flt_location) >> 1;
603 for (chksum = 0; cnt; cnt--) 599 for (chksum = 0; cnt; cnt--)
604 chksum += le16_to_cpu(*wptr++); 600 chksum += le16_to_cpu(*wptr++);
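The loop above validates the FLT-location structure with a simple 16-bit word sum. The usual convention for this kind of check (the driver's exact comparison falls outside the lines quoted here) is that the stored checksum word makes the sum of all words come out to zero. A host-only sketch that ignores the le16_to_cpu() byte-order conversion:

#include <stdint.h>
#include <stddef.h>

static uint16_t flt_checksum(const uint16_t *words, size_t nwords)
{
	uint16_t sum = 0;

	while (nwords--)
		sum += *words++;	/* 16-bit wraparound sum */

	return sum;			/* 0 usually means the structure is intact */
}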
@@ -619,7 +615,7 @@ end:
619} 615}
620 616
621static void 617static void
622qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr) 618qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
623{ 619{
624 const char *loc, *locations[] = { "DEF", "FLT" }; 620 const char *loc, *locations[] = { "DEF", "FLT" };
625 uint16_t *wptr; 621 uint16_t *wptr;
@@ -627,12 +623,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
627 uint32_t start; 623 uint32_t start;
628 struct qla_flt_header *flt; 624 struct qla_flt_header *flt;
629 struct qla_flt_region *region; 625 struct qla_flt_region *region;
626 struct qla_hw_data *ha = vha->hw;
627 struct req_que *req = ha->req_q_map[0];
630 628
631 ha->flt_region_flt = flt_addr; 629 ha->flt_region_flt = flt_addr;
632 wptr = (uint16_t *)ha->request_ring; 630 wptr = (uint16_t *)req->ring;
633 flt = (struct qla_flt_header *)ha->request_ring; 631 flt = (struct qla_flt_header *)req->ring;
634 region = (struct qla_flt_region *)&flt[1]; 632 region = (struct qla_flt_region *)&flt[1];
635 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 633 ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
636 flt_addr << 2, OPTROM_BURST_SIZE); 634 flt_addr << 2, OPTROM_BURST_SIZE);
637 if (*wptr == __constant_cpu_to_le16(0xffff)) 635 if (*wptr == __constant_cpu_to_le16(0xffff))
638 goto no_flash_data; 636 goto no_flash_data;
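qla2xxx_get_flt_info() above reads the flash layout table into the request-ring buffer and takes &flt[1] as the first region entry, i.e. the entries are laid out immediately after the header. A simplified host-side sketch of walking such a header-plus-entries image (the structure layouts and field names below are stand-ins, not the driver's definitions, and byte order is ignored):

#include <stdint.h>
#include <stdio.h>

struct demo_flt_header {
	uint16_t version;
	uint16_t length;	/* bytes of region entries that follow */
	uint16_t checksum;
	uint16_t unused;
};

struct demo_flt_region {
	uint32_t code;
	uint32_t size;
	uint32_t start;
	uint32_t end;
};

static void demo_walk_flt(const void *image)
{
	const struct demo_flt_header *flt = image;
	const struct demo_flt_region *region =
	    (const struct demo_flt_region *)&flt[1];	/* just past the header */
	unsigned int cnt = flt->length / sizeof(*region);

	while (cnt--) {
		printf("region 0x%x: 0x%x-0x%x\n",
		       (unsigned)region->code, (unsigned)region->start,
		       (unsigned)region->end);
		region++;
	}
}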
@@ -720,7 +718,7 @@ done:
720} 718}
721 719
722static void 720static void
723qla2xxx_get_fdt_info(scsi_qla_host_t *ha) 721qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
724{ 722{
725#define FLASH_BLK_SIZE_4K 0x1000 723#define FLASH_BLK_SIZE_4K 0x1000
726#define FLASH_BLK_SIZE_32K 0x8000 724#define FLASH_BLK_SIZE_32K 0x8000
@@ -731,10 +729,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
731 struct qla_fdt_layout *fdt; 729 struct qla_fdt_layout *fdt;
732 uint8_t man_id, flash_id; 730 uint8_t man_id, flash_id;
733 uint16_t mid, fid; 731 uint16_t mid, fid;
732 struct qla_hw_data *ha = vha->hw;
733 struct req_que *req = ha->req_q_map[0];
734 734
735 wptr = (uint16_t *)ha->request_ring; 735 wptr = (uint16_t *)req->ring;
736 fdt = (struct qla_fdt_layout *)ha->request_ring; 736 fdt = (struct qla_fdt_layout *)req->ring;
737 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 737 ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
738 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE); 738 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
739 if (*wptr == __constant_cpu_to_le16(0xffff)) 739 if (*wptr == __constant_cpu_to_le16(0xffff))
740 goto no_flash_data; 740 goto no_flash_data;
@@ -807,38 +807,41 @@ done:
807} 807}
808 808
809int 809int
810qla2xxx_get_flash_info(scsi_qla_host_t *ha) 810qla2xxx_get_flash_info(scsi_qla_host_t *vha)
811{ 811{
812 int ret; 812 int ret;
813 uint32_t flt_addr; 813 uint32_t flt_addr;
814 struct qla_hw_data *ha = vha->hw;
814 815
815 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 816 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
816 return QLA_SUCCESS; 817 return QLA_SUCCESS;
817 818
818 ret = qla2xxx_find_flt_start(ha, &flt_addr); 819 ret = qla2xxx_find_flt_start(vha, &flt_addr);
819 if (ret != QLA_SUCCESS) 820 if (ret != QLA_SUCCESS)
820 return ret; 821 return ret;
821 822
822 qla2xxx_get_flt_info(ha, flt_addr); 823 qla2xxx_get_flt_info(vha, flt_addr);
823 qla2xxx_get_fdt_info(ha); 824 qla2xxx_get_fdt_info(vha);
824 825
825 return QLA_SUCCESS; 826 return QLA_SUCCESS;
826} 827}
827 828
828void 829void
829qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha) 830qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
830{ 831{
831#define NPIV_CONFIG_SIZE (16*1024) 832#define NPIV_CONFIG_SIZE (16*1024)
832 void *data; 833 void *data;
833 uint16_t *wptr; 834 uint16_t *wptr;
834 uint16_t cnt, chksum; 835 uint16_t cnt, chksum;
836 int i;
835 struct qla_npiv_header hdr; 837 struct qla_npiv_header hdr;
836 struct qla_npiv_entry *entry; 838 struct qla_npiv_entry *entry;
839 struct qla_hw_data *ha = vha->hw;
837 840
838 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 841 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
839 return; 842 return;
840 843
841 ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr, 844 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
842 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); 845 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
843 if (hdr.version == __constant_cpu_to_le16(0xffff)) 846 if (hdr.version == __constant_cpu_to_le16(0xffff))
844 return; 847 return;
@@ -857,7 +860,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
857 return; 860 return;
858 } 861 }
859 862
860 ha->isp_ops->read_optrom(ha, (uint8_t *)data, 863 ha->isp_ops->read_optrom(vha, (uint8_t *)data,
861 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE); 864 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
862 865
863 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) * 866 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
@@ -874,7 +877,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
874 877
875 entry = data + sizeof(struct qla_npiv_header); 878 entry = data + sizeof(struct qla_npiv_header);
876 cnt = le16_to_cpu(hdr.entries); 879 cnt = le16_to_cpu(hdr.entries);
877 for ( ; cnt; cnt--, entry++) { 880 for (i = 0; cnt; cnt--, entry++, i++) {
878 uint16_t flags; 881 uint16_t flags;
879 struct fc_vport_identifiers vid; 882 struct fc_vport_identifiers vid;
880 struct fc_vport *vport; 883 struct fc_vport *vport;
@@ -892,25 +895,29 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
892 vid.port_name = wwn_to_u64(entry->port_name); 895 vid.port_name = wwn_to_u64(entry->port_name);
893 vid.node_name = wwn_to_u64(entry->node_name); 896 vid.node_name = wwn_to_u64(entry->node_name);
894 897
898 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
899
895 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx " 900 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
896 "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt, 901 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
897 (unsigned long long)vid.port_name, 902 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
898 (unsigned long long)vid.node_name, 903 entry->q_qos, entry->f_qos));
899 le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos))); 904
900 905 if (i < QLA_PRECONFIG_VPORTS) {
901 vport = fc_vport_create(ha->host, 0, &vid); 906 vport = fc_vport_create(vha->host, 0, &vid);
902 if (!vport) 907 if (!vport)
903 qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to " 908 qla_printk(KERN_INFO, ha,
904 "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt, 909 "NPIV-Config: Failed to create vport [%02x]: "
905 (unsigned long long)vid.port_name, 910 "wwpn=%llx wwnn=%llx.\n", cnt,
906 (unsigned long long)vid.node_name); 911 vid.port_name, vid.node_name);
912 }
907 } 913 }
908done: 914done:
909 kfree(data); 915 kfree(data);
916 ha->npiv_info = NULL;
910} 917}
911 918
912static void 919static void
913qla24xx_unprotect_flash(scsi_qla_host_t *ha) 920qla24xx_unprotect_flash(struct qla_hw_data *ha)
914{ 921{
915 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 922 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
916 923
@@ -929,7 +936,7 @@ qla24xx_unprotect_flash(scsi_qla_host_t *ha)
929} 936}
930 937
931static void 938static void
932qla24xx_protect_flash(scsi_qla_host_t *ha) 939qla24xx_protect_flash(struct qla_hw_data *ha)
933{ 940{
934 uint32_t cnt; 941 uint32_t cnt;
935 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 942 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -955,7 +962,7 @@ skip_wrt_protect:
955} 962}
956 963
957static int 964static int
958qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, 965qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
959 uint32_t dwords) 966 uint32_t dwords)
960{ 967{
961 int ret; 968 int ret;
@@ -965,6 +972,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
965 dma_addr_t optrom_dma; 972 dma_addr_t optrom_dma;
966 void *optrom = NULL; 973 void *optrom = NULL;
967 uint32_t *s, *d; 974 uint32_t *s, *d;
975 struct qla_hw_data *ha = vha->hw;
968 976
969 ret = QLA_SUCCESS; 977 ret = QLA_SUCCESS;
970 978
@@ -1002,9 +1010,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1002 (fdata & 0xff00) |((fdata << 16) & 1010 (fdata & 0xff00) |((fdata << 16) &
1003 0xff0000) | ((fdata >> 16) & 0xff)); 1011 0xff0000) | ((fdata >> 16) & 0xff));
1004 if (ret != QLA_SUCCESS) { 1012 if (ret != QLA_SUCCESS) {
1005 DEBUG9(printk("%s(%ld) Unable to flash " 1013 DEBUG9(qla_printk("Unable to flash sector: "
1006 "sector: address=%x.\n", __func__, 1014 "address=%x.\n", faddr));
1007 ha->host_no, faddr));
1008 break; 1015 break;
1009 } 1016 }
1010 } 1017 }
@@ -1016,7 +1023,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1016 miter < OPTROM_BURST_DWORDS; miter++, s++, d++) 1023 miter < OPTROM_BURST_DWORDS; miter++, s++, d++)
1017 *s = cpu_to_le32(*d); 1024 *s = cpu_to_le32(*d);
1018 1025
1019 ret = qla2x00_load_ram(ha, optrom_dma, 1026 ret = qla2x00_load_ram(vha, optrom_dma,
1020 flash_data_to_access_addr(faddr), 1027 flash_data_to_access_addr(faddr),
1021 OPTROM_BURST_DWORDS); 1028 OPTROM_BURST_DWORDS);
1022 if (ret != QLA_SUCCESS) { 1029 if (ret != QLA_SUCCESS) {
@@ -1044,7 +1051,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1044 if (ret != QLA_SUCCESS) { 1051 if (ret != QLA_SUCCESS) {
1045 DEBUG9(printk("%s(%ld) Unable to program flash " 1052 DEBUG9(printk("%s(%ld) Unable to program flash "
1046 "address=%x data=%x.\n", __func__, 1053 "address=%x data=%x.\n", __func__,
1047 ha->host_no, faddr, *dwptr)); 1054 vha->host_no, faddr, *dwptr));
1048 break; 1055 break;
1049 } 1056 }
1050 1057
@@ -1067,11 +1074,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1067} 1074}
1068 1075
1069uint8_t * 1076uint8_t *
1070qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1077qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1071 uint32_t bytes) 1078 uint32_t bytes)
1072{ 1079{
1073 uint32_t i; 1080 uint32_t i;
1074 uint16_t *wptr; 1081 uint16_t *wptr;
1082 struct qla_hw_data *ha = vha->hw;
1075 1083
1076 /* Word reads to NVRAM via registers. */ 1084 /* Word reads to NVRAM via registers. */
1077 wptr = (uint16_t *)buf; 1085 wptr = (uint16_t *)buf;
@@ -1085,7 +1093,7 @@ qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1085} 1093}
1086 1094
1087uint8_t * 1095uint8_t *
1088qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1096qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1089 uint32_t bytes) 1097 uint32_t bytes)
1090{ 1098{
1091 uint32_t i; 1099 uint32_t i;
@@ -1094,20 +1102,21 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1094 /* Dword reads to flash. */ 1102 /* Dword reads to flash. */
1095 dwptr = (uint32_t *)buf; 1103 dwptr = (uint32_t *)buf;
1096 for (i = 0; i < bytes >> 2; i++, naddr++) 1104 for (i = 0; i < bytes >> 2; i++, naddr++)
1097 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1105 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
1098 nvram_data_to_access_addr(naddr))); 1106 nvram_data_to_access_addr(naddr)));
1099 1107
1100 return buf; 1108 return buf;
1101} 1109}
1102 1110
1103int 1111int
1104qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1112qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1105 uint32_t bytes) 1113 uint32_t bytes)
1106{ 1114{
1107 int ret, stat; 1115 int ret, stat;
1108 uint32_t i; 1116 uint32_t i;
1109 uint16_t *wptr; 1117 uint16_t *wptr;
1110 unsigned long flags; 1118 unsigned long flags;
1119 struct qla_hw_data *ha = vha->hw;
1111 1120
1112 ret = QLA_SUCCESS; 1121 ret = QLA_SUCCESS;
1113 1122
@@ -1134,12 +1143,13 @@ qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1134} 1143}
1135 1144
1136int 1145int
1137qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1146qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1138 uint32_t bytes) 1147 uint32_t bytes)
1139{ 1148{
1140 int ret; 1149 int ret;
1141 uint32_t i; 1150 uint32_t i;
1142 uint32_t *dwptr; 1151 uint32_t *dwptr;
1152 struct qla_hw_data *ha = vha->hw;
1143 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1153 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1144 1154
1145 ret = QLA_SUCCESS; 1155 ret = QLA_SUCCESS;
@@ -1162,9 +1172,8 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1162 nvram_data_to_access_addr(naddr), 1172 nvram_data_to_access_addr(naddr),
1163 cpu_to_le32(*dwptr)); 1173 cpu_to_le32(*dwptr));
1164 if (ret != QLA_SUCCESS) { 1174 if (ret != QLA_SUCCESS) {
1165 DEBUG9(printk("%s(%ld) Unable to program " 1175 DEBUG9(qla_printk("Unable to program nvram address=%x "
1166 "nvram address=%x data=%x.\n", __func__, 1176 "data=%x.\n", naddr, *dwptr));
1167 ha->host_no, naddr, *dwptr));
1168 break; 1177 break;
1169 } 1178 }
1170 } 1179 }
@@ -1182,11 +1191,12 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1182} 1191}
1183 1192
1184uint8_t * 1193uint8_t *
1185qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1194qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1186 uint32_t bytes) 1195 uint32_t bytes)
1187{ 1196{
1188 uint32_t i; 1197 uint32_t i;
1189 uint32_t *dwptr; 1198 uint32_t *dwptr;
1199 struct qla_hw_data *ha = vha->hw;
1190 1200
1191 /* Dword reads to flash. */ 1201 /* Dword reads to flash. */
1192 dwptr = (uint32_t *)buf; 1202 dwptr = (uint32_t *)buf;
@@ -1199,19 +1209,20 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1199} 1209}
1200 1210
1201int 1211int
1202qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1212qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1203 uint32_t bytes) 1213 uint32_t bytes)
1204{ 1214{
1215 struct qla_hw_data *ha = vha->hw;
1205#define RMW_BUFFER_SIZE (64 * 1024) 1216#define RMW_BUFFER_SIZE (64 * 1024)
1206 uint8_t *dbuf; 1217 uint8_t *dbuf;
1207 1218
1208 dbuf = vmalloc(RMW_BUFFER_SIZE); 1219 dbuf = vmalloc(RMW_BUFFER_SIZE);
1209 if (!dbuf) 1220 if (!dbuf)
1210 return QLA_MEMORY_ALLOC_FAILED; 1221 return QLA_MEMORY_ALLOC_FAILED;
1211 ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2, 1222 ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
1212 RMW_BUFFER_SIZE); 1223 RMW_BUFFER_SIZE);
1213 memcpy(dbuf + (naddr << 2), buf, bytes); 1224 memcpy(dbuf + (naddr << 2), buf, bytes);
1214 ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2, 1225 ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
1215 RMW_BUFFER_SIZE); 1226 RMW_BUFFER_SIZE);
1216 vfree(dbuf); 1227 vfree(dbuf);
1217 1228
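qla25xx_write_nvram_data() above updates the VPD/NVRAM words by rewriting the whole 64 KB flash region: read it through read_optrom, patch the caller's bytes in at offset naddr << 2, and push the region back through write_optrom. A host-side sketch of the same read-modify-write shape (fake_flash and the demo helpers are invented stand-ins for the hardware accessors):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define REGION_SIZE (64 * 1024)

static uint8_t fake_flash[REGION_SIZE];	/* stands in for the adapter region */

static void read_region(uint8_t *dst)        { memcpy(dst, fake_flash, REGION_SIZE); }
static void write_region(const uint8_t *src) { memcpy(fake_flash, src, REGION_SIZE); }

static int rmw_write(uint32_t byte_off, const void *buf, size_t len)
{
	uint8_t *scratch;

	if (byte_off + len > REGION_SIZE)
		return -1;

	scratch = malloc(REGION_SIZE);
	if (!scratch)
		return -1;

	read_region(scratch);			/* read the whole region    */
	memcpy(scratch + byte_off, buf, len);	/* patch the caller's bytes */
	write_region(scratch);			/* write the region back    */

	free(scratch);
	return 0;
}

Rewriting the full region is what lets the flash layer keep its erase-block handling in one place instead of exposing partial-sector writes to the NVRAM code.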
@@ -1219,7 +1230,7 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1219} 1230}
1220 1231
1221static inline void 1232static inline void
1222qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 1233qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
1223{ 1234{
1224 if (IS_QLA2322(ha)) { 1235 if (IS_QLA2322(ha)) {
1225 /* Flip all colors. */ 1236 /* Flip all colors. */
@@ -1249,12 +1260,13 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
1249#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r)) 1260#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
1250 1261
1251void 1262void
1252qla2x00_beacon_blink(struct scsi_qla_host *ha) 1263qla2x00_beacon_blink(struct scsi_qla_host *vha)
1253{ 1264{
1254 uint16_t gpio_enable; 1265 uint16_t gpio_enable;
1255 uint16_t gpio_data; 1266 uint16_t gpio_data;
1256 uint16_t led_color = 0; 1267 uint16_t led_color = 0;
1257 unsigned long flags; 1268 unsigned long flags;
1269 struct qla_hw_data *ha = vha->hw;
1258 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1270 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1259 1271
1260 spin_lock_irqsave(&ha->hardware_lock, flags); 1272 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1298,17 +1310,18 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
1298} 1310}
1299 1311
1300int 1312int
1301qla2x00_beacon_on(struct scsi_qla_host *ha) 1313qla2x00_beacon_on(struct scsi_qla_host *vha)
1302{ 1314{
1303 uint16_t gpio_enable; 1315 uint16_t gpio_enable;
1304 uint16_t gpio_data; 1316 uint16_t gpio_data;
1305 unsigned long flags; 1317 unsigned long flags;
1318 struct qla_hw_data *ha = vha->hw;
1306 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1319 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1307 1320
1308 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1321 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1309 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 1322 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
1310 1323
1311 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1324 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1312 qla_printk(KERN_WARNING, ha, 1325 qla_printk(KERN_WARNING, ha,
1313 "Unable to update fw options (beacon on).\n"); 1326 "Unable to update fw options (beacon on).\n");
1314 return QLA_FUNCTION_FAILED; 1327 return QLA_FUNCTION_FAILED;
@@ -1354,9 +1367,10 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
1354} 1367}
1355 1368
1356int 1369int
1357qla2x00_beacon_off(struct scsi_qla_host *ha) 1370qla2x00_beacon_off(struct scsi_qla_host *vha)
1358{ 1371{
1359 int rval = QLA_SUCCESS; 1372 int rval = QLA_SUCCESS;
1373 struct qla_hw_data *ha = vha->hw;
1360 1374
1361 ha->beacon_blink_led = 0; 1375 ha->beacon_blink_led = 0;
1362 1376
@@ -1366,12 +1380,12 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
1366 else 1380 else
1367 ha->beacon_color_state = QLA_LED_GRN_ON; 1381 ha->beacon_color_state = QLA_LED_GRN_ON;
1368 1382
1369 ha->isp_ops->beacon_blink(ha); /* This turns green LED off */ 1383 ha->isp_ops->beacon_blink(vha); /* This turns green LED off */
1370 1384
1371 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1385 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1372 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; 1386 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
1373 1387
1374 rval = qla2x00_set_fw_options(ha, ha->fw_options); 1388 rval = qla2x00_set_fw_options(vha, ha->fw_options);
1375 if (rval != QLA_SUCCESS) 1389 if (rval != QLA_SUCCESS)
1376 qla_printk(KERN_WARNING, ha, 1390 qla_printk(KERN_WARNING, ha,
1377 "Unable to update fw options (beacon off).\n"); 1391 "Unable to update fw options (beacon off).\n");
@@ -1380,7 +1394,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
1380 1394
1381 1395
1382static inline void 1396static inline void
1383qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 1397qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
1384{ 1398{
1385 /* Flip all colors. */ 1399 /* Flip all colors. */
1386 if (ha->beacon_color_state == QLA_LED_ALL_ON) { 1400 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
@@ -1395,11 +1409,12 @@ qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
1395} 1409}
1396 1410
1397void 1411void
1398qla24xx_beacon_blink(struct scsi_qla_host *ha) 1412qla24xx_beacon_blink(struct scsi_qla_host *vha)
1399{ 1413{
1400 uint16_t led_color = 0; 1414 uint16_t led_color = 0;
1401 uint32_t gpio_data; 1415 uint32_t gpio_data;
1402 unsigned long flags; 1416 unsigned long flags;
1417 struct qla_hw_data *ha = vha->hw;
1403 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1418 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1404 1419
1405 /* Save the Original GPIOD. */ 1420 /* Save the Original GPIOD. */
@@ -1428,20 +1443,21 @@ qla24xx_beacon_blink(struct scsi_qla_host *ha)
1428} 1443}
1429 1444
1430int 1445int
1431qla24xx_beacon_on(struct scsi_qla_host *ha) 1446qla24xx_beacon_on(struct scsi_qla_host *vha)
1432{ 1447{
1433 uint32_t gpio_data; 1448 uint32_t gpio_data;
1434 unsigned long flags; 1449 unsigned long flags;
1450 struct qla_hw_data *ha = vha->hw;
1435 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1451 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1436 1452
1437 if (ha->beacon_blink_led == 0) { 1453 if (ha->beacon_blink_led == 0) {
1438 /* Enable firmware for update */ 1454 /* Enable firmware for update */
1439 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; 1455 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
1440 1456
1441 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) 1457 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS)
1442 return QLA_FUNCTION_FAILED; 1458 return QLA_FUNCTION_FAILED;
1443 1459
1444 if (qla2x00_get_fw_options(ha, ha->fw_options) != 1460 if (qla2x00_get_fw_options(vha, ha->fw_options) !=
1445 QLA_SUCCESS) { 1461 QLA_SUCCESS) {
1446 qla_printk(KERN_WARNING, ha, 1462 qla_printk(KERN_WARNING, ha,
1447 "Unable to update fw options (beacon on).\n"); 1463 "Unable to update fw options (beacon on).\n");
@@ -1469,16 +1485,17 @@ qla24xx_beacon_on(struct scsi_qla_host *ha)
1469} 1485}
1470 1486
1471int 1487int
1472qla24xx_beacon_off(struct scsi_qla_host *ha) 1488qla24xx_beacon_off(struct scsi_qla_host *vha)
1473{ 1489{
1474 uint32_t gpio_data; 1490 uint32_t gpio_data;
1475 unsigned long flags; 1491 unsigned long flags;
1492 struct qla_hw_data *ha = vha->hw;
1476 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1493 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1477 1494
1478 ha->beacon_blink_led = 0; 1495 ha->beacon_blink_led = 0;
1479 ha->beacon_color_state = QLA_LED_ALL_ON; 1496 ha->beacon_color_state = QLA_LED_ALL_ON;
1480 1497
1481 ha->isp_ops->beacon_blink(ha); /* Will flip to all off. */ 1498 ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */
1482 1499
1483 /* Give control back to firmware. */ 1500 /* Give control back to firmware. */
1484 spin_lock_irqsave(&ha->hardware_lock, flags); 1501 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1492,13 +1509,13 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
1492 1509
1493 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 1510 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
1494 1511
1495 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1512 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1496 qla_printk(KERN_WARNING, ha, 1513 qla_printk(KERN_WARNING, ha,
1497 "Unable to update fw options (beacon off).\n"); 1514 "Unable to update fw options (beacon off).\n");
1498 return QLA_FUNCTION_FAILED; 1515 return QLA_FUNCTION_FAILED;
1499 } 1516 }
1500 1517
1501 if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1518 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1502 qla_printk(KERN_WARNING, ha, 1519 qla_printk(KERN_WARNING, ha,
1503 "Unable to get fw options (beacon off).\n"); 1520 "Unable to get fw options (beacon off).\n");
1504 return QLA_FUNCTION_FAILED; 1521 return QLA_FUNCTION_FAILED;
@@ -1517,7 +1534,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
1517 * @ha: HA context 1534 * @ha: HA context
1518 */ 1535 */
1519static void 1536static void
1520qla2x00_flash_enable(scsi_qla_host_t *ha) 1537qla2x00_flash_enable(struct qla_hw_data *ha)
1521{ 1538{
1522 uint16_t data; 1539 uint16_t data;
1523 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1540 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1533,7 +1550,7 @@ qla2x00_flash_enable(scsi_qla_host_t *ha)
1533 * @ha: HA context 1550 * @ha: HA context
1534 */ 1551 */
1535static void 1552static void
1536qla2x00_flash_disable(scsi_qla_host_t *ha) 1553qla2x00_flash_disable(struct qla_hw_data *ha)
1537{ 1554{
1538 uint16_t data; 1555 uint16_t data;
1539 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1556 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1554,7 +1571,7 @@ qla2x00_flash_disable(scsi_qla_host_t *ha)
1554 * Returns the byte read from flash @addr. 1571 * Returns the byte read from flash @addr.
1555 */ 1572 */
1556static uint8_t 1573static uint8_t
1557qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr) 1574qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
1558{ 1575{
1559 uint16_t data; 1576 uint16_t data;
1560 uint16_t bank_select; 1577 uint16_t bank_select;
@@ -1615,7 +1632,7 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
1615 * @data: Data to write 1632 * @data: Data to write
1616 */ 1633 */
1617static void 1634static void
1618qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data) 1635qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
1619{ 1636{
1620 uint16_t bank_select; 1637 uint16_t bank_select;
1621 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1638 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1678,7 +1695,7 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
1678 * Returns 0 on success, else non-zero. 1695 * Returns 0 on success, else non-zero.
1679 */ 1696 */
1680static int 1697static int
1681qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data, 1698qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
1682 uint8_t man_id, uint8_t flash_id) 1699 uint8_t man_id, uint8_t flash_id)
1683{ 1700{
1684 int status; 1701 int status;
@@ -1718,8 +1735,8 @@ qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
1718 * Returns 0 on success, else non-zero. 1735 * Returns 0 on success, else non-zero.
1719 */ 1736 */
1720static int 1737static int
1721qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data, 1738qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr,
1722 uint8_t man_id, uint8_t flash_id) 1739 uint8_t data, uint8_t man_id, uint8_t flash_id)
1723{ 1740{
1724 /* Write Program Command Sequence. */ 1741 /* Write Program Command Sequence. */
1725 if (IS_OEM_001(ha)) { 1742 if (IS_OEM_001(ha)) {
@@ -1755,7 +1772,7 @@ qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
1755 * Returns 0 on success, else non-zero. 1772 * Returns 0 on success, else non-zero.
1756 */ 1773 */
1757static int 1774static int
1758qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id) 1775qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id)
1759{ 1776{
1760 /* Individual Sector Erase Command Sequence */ 1777 /* Individual Sector Erase Command Sequence */
1761 if (IS_OEM_001(ha)) { 1778 if (IS_OEM_001(ha)) {
@@ -1791,7 +1808,7 @@ qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
1791 * Returns 0 on success, else non-zero. 1808 * Returns 0 on success, else non-zero.
1792 */ 1809 */
1793static int 1810static int
1794qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr, 1811qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
1795 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id) 1812 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
1796{ 1813{
1797 /* Individual Sector Erase Command Sequence */ 1814 /* Individual Sector Erase Command Sequence */
@@ -1817,7 +1834,7 @@ qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
1817 * @flash_id: Flash ID 1834 * @flash_id: Flash ID
1818 */ 1835 */
1819static void 1836static void
1820qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 1837qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
1821 uint8_t *flash_id) 1838 uint8_t *flash_id)
1822{ 1839{
1823 qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1840 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
@@ -1831,8 +1848,8 @@ qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
1831} 1848}
1832 1849
1833static void 1850static void
1834qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr, 1851qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
1835 uint32_t length) 1852 uint32_t saddr, uint32_t length)
1836{ 1853{
1837 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1854 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1838 uint32_t midpoint, ilength; 1855 uint32_t midpoint, ilength;
@@ -1856,14 +1873,15 @@ qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
1856} 1873}
1857 1874
1858static inline void 1875static inline void
1859qla2x00_suspend_hba(struct scsi_qla_host *ha) 1876qla2x00_suspend_hba(struct scsi_qla_host *vha)
1860{ 1877{
1861 int cnt; 1878 int cnt;
1862 unsigned long flags; 1879 unsigned long flags;
1880 struct qla_hw_data *ha = vha->hw;
1863 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1881 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1864 1882
1865 /* Suspend HBA. */ 1883 /* Suspend HBA. */
1866 scsi_block_requests(ha->host); 1884 scsi_block_requests(vha->host);
1867 ha->isp_ops->disable_intrs(ha); 1885 ha->isp_ops->disable_intrs(ha);
1868 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1886 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1869 1887
@@ -1884,26 +1902,29 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
1884} 1902}
1885 1903
1886static inline void 1904static inline void
1887qla2x00_resume_hba(struct scsi_qla_host *ha) 1905qla2x00_resume_hba(struct scsi_qla_host *vha)
1888{ 1906{
1907 struct qla_hw_data *ha = vha->hw;
1908
1889 /* Resume HBA. */ 1909 /* Resume HBA. */
1890 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1910 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1891 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1911 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1892 qla2xxx_wake_dpc(ha); 1912 qla2xxx_wake_dpc(vha);
1893 qla2x00_wait_for_hba_online(ha); 1913 qla2x00_wait_for_hba_online(vha);
1894 scsi_unblock_requests(ha->host); 1914 scsi_unblock_requests(vha->host);
1895} 1915}
1896 1916
1897uint8_t * 1917uint8_t *
1898qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1918qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
1899 uint32_t offset, uint32_t length) 1919 uint32_t offset, uint32_t length)
1900{ 1920{
1901 uint32_t addr, midpoint; 1921 uint32_t addr, midpoint;
1902 uint8_t *data; 1922 uint8_t *data;
1923 struct qla_hw_data *ha = vha->hw;
1903 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1924 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1904 1925
1905 /* Suspend HBA. */ 1926 /* Suspend HBA. */
1906 qla2x00_suspend_hba(ha); 1927 qla2x00_suspend_hba(vha);
1907 1928
1908 /* Go with read. */ 1929 /* Go with read. */
1909 midpoint = ha->optrom_size / 2; 1930 midpoint = ha->optrom_size / 2;
@@ -1922,13 +1943,13 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1922 qla2x00_flash_disable(ha); 1943 qla2x00_flash_disable(ha);
1923 1944
1924 /* Resume HBA. */ 1945 /* Resume HBA. */
1925 qla2x00_resume_hba(ha); 1946 qla2x00_resume_hba(vha);
1926 1947
1927 return buf; 1948 return buf;
1928} 1949}
1929 1950
1930int 1951int
1931qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1952qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
1932 uint32_t offset, uint32_t length) 1953 uint32_t offset, uint32_t length)
1933{ 1954{
1934 1955
@@ -1936,10 +1957,11 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1936 uint8_t man_id, flash_id, sec_number, data; 1957 uint8_t man_id, flash_id, sec_number, data;
1937 uint16_t wd; 1958 uint16_t wd;
1938 uint32_t addr, liter, sec_mask, rest_addr; 1959 uint32_t addr, liter, sec_mask, rest_addr;
1960 struct qla_hw_data *ha = vha->hw;
1939 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1961 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1940 1962
1941 /* Suspend HBA. */ 1963 /* Suspend HBA. */
1942 qla2x00_suspend_hba(ha); 1964 qla2x00_suspend_hba(vha);
1943 1965
1944 rval = QLA_SUCCESS; 1966 rval = QLA_SUCCESS;
1945 sec_number = 0; 1967 sec_number = 0;
@@ -2139,55 +2161,58 @@ update_flash:
2139 qla2x00_flash_disable(ha); 2161 qla2x00_flash_disable(ha);
2140 2162
2141 /* Resume HBA. */ 2163 /* Resume HBA. */
2142 qla2x00_resume_hba(ha); 2164 qla2x00_resume_hba(vha);
2143 2165
2144 return rval; 2166 return rval;
2145} 2167}
2146 2168
2147uint8_t * 2169uint8_t *
2148qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2170qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2149 uint32_t offset, uint32_t length) 2171 uint32_t offset, uint32_t length)
2150{ 2172{
2173 struct qla_hw_data *ha = vha->hw;
2174
2151 /* Suspend HBA. */ 2175 /* Suspend HBA. */
2152 scsi_block_requests(ha->host); 2176 scsi_block_requests(vha->host);
2153 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2177 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2154 2178
2155 /* Go with read. */ 2179 /* Go with read. */
2156 qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2); 2180 qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
2157 2181
2158 /* Resume HBA. */ 2182 /* Resume HBA. */
2159 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2183 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2160 scsi_unblock_requests(ha->host); 2184 scsi_unblock_requests(vha->host);
2161 2185
2162 return buf; 2186 return buf;
2163} 2187}
2164 2188
2165int 2189int
2166qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2190qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2167 uint32_t offset, uint32_t length) 2191 uint32_t offset, uint32_t length)
2168{ 2192{
2169 int rval; 2193 int rval;
2194 struct qla_hw_data *ha = vha->hw;
2170 2195
2171 /* Suspend HBA. */ 2196 /* Suspend HBA. */
2172 scsi_block_requests(ha->host); 2197 scsi_block_requests(vha->host);
2173 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2198 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2174 2199
2175 /* Go with write. */ 2200 /* Go with write. */
2176 rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2, 2201 rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
2177 length >> 2); 2202 length >> 2);
2178 2203
2179 /* Resume HBA -- RISC reset needed. */ 2204 /* Resume HBA -- RISC reset needed. */
2180 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2205 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2181 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2206 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2182 qla2xxx_wake_dpc(ha); 2207 qla2xxx_wake_dpc(vha);
2183 qla2x00_wait_for_hba_online(ha); 2208 qla2x00_wait_for_hba_online(vha);
2184 scsi_unblock_requests(ha->host); 2209 scsi_unblock_requests(vha->host);
2185 2210
2186 return rval; 2211 return rval;
2187} 2212}
2188 2213
2189uint8_t * 2214uint8_t *
2190qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2215qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2191 uint32_t offset, uint32_t length) 2216 uint32_t offset, uint32_t length)
2192{ 2217{
2193 int rval; 2218 int rval;
@@ -2195,6 +2220,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2195 void *optrom; 2220 void *optrom;
2196 uint8_t *pbuf; 2221 uint8_t *pbuf;
2197 uint32_t faddr, left, burst; 2222 uint32_t faddr, left, burst;
2223 struct qla_hw_data *ha = vha->hw;
2198 2224
2199 if (offset & 0xfff) 2225 if (offset & 0xfff)
2200 goto slow_read; 2226 goto slow_read;
@@ -2219,7 +2245,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2219 if (burst > left) 2245 if (burst > left)
2220 burst = left; 2246 burst = left;
2221 2247
2222 rval = qla2x00_dump_ram(ha, optrom_dma, 2248 rval = qla2x00_dump_ram(vha, optrom_dma,
2223 flash_data_to_access_addr(faddr), burst); 2249 flash_data_to_access_addr(faddr), burst);
2224 if (rval) { 2250 if (rval) {
2225 qla_printk(KERN_WARNING, ha, 2251 qla_printk(KERN_WARNING, ha,
@@ -2248,7 +2274,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2248 return buf; 2274 return buf;
2249 2275
2250slow_read: 2276slow_read:
2251 return qla24xx_read_optrom_data(ha, buf, offset, length); 2277 return qla24xx_read_optrom_data(vha, buf, offset, length);
2252} 2278}
2253 2279
2254/** 2280/**
@@ -2270,7 +2296,7 @@ slow_read:
2270 * Returns QLA_SUCCESS on successful retrieval of version. 2296 * Returns QLA_SUCCESS on successful retrieval of version.
2271 */ 2297 */
2272static void 2298static void
2273qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids) 2299qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids)
2274{ 2300{
2275 int ret = QLA_FUNCTION_FAILED; 2301 int ret = QLA_FUNCTION_FAILED;
2276 uint32_t istart, iend, iter, vend; 2302 uint32_t istart, iend, iter, vend;
@@ -2344,13 +2370,14 @@ qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
2344} 2370}
2345 2371
2346int 2372int
2347qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf) 2373qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2348{ 2374{
2349 int ret = QLA_SUCCESS; 2375 int ret = QLA_SUCCESS;
2350 uint8_t code_type, last_image; 2376 uint8_t code_type, last_image;
2351 uint32_t pcihdr, pcids; 2377 uint32_t pcihdr, pcids;
2352 uint8_t *dbyte; 2378 uint8_t *dbyte;
2353 uint16_t *dcode; 2379 uint16_t *dcode;
2380 struct qla_hw_data *ha = vha->hw;
2354 2381
2355 if (!ha->pio_address || !mbuf) 2382 if (!ha->pio_address || !mbuf)
2356 return QLA_FUNCTION_FAILED; 2383 return QLA_FUNCTION_FAILED;
@@ -2370,8 +2397,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2370 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || 2397 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
2371 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { 2398 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
2372 /* No signature */ 2399 /* No signature */
2373 DEBUG2(printk("scsi(%ld): No matching ROM " 2400 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
2374 "signature.\n", ha->host_no)); 2401 "signature.\n"));
2375 ret = QLA_FUNCTION_FAILED; 2402 ret = QLA_FUNCTION_FAILED;
2376 break; 2403 break;
2377 } 2404 }
@@ -2387,8 +2414,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2387 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || 2414 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
2388 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { 2415 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
2389 /* Incorrect header. */ 2416 /* Incorrect header. */
2390 DEBUG2(printk("%s(): PCI data struct not found " 2417 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
2391 "pcir_adr=%x.\n", __func__, pcids)); 2418 "found pcir_adr=%x.\n", pcids));
2392 ret = QLA_FUNCTION_FAILED; 2419 ret = QLA_FUNCTION_FAILED;
2393 break; 2420 break;
2394 } 2421 }
@@ -2402,7 +2429,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2402 qla2x00_read_flash_byte(ha, pcids + 0x12); 2429 qla2x00_read_flash_byte(ha, pcids + 0x12);
2403 ha->bios_revision[1] = 2430 ha->bios_revision[1] =
2404 qla2x00_read_flash_byte(ha, pcids + 0x13); 2431 qla2x00_read_flash_byte(ha, pcids + 0x13);
2405 DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__, 2432 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
2406 ha->bios_revision[1], ha->bios_revision[0])); 2433 ha->bios_revision[1], ha->bios_revision[0]));
2407 break; 2434 break;
2408 case ROM_CODE_TYPE_FCODE: 2435 case ROM_CODE_TYPE_FCODE:
@@ -2416,12 +2443,12 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2416 qla2x00_read_flash_byte(ha, pcids + 0x12); 2443 qla2x00_read_flash_byte(ha, pcids + 0x12);
2417 ha->efi_revision[1] = 2444 ha->efi_revision[1] =
2418 qla2x00_read_flash_byte(ha, pcids + 0x13); 2445 qla2x00_read_flash_byte(ha, pcids + 0x13);
2419 DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__, 2446 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
2420 ha->efi_revision[1], ha->efi_revision[0])); 2447 ha->efi_revision[1], ha->efi_revision[0]));
2421 break; 2448 break;
2422 default: 2449 default:
2423 DEBUG2(printk("%s(): Unrecognized code type %x at " 2450 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
2424 "pcids %x.\n", __func__, code_type, pcids)); 2451 "type %x at pcids %x.\n", code_type, pcids));
2425 break; 2452 break;
2426 } 2453 }
2427 2454
@@ -2441,16 +2468,16 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2441 2468
2442 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 2469 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2443 8); 2470 8);
2444 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", 2471 DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
2445 __func__, ha->host_no)); 2472 "flash:\n"));
2446 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); 2473 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
2447 2474
2448 if ((dcode[0] == 0xffff && dcode[1] == 0xffff && 2475 if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
2449 dcode[2] == 0xffff && dcode[3] == 0xffff) || 2476 dcode[2] == 0xffff && dcode[3] == 0xffff) ||
2450 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2477 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2451 dcode[3] == 0)) { 2478 dcode[3] == 0)) {
2452 DEBUG2(printk("%s(): Unrecognized fw revision at " 2479 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
2453 "%x.\n", __func__, ha->flt_region_fw * 4)); 2480 "revision at %x.\n", ha->flt_region_fw * 4));
2454 } else { 2481 } else {
2455 /* values are in big endian */ 2482 /* values are in big endian */
2456 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2483 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2465,7 +2492,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2465} 2492}
2466 2493
2467int 2494int
2468qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf) 2495qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2469{ 2496{
2470 int ret = QLA_SUCCESS; 2497 int ret = QLA_SUCCESS;
2471 uint32_t pcihdr, pcids; 2498 uint32_t pcihdr, pcids;
@@ -2473,6 +2500,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2473 uint8_t *bcode; 2500 uint8_t *bcode;
2474 uint8_t code_type, last_image; 2501 uint8_t code_type, last_image;
2475 int i; 2502 int i;
2503 struct qla_hw_data *ha = vha->hw;
2476 2504
2477 if (!mbuf) 2505 if (!mbuf)
2478 return QLA_FUNCTION_FAILED; 2506 return QLA_FUNCTION_FAILED;
@@ -2489,12 +2517,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2489 last_image = 1; 2517 last_image = 1;
2490 do { 2518 do {
2491 /* Verify PCI expansion ROM header. */ 2519 /* Verify PCI expansion ROM header. */
2492 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20); 2520 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
2493 bcode = mbuf + (pcihdr % 4); 2521 bcode = mbuf + (pcihdr % 4);
2494 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { 2522 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
2495 /* No signature */ 2523 /* No signature */
2496 DEBUG2(printk("scsi(%ld): No matching ROM " 2524 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
2497 "signature.\n", ha->host_no)); 2525 "signature.\n"));
2498 ret = QLA_FUNCTION_FAILED; 2526 ret = QLA_FUNCTION_FAILED;
2499 break; 2527 break;
2500 } 2528 }
@@ -2502,15 +2530,15 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2502 /* Locate PCI data structure. */ 2530 /* Locate PCI data structure. */
2503 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); 2531 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
2504 2532
2505 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20); 2533 qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
2506 bcode = mbuf + (pcihdr % 4); 2534 bcode = mbuf + (pcihdr % 4);
2507 2535
2508 /* Validate signature of PCI data structure. */ 2536 /* Validate signature of PCI data structure. */
2509 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || 2537 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2510 bcode[0x2] != 'I' || bcode[0x3] != 'R') { 2538 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2511 /* Incorrect header. */ 2539 /* Incorrect header. */
2512 DEBUG2(printk("%s(): PCI data struct not found " 2540 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
2513 "pcir_adr=%x.\n", __func__, pcids)); 2541 "found pcir_adr=%x.\n", pcids));
2514 ret = QLA_FUNCTION_FAILED; 2542 ret = QLA_FUNCTION_FAILED;
2515 break; 2543 break;
2516 } 2544 }
@@ -2522,26 +2550,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2522 /* Intel x86, PC-AT compatible. */ 2550 /* Intel x86, PC-AT compatible. */
2523 ha->bios_revision[0] = bcode[0x12]; 2551 ha->bios_revision[0] = bcode[0x12];
2524 ha->bios_revision[1] = bcode[0x13]; 2552 ha->bios_revision[1] = bcode[0x13];
2525 DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__, 2553 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
2526 ha->bios_revision[1], ha->bios_revision[0])); 2554 ha->bios_revision[1], ha->bios_revision[0]));
2527 break; 2555 break;
2528 case ROM_CODE_TYPE_FCODE: 2556 case ROM_CODE_TYPE_FCODE:
2529 /* Open Firmware standard for PCI (FCode). */ 2557 /* Open Firmware standard for PCI (FCode). */
2530 ha->fcode_revision[0] = bcode[0x12]; 2558 ha->fcode_revision[0] = bcode[0x12];
2531 ha->fcode_revision[1] = bcode[0x13]; 2559 ha->fcode_revision[1] = bcode[0x13];
2532 DEBUG3(printk("%s(): read FCODE %d.%d.\n", __func__, 2560 DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
2533 ha->fcode_revision[1], ha->fcode_revision[0])); 2561 ha->fcode_revision[1], ha->fcode_revision[0]));
2534 break; 2562 break;
2535 case ROM_CODE_TYPE_EFI: 2563 case ROM_CODE_TYPE_EFI:
2536 /* Extensible Firmware Interface (EFI). */ 2564 /* Extensible Firmware Interface (EFI). */
2537 ha->efi_revision[0] = bcode[0x12]; 2565 ha->efi_revision[0] = bcode[0x12];
2538 ha->efi_revision[1] = bcode[0x13]; 2566 ha->efi_revision[1] = bcode[0x13];
2539 DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__, 2567 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
2540 ha->efi_revision[1], ha->efi_revision[0])); 2568 ha->efi_revision[1], ha->efi_revision[0]));
2541 break; 2569 break;
2542 default: 2570 default:
2543 DEBUG2(printk("%s(): Unrecognized code type %x at " 2571 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
2544 "pcids %x.\n", __func__, code_type, pcids)); 2572 "type %x at pcids %x.\n", code_type, pcids));
2545 break; 2573 break;
2546 } 2574 }
2547 2575
@@ -2555,7 +2583,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2555 memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); 2583 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2556 dcode = mbuf; 2584 dcode = mbuf;
2557 2585
2558 qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4); 2586 qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
2559 for (i = 0; i < 4; i++) 2587 for (i = 0; i < 4; i++)
2560 dcode[i] = be32_to_cpu(dcode[i]); 2588 dcode[i] = be32_to_cpu(dcode[i]);
2561 2589
@@ -2563,8 +2591,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2563 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 2591 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
2564 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2592 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2565 dcode[3] == 0)) { 2593 dcode[3] == 0)) {
2566 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", 2594 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
2567 __func__, ha->flt_region_fw)); 2595 "revision at %x.\n", ha->flt_region_fw * 4));
2568 } else { 2596 } else {
2569 ha->fw_revision[0] = dcode[0]; 2597 ha->fw_revision[0] = dcode[0];
2570 ha->fw_revision[1] = dcode[1]; 2598 ha->fw_revision[1] = dcode[1];
@@ -2593,8 +2621,9 @@ qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
2593} 2621}
2594 2622
2595int 2623int
2596qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size) 2624qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
2597{ 2625{
2626 struct qla_hw_data *ha = vha->hw;
2598 uint8_t *pos = ha->vpd; 2627 uint8_t *pos = ha->vpd;
2599 uint8_t *end = pos + ha->vpd_size; 2628 uint8_t *end = pos + ha->vpd_size;
2600 int len = 0; 2629 int len = 0;
@@ -2621,9 +2650,10 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
2621} 2650}
2622 2651
2623static int 2652static int
2624qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata) 2653qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
2625{ 2654{
2626 uint32_t d[2], faddr; 2655 uint32_t d[2], faddr;
2656 struct qla_hw_data *ha = vha->hw;
2627 2657
2628 /* Locate first empty entry. */ 2658 /* Locate first empty entry. */
2629 for (;;) { 2659 for (;;) {
@@ -2634,7 +2664,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2634 return QLA_MEMORY_ALLOC_FAILED; 2664 return QLA_MEMORY_ALLOC_FAILED;
2635 } 2665 }
2636 2666
2637 qla24xx_read_flash_data(ha, d, ha->hw_event_ptr, 2); 2667 qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
2638 faddr = flash_data_to_access_addr(ha->hw_event_ptr); 2668 faddr = flash_data_to_access_addr(ha->hw_event_ptr);
2639 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; 2669 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2640 if (d[0] == __constant_cpu_to_le32(0xffffffff) && 2670 if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
@@ -2655,12 +2685,12 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2655} 2685}
2656 2686
2657int 2687int
2658qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1, 2688qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
2659 uint16_t d2, uint16_t d3) 2689 uint16_t d2, uint16_t d3)
2660{ 2690{
2661#define QMARK(a, b, c, d) \ 2691#define QMARK(a, b, c, d) \
2662 cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d)) 2692 cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
2663 2693 struct qla_hw_data *ha = vha->hw;
2664 int rval; 2694 int rval;
2665 uint32_t marker[2], fdata[4]; 2695 uint32_t marker[2], fdata[4];
2666 2696
@@ -2681,7 +2711,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2681 /* Locate marker. */ 2711 /* Locate marker. */
2682 ha->hw_event_ptr = ha->flt_region_hw_event; 2712 ha->hw_event_ptr = ha->flt_region_hw_event;
2683 for (;;) { 2713 for (;;) {
2684 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, 2714 qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
2685 4); 2715 4);
2686 if (fdata[0] == __constant_cpu_to_le32(0xffffffff) && 2716 if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
2687 fdata[1] == __constant_cpu_to_le32(0xffffffff)) 2717 fdata[1] == __constant_cpu_to_le32(0xffffffff))
@@ -2700,7 +2730,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2700 } 2730 }
2701 /* No marker, write it. */ 2731 /* No marker, write it. */
2702 if (!ha->flags.hw_event_marker_found) { 2732 if (!ha->flags.hw_event_marker_found) {
2703 rval = qla2xxx_hw_event_store(ha, marker); 2733 rval = qla2xxx_hw_event_store(vha, marker);
2704 if (rval != QLA_SUCCESS) { 2734 if (rval != QLA_SUCCESS) {
2705 DEBUG2(qla_printk(KERN_WARNING, ha, 2735 DEBUG2(qla_printk(KERN_WARNING, ha,
2706 "HW event -- Failed marker write=%x.!\n", 2736 "HW event -- Failed marker write=%x.!\n",
@@ -2714,7 +2744,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2714 /* Store error. */ 2744 /* Store error. */
2715 fdata[0] = cpu_to_le32(code << 16 | d1); 2745 fdata[0] = cpu_to_le32(code << 16 | d1);
2716 fdata[1] = cpu_to_le32(d2 << 16 | d3); 2746 fdata[1] = cpu_to_le32(d2 << 16 | d3);
2717 rval = qla2xxx_hw_event_store(ha, fdata); 2747 rval = qla2xxx_hw_event_store(vha, fdata);
2718 if (rval != QLA_SUCCESS) { 2748 if (rval != QLA_SUCCESS) {
2719 DEBUG2(qla_printk(KERN_WARNING, ha, 2749 DEBUG2(qla_printk(KERN_WARNING, ha,
2720 "HW event -- Failed error write=%x.!\n", 2750 "HW event -- Failed error write=%x.!\n",
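
The qla2xxx hunks above all apply one conversion: entry points now take the per-port struct scsi_qla_host (vha) and derive the shared adapter state with vha->hw, mailbox and DPC helpers are called with vha while the low-level flash byte accessors take struct qla_hw_data directly, and the host_no printk()s become qla_printk() calls against ha. A minimal sketch of the resulting calling convention, built only from identifiers that appear in the hunks above (the function name itself is made up, not part of the patch):

static int example_flash_update(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;       /* shared adapter (hardware) state */

        /* Per-port operations go through vha... */
        scsi_block_requests(vha->host);
        set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);

        /* ...mailbox helpers take vha, low-level flash helpers take ha. */
        if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
                qla_printk(KERN_WARNING, ha, "Unable to get fw options.\n");
                return QLA_FUNCTION_FAILED;
        }
        qla2x00_flash_enable(ha);
        /* ... program the flash through ha->iobase ... */
        qla2x00_flash_disable(ha);

        clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
        scsi_unblock_requests(vha->host);
        return QLA_SUCCESS;
}
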
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index eea6720adf16..be22f3a09f8d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k9" 10#define QLA2XXX_VERSION "8.02.03-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
14#define QLA_DRIVER_PATCH_VER 1 14#define QLA_DRIVER_PATCH_VER 3
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index de7b3bc2cbc9..1ad51552d6b1 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -23,7 +23,7 @@
23 Functions as standalone, loadable, and PCMCIA driver, the latter from 23 Functions as standalone, loadable, and PCMCIA driver, the latter from
24 Dave Hinds' PCMCIA package. 24 Dave Hinds' PCMCIA package.
25 25
26 Cleaned up 26/10/2002 by Alan Cox <alan@redhat.com> as part of the 2.5 26 Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
27 SCSI driver cleanup and audit. This driver still needs work on the 27 SCSI driver cleanup and audit. This driver still needs work on the
28 following 28 following
29 - Non terminating hardware waits 29 - Non terminating hardware waits
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index edfaf241c5ba..381838ebd460 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -136,7 +136,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
136 else 136 else
137 eh_timed_out = NULL; 137 eh_timed_out = NULL;
138 138
139 if (eh_timed_out) 139 if (eh_timed_out) {
140 rtn = eh_timed_out(scmd); 140 rtn = eh_timed_out(scmd);
141 switch (rtn) { 141 switch (rtn) {
142 case BLK_EH_NOT_HANDLED: 142 case BLK_EH_NOT_HANDLED:
@@ -144,6 +144,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
144 default: 144 default:
145 return rtn; 145 return rtn;
146 } 146 }
147 }
147 148
148 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 149 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
149 scmd->result |= DID_TIME_OUT << 16; 150 scmd->result |= DID_TIME_OUT << 16;
@@ -1405,8 +1406,9 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1405 return ADD_TO_MLQUEUE; 1406 return ADD_TO_MLQUEUE;
1406 case GOOD: 1407 case GOOD:
1407 case COMMAND_TERMINATED: 1408 case COMMAND_TERMINATED:
1408 case TASK_ABORTED:
1409 return SUCCESS; 1409 return SUCCESS;
1410 case TASK_ABORTED:
1411 goto maybe_retry;
1410 case CHECK_CONDITION: 1412 case CHECK_CONDITION:
1411 rtn = scsi_check_sense(scmd); 1413 rtn = scsi_check_sense(scmd);
1412 if (rtn == NEEDS_RETRY) 1414 if (rtn == NEEDS_RETRY)
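
Two behavioral points are easy to miss in the scsi_error.c hunks above: scsi_times_out() gains the braces that were missing around the switch, so the transport's eh_timed_out() result is only consulted when such a handler actually exists, and TASK_ABORTED no longer maps straight to SUCCESS but takes the maybe_retry path in scsi_decide_disposition(). A condensed sketch of the resulting timeout flow (illustrative only, with details elided):

        enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;

        if (eh_timed_out) {                     /* transport supplied a handler */
                rtn = eh_timed_out(scmd);
                switch (rtn) {
                case BLK_EH_NOT_HANDLED:
                        break;                  /* fall through to generic handling */
                default:
                        return rtn;             /* transport handled the timeout */
                }
        }
        /* generic path: add the command to SCSI error handling as before */
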
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index dc1cfb2fd76b..2ae4f8fc5831 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -94,7 +94,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
94 SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd)); 94 SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));
95 95
96 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, 96 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
97 &sshdr, timeout, retries); 97 &sshdr, timeout, retries, NULL);
98 98
99 SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result)); 99 SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result));
100 100
@@ -270,11 +270,11 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
270EXPORT_SYMBOL(scsi_ioctl); 270EXPORT_SYMBOL(scsi_ioctl);
271 271
272/** 272/**
273 * scsi_nonblock_ioctl() - Handle SG_SCSI_RESET 273 * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET
274 * @sdev: scsi device receiving ioctl 274 * @sdev: scsi device receiving ioctl
275 * @cmd: Must be SC_SCSI_RESET 275 * @cmd: Must be SC_SCSI_RESET
276 * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST} 276 * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST}
277 * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag. 277 * @ndelay: file mode O_NDELAY flag
278 */ 278 */
279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, 279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
280 void __user *arg, int ndelay) 280 void __user *arg, int ndelay)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 148d3af92aef..f2f51e0333eb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -183,13 +183,15 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
183 * @timeout: request timeout in seconds 183 * @timeout: request timeout in seconds
184 * @retries: number of times to retry request 184 * @retries: number of times to retry request
185 * @flags: or into request flags; 185 * @flags: or into request flags;
186 * @resid: optional residual length
186 * 187 *
187 * returns the req->errors value which is the scsi_cmnd result 188 * returns the req->errors value which is the scsi_cmnd result
188 * field. 189 * field.
189 */ 190 */
190int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 191int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
191 int data_direction, void *buffer, unsigned bufflen, 192 int data_direction, void *buffer, unsigned bufflen,
192 unsigned char *sense, int timeout, int retries, int flags) 193 unsigned char *sense, int timeout, int retries, int flags,
194 int *resid)
193{ 195{
194 struct request *req; 196 struct request *req;
195 int write = (data_direction == DMA_TO_DEVICE); 197 int write = (data_direction == DMA_TO_DEVICE);
@@ -224,6 +226,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
224 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 226 if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
225 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 227 memset(buffer + (bufflen - req->data_len), 0, req->data_len);
226 228
229 if (resid)
230 *resid = req->data_len;
227 ret = req->errors; 231 ret = req->errors;
228 out: 232 out:
229 blk_put_request(req); 233 blk_put_request(req);
@@ -235,7 +239,8 @@ EXPORT_SYMBOL(scsi_execute);
235 239
236int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, 240int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
237 int data_direction, void *buffer, unsigned bufflen, 241 int data_direction, void *buffer, unsigned bufflen,
238 struct scsi_sense_hdr *sshdr, int timeout, int retries) 242 struct scsi_sense_hdr *sshdr, int timeout, int retries,
243 int *resid)
239{ 244{
240 char *sense = NULL; 245 char *sense = NULL;
241 int result; 246 int result;
@@ -246,7 +251,7 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
246 return DRIVER_ERROR << 24; 251 return DRIVER_ERROR << 24;
247 } 252 }
248 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 253 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
249 sense, timeout, retries, 0); 254 sense, timeout, retries, 0, resid);
250 if (sshdr) 255 if (sshdr)
251 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 256 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
252 257
@@ -875,16 +880,24 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
875 * (the normal case for most drivers), we don't need 880 * (the normal case for most drivers), we don't need
876 * the logic to deal with cleaning up afterwards. 881 * the logic to deal with cleaning up afterwards.
877 * 882 *
878 * We must do one of several things here: 883 * We must call scsi_end_request(). This will finish off
884 * the specified number of sectors. If we are done, the
885 * command block will be released and the queue function
886 * will be goosed. If we are not done then we have to
887 * figure out what to do next:
879 * 888 *
880 * a) Call scsi_end_request. This will finish off the 889 * a) We can call scsi_requeue_command(). The request
881 * specified number of sectors. If we are done, the 890 * will be unprepared and put back on the queue. Then
882 * command block will be released, and the queue 891 * a new command will be created for it. This should
883 * function will be goosed. If we are not done, then 892 * be used if we made forward progress, or if we want
884 * scsi_end_request will directly goose the queue. 893 * to switch from READ(10) to READ(6) for example.
885 * 894 *
886 * b) We can just use scsi_requeue_command() here. This would 895 * b) We can call scsi_queue_insert(). The request will
887 * be used if we just wanted to retry, for example. 896 * be put back on the queue and retried using the same
897 * command as before, possibly after a delay.
898 *
899 * c) We can call blk_end_request() with -EIO to fail
900 * the remainder of the request.
888 */ 901 */
889void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 902void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
890{ 903{
@@ -896,6 +909,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
896 struct scsi_sense_hdr sshdr; 909 struct scsi_sense_hdr sshdr;
897 int sense_valid = 0; 910 int sense_valid = 0;
898 int sense_deferred = 0; 911 int sense_deferred = 0;
912 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
913 ACTION_DELAYED_RETRY} action;
914 char *description = NULL;
899 915
900 if (result) { 916 if (result) {
901 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 917 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -947,10 +963,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
947 return; 963 return;
948 this_count = blk_rq_bytes(req); 964 this_count = blk_rq_bytes(req);
949 965
950 /* good_bytes = 0, or (inclusive) there were leftovers and 966 if (host_byte(result) == DID_RESET) {
951 * result = 0, so scsi_end_request couldn't retry. 967 /* Third party bus reset or reset for error recovery
952 */ 968 * reasons. Just retry the command and see what
953 if (sense_valid && !sense_deferred) { 969 * happens.
970 */
971 action = ACTION_RETRY;
972 } else if (sense_valid && !sense_deferred) {
954 switch (sshdr.sense_key) { 973 switch (sshdr.sense_key) {
955 case UNIT_ATTENTION: 974 case UNIT_ATTENTION:
956 if (cmd->device->removable) { 975 if (cmd->device->removable) {
@@ -958,16 +977,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
958 * and quietly refuse further access. 977 * and quietly refuse further access.
959 */ 978 */
960 cmd->device->changed = 1; 979 cmd->device->changed = 1;
961 scsi_end_request(cmd, -EIO, this_count, 1); 980 description = "Media Changed";
962 return; 981 action = ACTION_FAIL;
963 } else { 982 } else {
964 /* Must have been a power glitch, or a 983 /* Must have been a power glitch, or a
965 * bus reset. Could not have been a 984 * bus reset. Could not have been a
966 * media change, so we just retry the 985 * media change, so we just retry the
967 * request and see what happens. 986 * command and see what happens.
968 */ 987 */
969 scsi_requeue_command(q, cmd); 988 action = ACTION_RETRY;
970 return;
971 } 989 }
972 break; 990 break;
973 case ILLEGAL_REQUEST: 991 case ILLEGAL_REQUEST:
@@ -983,21 +1001,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
983 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 1001 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
984 (cmd->cmnd[0] == READ_10 || 1002 (cmd->cmnd[0] == READ_10 ||
985 cmd->cmnd[0] == WRITE_10)) { 1003 cmd->cmnd[0] == WRITE_10)) {
1004 /* This will issue a new 6-byte command. */
986 cmd->device->use_10_for_rw = 0; 1005 cmd->device->use_10_for_rw = 0;
987 /* This will cause a retry with a 1006 action = ACTION_REPREP;
988 * 6-byte command. 1007 } else
989 */ 1008 action = ACTION_FAIL;
990 scsi_requeue_command(q, cmd); 1009 break;
991 } else if (sshdr.asc == 0x10) /* DIX */
992 scsi_end_request(cmd, -EIO, this_count, 0);
993 else
994 scsi_end_request(cmd, -EIO, this_count, 1);
995 return;
996 case ABORTED_COMMAND: 1010 case ABORTED_COMMAND:
997 if (sshdr.asc == 0x10) { /* DIF */ 1011 if (sshdr.asc == 0x10) { /* DIF */
998 scsi_end_request(cmd, -EIO, this_count, 0); 1012 action = ACTION_FAIL;
999 return; 1013 description = "Data Integrity Failure";
1000 } 1014 } else
1015 action = ACTION_RETRY;
1001 break; 1016 break;
1002 case NOT_READY: 1017 case NOT_READY:
1003 /* If the device is in the process of becoming 1018 /* If the device is in the process of becoming
@@ -1012,49 +1027,57 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1012 case 0x07: /* operation in progress */ 1027 case 0x07: /* operation in progress */
1013 case 0x08: /* Long write in progress */ 1028 case 0x08: /* Long write in progress */
1014 case 0x09: /* self test in progress */ 1029 case 0x09: /* self test in progress */
1015 scsi_requeue_command(q, cmd); 1030 action = ACTION_DELAYED_RETRY;
1016 return;
1017 default:
1018 break; 1031 break;
1019 } 1032 }
1033 } else {
1034 description = "Device not ready";
1035 action = ACTION_FAIL;
1020 } 1036 }
1021 if (!(req->cmd_flags & REQ_QUIET)) 1037 break;
1022 scsi_cmd_print_sense_hdr(cmd,
1023 "Device not ready",
1024 &sshdr);
1025
1026 scsi_end_request(cmd, -EIO, this_count, 1);
1027 return;
1028 case VOLUME_OVERFLOW: 1038 case VOLUME_OVERFLOW:
1029 if (!(req->cmd_flags & REQ_QUIET)) {
1030 scmd_printk(KERN_INFO, cmd,
1031 "Volume overflow, CDB: ");
1032 __scsi_print_command(cmd->cmnd);
1033 scsi_print_sense("", cmd);
1034 }
1035 /* See SSC3rXX or current. */ 1039 /* See SSC3rXX or current. */
1036 scsi_end_request(cmd, -EIO, this_count, 1); 1040 action = ACTION_FAIL;
1037 return; 1041 break;
1038 default: 1042 default:
1043 description = "Unhandled sense code";
1044 action = ACTION_FAIL;
1039 break; 1045 break;
1040 } 1046 }
1047 } else {
1048 description = "Unhandled error code";
1049 action = ACTION_FAIL;
1041 } 1050 }
1042 if (host_byte(result) == DID_RESET) { 1051
1043 /* Third party bus reset or reset for error recovery 1052 switch (action) {
1044 * reasons. Just retry the request and see what 1053 case ACTION_FAIL:
1045 * happens. 1054 /* Give up and fail the remainder of the request */
1046 */
1047 scsi_requeue_command(q, cmd);
1048 return;
1049 }
1050 if (result) {
1051 if (!(req->cmd_flags & REQ_QUIET)) { 1055 if (!(req->cmd_flags & REQ_QUIET)) {
1056 if (description)
1057 scmd_printk(KERN_INFO, cmd, "%s",
1058 description);
1052 scsi_print_result(cmd); 1059 scsi_print_result(cmd);
1053 if (driver_byte(result) & DRIVER_SENSE) 1060 if (driver_byte(result) & DRIVER_SENSE)
1054 scsi_print_sense("", cmd); 1061 scsi_print_sense("", cmd);
1055 } 1062 }
1063 blk_end_request(req, -EIO, blk_rq_bytes(req));
1064 scsi_next_command(cmd);
1065 break;
1066 case ACTION_REPREP:
1067 /* Unprep the request and put it back at the head of the queue.
1068 * A new command will be prepared and issued.
1069 */
1070 scsi_requeue_command(q, cmd);
1071 break;
1072 case ACTION_RETRY:
1073 /* Retry the same command immediately */
1074 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1075 break;
1076 case ACTION_DELAYED_RETRY:
1077 /* Retry the same command after a delay */
1078 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1079 break;
1056 } 1080 }
1057 scsi_end_request(cmd, -EIO, this_count, !result);
1058} 1081}
1059 1082
1060static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, 1083static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
@@ -1998,7 +2021,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1998 } 2021 }
1999 2022
2000 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 2023 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2001 sshdr, timeout, retries); 2024 sshdr, timeout, retries, NULL);
2002 kfree(real_buffer); 2025 kfree(real_buffer);
2003 return ret; 2026 return ret;
2004} 2027}
@@ -2063,7 +2086,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2063 memset(buffer, 0, len); 2086 memset(buffer, 0, len);
2064 2087
2065 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 2088 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2066 sshdr, timeout, retries); 2089 sshdr, timeout, retries, NULL);
2067 2090
2068 /* This code looks awful: what it's doing is making sure an 2091 /* This code looks awful: what it's doing is making sure an
2069 * ILLEGAL REQUEST sense return identifies the actual command 2092 * ILLEGAL REQUEST sense return identifies the actual command
@@ -2145,7 +2168,7 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2145 /* try to eat the UNIT_ATTENTION if there are enough retries */ 2168 /* try to eat the UNIT_ATTENTION if there are enough retries */
2146 do { 2169 do {
2147 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2170 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2148 timeout, retries); 2171 timeout, retries, NULL);
2149 if (sdev->removable && scsi_sense_valid(sshdr) && 2172 if (sdev->removable && scsi_sense_valid(sshdr) &&
2150 sshdr->sense_key == UNIT_ATTENTION) 2173 sshdr->sense_key == UNIT_ATTENTION)
2151 sdev->changed = 1; 2174 sdev->changed = 1;
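
The scsi_lib.c hunks above change the midlayer API that most of the following per-driver hunks merely plumb through: scsi_execute() and scsi_execute_req() grow a trailing int *resid out-parameter, filled from req->data_len, and callers that do not care pass NULL. A short sketch of a caller that wants the residual (the buffer and length names here are assumptions, not taken from the patch):

        struct scsi_sense_hdr sshdr;
        unsigned char cmd[6] = { INQUIRY, 0, 0, 0, len, 0 };   /* len <= 255 assumed */
        int result, resid = 0;

        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
                                  &sshdr, SCSI_TIMEOUT, 3, &resid);
        if (result == 0 && resid == len) {
                /* GOOD status but nothing transferred -- retry, which is
                 * exactly what the scsi_probe_lun() hunk below starts doing
                 * as a workaround for some USB devices. */
        }
        /* callers without interest in the residual simply pass NULL */
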
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b14dc02c3ded..18486b51668d 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -216,7 +216,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
216 scsi_cmd[4] = 0x2a; /* size */ 216 scsi_cmd[4] = 0x2a; /* size */
217 scsi_cmd[5] = 0; 217 scsi_cmd[5] = 0;
218 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL, 218 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
219 SCSI_TIMEOUT, 3); 219 SCSI_TIMEOUT, 3, NULL);
220} 220}
221 221
222/** 222/**
@@ -573,6 +573,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
573 573
574 /* Each pass gets up to three chances to ignore Unit Attention */ 574 /* Each pass gets up to three chances to ignore Unit Attention */
575 for (count = 0; count < 3; ++count) { 575 for (count = 0; count < 3; ++count) {
576 int resid;
577
576 memset(scsi_cmd, 0, 6); 578 memset(scsi_cmd, 0, 6);
577 scsi_cmd[0] = INQUIRY; 579 scsi_cmd[0] = INQUIRY;
578 scsi_cmd[4] = (unsigned char) try_inquiry_len; 580 scsi_cmd[4] = (unsigned char) try_inquiry_len;
@@ -581,7 +583,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
581 583
582 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, 584 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
583 inq_result, try_inquiry_len, &sshdr, 585 inq_result, try_inquiry_len, &sshdr,
584 HZ / 2 + HZ * scsi_inq_timeout, 3); 586 HZ / 2 + HZ * scsi_inq_timeout, 3,
587 &resid);
585 588
586 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s " 589 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
587 "with code 0x%x\n", 590 "with code 0x%x\n",
@@ -602,6 +605,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
602 (sshdr.ascq == 0)) 605 (sshdr.ascq == 0))
603 continue; 606 continue;
604 } 607 }
608 } else {
609 /*
610 * if nothing was transferred, we try
611 * again. It's a workaround for some USB
612 * devices.
613 */
614 if (resid == try_inquiry_len)
615 continue;
605 } 616 }
606 break; 617 break;
607 } 618 }
@@ -1390,7 +1401,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1390 1401
1391 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, 1402 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
1392 lun_data, length, &sshdr, 1403 lun_data, length, &sshdr,
1393 SCSI_TIMEOUT + 4 * HZ, 3); 1404 SCSI_TIMEOUT + 4 * HZ, 3, NULL);
1394 1405
1395 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS" 1406 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
1396 " %s (try %d) result 0x%x\n", result 1407 " %s (try %d) result 0x%x\n", result
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1e71abf0607a..062304de4854 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3012,6 +3012,16 @@ fc_timeout_deleted_rport(struct work_struct *work)
3012 rport->port_state = FC_PORTSTATE_NOTPRESENT; 3012 rport->port_state = FC_PORTSTATE_NOTPRESENT;
3013 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; 3013 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3014 3014
3015 /*
3016 * Pre-emptively kill I/O rather than waiting for the work queue
3017 * item to teardown the starget. (FCOE libFC folks prefer this
3018 * and to have the rport_port_id still set when it's done).
3019 */
3020 spin_unlock_irqrestore(shost->host_lock, flags);
3021 fc_terminate_rport_io(rport);
3022
3023 BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
3024
3015 /* remove the identifiers that aren't used in the consisting binding */ 3025 /* remove the identifiers that aren't used in the consisting binding */
3016 switch (fc_host->tgtid_bind_type) { 3026 switch (fc_host->tgtid_bind_type) {
3017 case FC_TGTID_BIND_BY_WWPN: 3027 case FC_TGTID_BIND_BY_WWPN:
@@ -3035,9 +3045,6 @@ fc_timeout_deleted_rport(struct work_struct *work)
3035 * went away and didn't come back - we'll remove 3045 * went away and didn't come back - we'll remove
3036 * all attached scsi devices. 3046 * all attached scsi devices.
3037 */ 3047 */
3038 spin_unlock_irqrestore(shost->host_lock, flags);
3039
3040 scsi_target_unblock(&rport->dev);
3041 fc_queue_work(shost, &rport->stgt_delete_work); 3048 fc_queue_work(shost, &rport->stgt_delete_work);
3042} 3049}
3043 3050
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 7c2d28924d2a..f49f55c6bfc8 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -111,8 +111,9 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
111 sense, DV_TIMEOUT, /* retries */ 1, 111 sense, DV_TIMEOUT, /* retries */ 1,
112 REQ_FAILFAST_DEV | 112 REQ_FAILFAST_DEV |
113 REQ_FAILFAST_TRANSPORT | 113 REQ_FAILFAST_TRANSPORT |
114 REQ_FAILFAST_DRIVER); 114 REQ_FAILFAST_DRIVER,
115 if (result & DRIVER_SENSE) { 115 NULL);
116 if (driver_byte(result) & DRIVER_SENSE) {
116 struct scsi_sense_hdr sshdr_tmp; 117 struct scsi_sense_hdr sshdr_tmp;
117 if (!sshdr) 118 if (!sshdr)
118 sshdr = &sshdr_tmp; 119 sshdr = &sshdr_tmp;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5081b3981d3c..62b28d58e65e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -884,7 +884,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
884 * flush everything. 884 * flush everything.
885 */ 885 */
886 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 886 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
887 SD_TIMEOUT, SD_MAX_RETRIES); 887 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
888 if (res == 0) 888 if (res == 0)
889 break; 889 break;
890 } 890 }
@@ -1134,7 +1134,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
1134 the_result = scsi_execute_req(sdkp->device, cmd, 1134 the_result = scsi_execute_req(sdkp->device, cmd,
1135 DMA_NONE, NULL, 0, 1135 DMA_NONE, NULL, 0,
1136 &sshdr, SD_TIMEOUT, 1136 &sshdr, SD_TIMEOUT,
1137 SD_MAX_RETRIES); 1137 SD_MAX_RETRIES, NULL);
1138 1138
1139 /* 1139 /*
1140 * If the drive has indicated to us that it 1140 * If the drive has indicated to us that it
@@ -1192,7 +1192,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
1192 cmd[4] |= 1 << 4; 1192 cmd[4] |= 1 << 4;
1193 scsi_execute_req(sdkp->device, cmd, DMA_NONE, 1193 scsi_execute_req(sdkp->device, cmd, DMA_NONE,
1194 NULL, 0, &sshdr, 1194 NULL, 0, &sshdr,
1195 SD_TIMEOUT, SD_MAX_RETRIES); 1195 SD_TIMEOUT, SD_MAX_RETRIES,
1196 NULL);
1196 spintime_expire = jiffies + 100 * HZ; 1197 spintime_expire = jiffies + 100 * HZ;
1197 spintime = 1; 1198 spintime = 1;
1198 } 1199 }
@@ -1306,7 +1307,7 @@ repeat:
1306 1307
1307 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 1308 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1308 buffer, longrc ? 13 : 8, &sshdr, 1309 buffer, longrc ? 13 : 8, &sshdr,
1309 SD_TIMEOUT, SD_MAX_RETRIES); 1310 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1310 1311
1311 if (media_not_present(sdkp, &sshdr)) 1312 if (media_not_present(sdkp, &sshdr))
1312 return; 1313 return;
@@ -1986,7 +1987,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
1986 return -ENODEV; 1987 return -ENODEV;
1987 1988
1988 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 1989 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
1989 SD_TIMEOUT, SD_MAX_RETRIES); 1990 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1990 if (res) { 1991 if (res) {
1991 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); 1992 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
1992 sd_print_result(sdkp, res); 1993 sd_print_result(sdkp, res);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 1bcf3c33d7ff..7f0df29f3a64 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -77,7 +77,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
77 }; 77 };
78 78
79 return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 79 return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
80 NULL, SES_TIMEOUT, SES_RETRIES); 80 NULL, SES_TIMEOUT, SES_RETRIES, NULL);
81} 81}
82 82
83static int ses_send_diag(struct scsi_device *sdev, int page_code, 83static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -95,7 +95,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
95 }; 95 };
96 96
97 result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen, 97 result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
98 NULL, SES_TIMEOUT, SES_RETRIES); 98 NULL, SES_TIMEOUT, SES_RETRIES, NULL);
99 if (result) 99 if (result)
100 sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n", 100 sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
101 result); 101 result);
@@ -369,7 +369,8 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
369 return; 369 return;
370 370
371 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 371 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
372 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES)) 372 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES,
373 NULL))
373 goto free; 374 goto free;
374 375
375 vpd_len = (buf[2] << 8) + buf[3]; 376 vpd_len = (buf[2] << 8) + buf[3];
@@ -380,7 +381,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
380 cmd[3] = vpd_len >> 8; 381 cmd[3] = vpd_len >> 8;
381 cmd[4] = vpd_len & 0xff; 382 cmd[4] = vpd_len & 0xff;
382 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 383 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
383 vpd_len, NULL, SES_TIMEOUT, SES_RETRIES)) 384 vpd_len, NULL, SES_TIMEOUT, SES_RETRIES, NULL))
384 goto free; 385 goto free;
385 386
386 desc = buf + 4; 387 desc = buf + 4;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 45b66b98a516..e7fa3caead79 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -177,7 +177,7 @@ int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
177 do { 177 do {
178 the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 178 the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
179 0, sshdr, SR_TIMEOUT, 179 0, sshdr, SR_TIMEOUT,
180 retries--); 180 retries--, NULL);
181 if (scsi_sense_valid(sshdr) && 181 if (scsi_sense_valid(sshdr) &&
182 sshdr->sense_key == UNIT_ATTENTION) 182 sshdr->sense_key == UNIT_ATTENTION)
183 sdev->changed = 1; 183 sdev->changed = 1;
@@ -681,7 +681,7 @@ static void get_sectorsize(struct scsi_cd *cd)
681 /* Do the command and wait.. */ 681 /* Do the command and wait.. */
682 the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE, 682 the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
683 buffer, sizeof(buffer), NULL, 683 buffer, sizeof(buffer), NULL,
684 SR_TIMEOUT, MAX_RETRIES); 684 SR_TIMEOUT, MAX_RETRIES, NULL);
685 685
686 retries--; 686 retries--;
687 687
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ae87d08df588..d92ff512d213 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -207,7 +207,7 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
207 memset(sense, 0, sizeof(*sense)); 207 memset(sense, 0, sizeof(*sense));
208 result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, 208 result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
209 cgc->buffer, cgc->buflen, (char *)sense, 209 cgc->buffer, cgc->buflen, (char *)sense,
210 cgc->timeout, IOCTL_RETRIES, 0); 210 cgc->timeout, IOCTL_RETRIES, 0, NULL);
211 211
212 scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr); 212 scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
213 213
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c959bdc55f4f..7f3f317ee6ca 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -451,9 +451,23 @@ static void st_sleep_done(void *data, char *sense, int result, int resid)
451 complete(SRpnt->waiting); 451 complete(SRpnt->waiting);
452} 452}
453 453
454static struct st_request *st_allocate_request(void) 454static struct st_request *st_allocate_request(struct scsi_tape *stp)
455{ 455{
456 return kzalloc(sizeof(struct st_request), GFP_KERNEL); 456 struct st_request *streq;
457
458 streq = kzalloc(sizeof(*streq), GFP_KERNEL);
459 if (streq)
460 streq->stp = stp;
461 else {
462 DEBC(printk(KERN_ERR "%s: Can't get SCSI request.\n",
463 tape_name(stp)););
464 if (signal_pending(current))
465 stp->buffer->syscall_result = -EINTR;
466 else
467 stp->buffer->syscall_result = -EBUSY;
468 }
469
470 return streq;
457} 471}
458 472
459static void st_release_request(struct st_request *streq) 473static void st_release_request(struct st_request *streq)
@@ -481,18 +495,10 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
481 return NULL; 495 return NULL;
482 } 496 }
483 497
484 if (SRpnt == NULL) { 498 if (!SRpnt) {
485 SRpnt = st_allocate_request(); 499 SRpnt = st_allocate_request(STp);
486 if (SRpnt == NULL) { 500 if (!SRpnt)
487 DEBC( printk(KERN_ERR "%s: Can't get SCSI request.\n",
488 tape_name(STp)); );
489 if (signal_pending(current))
490 (STp->buffer)->syscall_result = (-EINTR);
491 else
492 (STp->buffer)->syscall_result = (-EBUSY);
493 return NULL; 501 return NULL;
494 }
495 SRpnt->stp = STp;
496 } 502 }
497 503
498 /* If async IO, set last_SRpnt. This ptr tells write_behind_check 504 /* If async IO, set last_SRpnt. This ptr tells write_behind_check
@@ -527,6 +533,28 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
527 return SRpnt; 533 return SRpnt;
528} 534}
529 535
536static int st_scsi_kern_execute(struct st_request *streq,
537 const unsigned char *cmd, int data_direction,
538 void *buffer, unsigned bufflen, int timeout,
539 int retries)
540{
541 struct scsi_tape *stp = streq->stp;
542 int ret, resid;
543
544 stp->buffer->cmdstat.have_sense = 0;
545 memcpy(streq->cmd, cmd, sizeof(streq->cmd));
546
547 ret = scsi_execute(stp->device, cmd, data_direction, buffer, bufflen,
548 streq->sense, timeout, retries, 0, &resid);
549 if (driver_byte(ret) & DRIVER_ERROR)
550 return -EBUSY;
551
552 stp->buffer->cmdstat.midlevel_result = streq->result = ret;
553 stp->buffer->cmdstat.residual = resid;
554 stp->buffer->syscall_result = st_chk_result(stp, streq);
555
556 return 0;
557}
530 558
531/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if 559/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
532 write has been correct but EOM early warning reached, -EIO if write ended in 560 write has been correct but EOM early warning reached, -EIO if write ended in
@@ -599,6 +627,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
599{ 627{
600 struct st_request *SRpnt; 628 struct st_request *SRpnt;
601 unsigned char cmd[MAX_COMMAND_SIZE]; 629 unsigned char cmd[MAX_COMMAND_SIZE];
630 int ret;
602 631
603 cmd[0] = SPACE; 632 cmd[0] = SPACE;
604 cmd[1] = 0x01; /* Space FileMarks */ 633 cmd[1] = 0x01; /* Space FileMarks */
@@ -612,19 +641,26 @@ static int cross_eof(struct scsi_tape * STp, int forward)
612 DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n", 641 DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
613 tape_name(STp), forward ? "forward" : "backward")); 642 tape_name(STp), forward ? "forward" : "backward"));
614 643
615 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, 644 SRpnt = st_allocate_request(STp);
616 STp->device->timeout, MAX_RETRIES, 1);
617 if (!SRpnt) 645 if (!SRpnt)
618 return (STp->buffer)->syscall_result; 646 return STp->buffer->syscall_result;
619 647
620 st_release_request(SRpnt); 648 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
621 SRpnt = NULL; 649 STp->device->request_queue->rq_timeout,
650 MAX_RETRIES);
651 if (ret)
652 goto out;
653
654 ret = STp->buffer->syscall_result;
622 655
623 if ((STp->buffer)->cmdstat.midlevel_result != 0) 656 if ((STp->buffer)->cmdstat.midlevel_result != 0)
624 printk(KERN_ERR "%s: Stepping over filemark %s failed.\n", 657 printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
625 tape_name(STp), forward ? "forward" : "backward"); 658 tape_name(STp), forward ? "forward" : "backward");
626 659
627 return (STp->buffer)->syscall_result; 660out:
661 st_release_request(SRpnt);
662
663 return ret;
628} 664}
629 665
630 666
@@ -657,7 +693,8 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
657 cmd[4] = blks; 693 cmd[4] = blks;
658 694
659 SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE, 695 SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE,
660 STp->device->timeout, MAX_WRITE_RETRIES, 1); 696 STp->device->request_queue->rq_timeout,
697 MAX_WRITE_RETRIES, 1);
661 if (!SRpnt) 698 if (!SRpnt)
662 return (STp->buffer)->syscall_result; 699 return (STp->buffer)->syscall_result;
663 700
@@ -844,21 +881,24 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
844 int attentions, waits, max_wait, scode; 881 int attentions, waits, max_wait, scode;
845 int retval = CHKRES_READY, new_session = 0; 882 int retval = CHKRES_READY, new_session = 0;
846 unsigned char cmd[MAX_COMMAND_SIZE]; 883 unsigned char cmd[MAX_COMMAND_SIZE];
847 struct st_request *SRpnt = NULL; 884 struct st_request *SRpnt;
848 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 885 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
849 886
887 SRpnt = st_allocate_request(STp);
888 if (!SRpnt)
889 return STp->buffer->syscall_result;
890
850 max_wait = do_wait ? ST_BLOCK_SECONDS : 0; 891 max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
851 892
852 for (attentions=waits=0; ; ) { 893 for (attentions=waits=0; ; ) {
853 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); 894 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
854 cmd[0] = TEST_UNIT_READY; 895 cmd[0] = TEST_UNIT_READY;
855 SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
856 STp->long_timeout, MAX_READY_RETRIES, 1);
857 896
858 if (!SRpnt) { 897 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
859 retval = (STp->buffer)->syscall_result; 898 STp->long_timeout,
899 MAX_READY_RETRIES);
900 if (retval)
860 break; 901 break;
861 }
862 902
863 if (cmdstatp->have_sense) { 903 if (cmdstatp->have_sense) {
864 904
@@ -902,8 +942,8 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
902 break; 942 break;
903 } 943 }
904 944
905 if (SRpnt != NULL) 945 st_release_request(SRpnt);
906 st_release_request(SRpnt); 946
907 return retval; 947 return retval;
908} 948}
909 949
@@ -980,16 +1020,24 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
980 } 1020 }
981 } 1021 }
982 1022
1023 SRpnt = st_allocate_request(STp);
1024 if (!SRpnt) {
1025 retval = STp->buffer->syscall_result;
1026 goto err_out;
1027 }
1028
983 if (STp->omit_blklims) 1029 if (STp->omit_blklims)
984 STp->min_block = STp->max_block = (-1); 1030 STp->min_block = STp->max_block = (-1);
985 else { 1031 else {
986 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); 1032 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
987 cmd[0] = READ_BLOCK_LIMITS; 1033 cmd[0] = READ_BLOCK_LIMITS;
988 1034
989 SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE, 1035 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
990 STp->device->timeout, MAX_READY_RETRIES, 1); 1036 STp->buffer->b_data, 6,
991 if (!SRpnt) { 1037 STp->device->request_queue->rq_timeout,
992 retval = (STp->buffer)->syscall_result; 1038 MAX_READY_RETRIES);
1039 if (retval) {
1040 st_release_request(SRpnt);
993 goto err_out; 1041 goto err_out;
994 } 1042 }
995 1043
@@ -1013,10 +1061,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
1013 cmd[0] = MODE_SENSE; 1061 cmd[0] = MODE_SENSE;
1014 cmd[4] = 12; 1062 cmd[4] = 12;
1015 1063
1016 SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE, 1064 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
1017 STp->device->timeout, MAX_READY_RETRIES, 1); 1065 STp->buffer->b_data, 12,
1018 if (!SRpnt) { 1066 STp->device->request_queue->rq_timeout,
1019 retval = (STp->buffer)->syscall_result; 1067 MAX_READY_RETRIES);
1068 if (retval) {
1069 st_release_request(SRpnt);
1020 goto err_out; 1070 goto err_out;
1021 } 1071 }
1022 1072
@@ -1246,10 +1296,17 @@ static int st_flush(struct file *filp, fl_owner_t id)
1246 cmd[0] = WRITE_FILEMARKS; 1296 cmd[0] = WRITE_FILEMARKS;
1247 cmd[4] = 1 + STp->two_fm; 1297 cmd[4] = 1 + STp->two_fm;
1248 1298
1249 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, 1299 SRpnt = st_allocate_request(STp);
1250 STp->device->timeout, MAX_WRITE_RETRIES, 1);
1251 if (!SRpnt) { 1300 if (!SRpnt) {
1252 result = (STp->buffer)->syscall_result; 1301 result = STp->buffer->syscall_result;
1302 goto out;
1303 }
1304
1305 result = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
1306 STp->device->request_queue->rq_timeout,
1307 MAX_WRITE_RETRIES);
1308 if (result) {
1309 st_release_request(SRpnt);
1253 goto out; 1310 goto out;
1254 } 1311 }
1255 1312
@@ -1634,7 +1691,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
1634 cmd[4] = blks; 1691 cmd[4] = blks;
1635 1692
1636 SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE, 1693 SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
1637 STp->device->timeout, MAX_WRITE_RETRIES, !async_write); 1694 STp->device->request_queue->rq_timeout,
1695 MAX_WRITE_RETRIES, !async_write);
1638 if (!SRpnt) { 1696 if (!SRpnt) {
1639 retval = STbp->syscall_result; 1697 retval = STbp->syscall_result;
1640 goto out; 1698 goto out;
@@ -1804,7 +1862,8 @@ static long read_tape(struct scsi_tape *STp, long count,
1804 1862
1805 SRpnt = *aSRpnt; 1863 SRpnt = *aSRpnt;
1806 SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE, 1864 SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE,
1807 STp->device->timeout, MAX_RETRIES, 1); 1865 STp->device->request_queue->rq_timeout,
1866 MAX_RETRIES, 1);
1808 release_buffering(STp, 1); 1867 release_buffering(STp, 1);
1809 *aSRpnt = SRpnt; 1868 *aSRpnt = SRpnt;
1810 if (!SRpnt) 1869 if (!SRpnt)
@@ -2213,7 +2272,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
2213 DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name, 2272 DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name,
2214 (value & ~MT_ST_SET_LONG_TIMEOUT))); 2273 (value & ~MT_ST_SET_LONG_TIMEOUT)));
2215 } else { 2274 } else {
2216 STp->device->timeout = value * HZ; 2275 blk_queue_rq_timeout(STp->device->request_queue,
2276 value * HZ);
2217 DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n", 2277 DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n",
2218 name, value) ); 2278 name, value) );
2219 } 2279 }
@@ -2311,7 +2371,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
2311static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs) 2371static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2312{ 2372{
2313 unsigned char cmd[MAX_COMMAND_SIZE]; 2373 unsigned char cmd[MAX_COMMAND_SIZE];
2314 struct st_request *SRpnt = NULL; 2374 struct st_request *SRpnt;
2375 int ret;
2315 2376
2316 memset(cmd, 0, MAX_COMMAND_SIZE); 2377 memset(cmd, 0, MAX_COMMAND_SIZE);
2317 cmd[0] = MODE_SENSE; 2378 cmd[0] = MODE_SENSE;
@@ -2320,14 +2381,17 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2320 cmd[2] = page; 2381 cmd[2] = page;
2321 cmd[4] = 255; 2382 cmd[4] = 255;
2322 2383
2323 SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, 2384 SRpnt = st_allocate_request(STp);
2324 STp->device->timeout, 0, 1); 2385 if (!SRpnt)
2325 if (SRpnt == NULL) 2386 return STp->buffer->syscall_result;
2326 return (STp->buffer)->syscall_result;
2327 2387
2388 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
2389 STp->buffer->b_data, cmd[4],
2390 STp->device->request_queue->rq_timeout,
2391 MAX_RETRIES);
2328 st_release_request(SRpnt); 2392 st_release_request(SRpnt);
2329 2393
2330 return (STp->buffer)->syscall_result; 2394 return ret ? : STp->buffer->syscall_result;
2331} 2395}
2332 2396
2333 2397
@@ -2335,9 +2399,9 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2335 in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */ 2399 in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
2336static int write_mode_page(struct scsi_tape *STp, int page, int slow) 2400static int write_mode_page(struct scsi_tape *STp, int page, int slow)
2337{ 2401{
2338 int pgo; 2402 int pgo, timeout, ret = 0;
2339 unsigned char cmd[MAX_COMMAND_SIZE]; 2403 unsigned char cmd[MAX_COMMAND_SIZE];
2340 struct st_request *SRpnt = NULL; 2404 struct st_request *SRpnt;
2341 2405
2342 memset(cmd, 0, MAX_COMMAND_SIZE); 2406 memset(cmd, 0, MAX_COMMAND_SIZE);
2343 cmd[0] = MODE_SELECT; 2407 cmd[0] = MODE_SELECT;
@@ -2351,14 +2415,21 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
2351 (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP; 2415 (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
2352 (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR; 2416 (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
2353 2417
2354 SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, 2418 SRpnt = st_allocate_request(STp);
2355 (slow ? STp->long_timeout : STp->device->timeout), 0, 1); 2419 if (!SRpnt)
2356 if (SRpnt == NULL) 2420 return ret;
2357 return (STp->buffer)->syscall_result; 2421
2422 timeout = slow ? STp->long_timeout :
2423 STp->device->request_queue->rq_timeout;
2424
2425 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_TO_DEVICE,
2426 STp->buffer->b_data, cmd[4], timeout, 0);
2427 if (!ret)
2428 ret = STp->buffer->syscall_result;
2358 2429
2359 st_release_request(SRpnt); 2430 st_release_request(SRpnt);
2360 2431
2361 return (STp->buffer)->syscall_result; 2432 return ret;
2362} 2433}
2363 2434
2364 2435
@@ -2464,7 +2535,7 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
2464 } 2535 }
2465 if (STp->immediate) { 2536 if (STp->immediate) {
2466 cmd[1] = 1; /* Don't wait for completion */ 2537 cmd[1] = 1; /* Don't wait for completion */
2467 timeout = STp->device->timeout; 2538 timeout = STp->device->request_queue->rq_timeout;
2468 } 2539 }
2469 else 2540 else
2470 timeout = STp->long_timeout; 2541 timeout = STp->long_timeout;
@@ -2476,13 +2547,16 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
2476 printk(ST_DEB_MSG "%s: Loading tape.\n", name); 2547 printk(ST_DEB_MSG "%s: Loading tape.\n", name);
2477 ); 2548 );
2478 2549
2479 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, 2550 SRpnt = st_allocate_request(STp);
2480 timeout, MAX_RETRIES, 1);
2481 if (!SRpnt) 2551 if (!SRpnt)
2482 return (STp->buffer)->syscall_result; 2552 return STp->buffer->syscall_result;
2553
2554 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout,
2555 MAX_RETRIES);
2556 if (retval)
2557 goto out;
2483 2558
2484 retval = (STp->buffer)->syscall_result; 2559 retval = (STp->buffer)->syscall_result;
2485 st_release_request(SRpnt);
2486 2560
2487 if (!retval) { /* SCSI command successful */ 2561 if (!retval) { /* SCSI command successful */
2488 2562
@@ -2501,6 +2575,8 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
2501 STps = &(STp->ps[STp->partition]); 2575 STps = &(STp->ps[STp->partition]);
2502 STps->drv_file = STps->drv_block = (-1); 2576 STps->drv_file = STps->drv_block = (-1);
2503 } 2577 }
2578out:
2579 st_release_request(SRpnt);
2504 2580
2505 return retval; 2581 return retval;
2506} 2582}
@@ -2638,7 +2714,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2638 cmd[2] = (arg >> 16); 2714 cmd[2] = (arg >> 16);
2639 cmd[3] = (arg >> 8); 2715 cmd[3] = (arg >> 8);
2640 cmd[4] = arg; 2716 cmd[4] = arg;
2641 timeout = STp->device->timeout; 2717 timeout = STp->device->request_queue->rq_timeout;
2642 DEBC( 2718 DEBC(
2643 if (cmd_in == MTWEOF) 2719 if (cmd_in == MTWEOF)
2644 printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name, 2720 printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
@@ -2656,7 +2732,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2656 cmd[0] = REZERO_UNIT; 2732 cmd[0] = REZERO_UNIT;
2657 if (STp->immediate) { 2733 if (STp->immediate) {
2658 cmd[1] = 1; /* Don't wait for completion */ 2734 cmd[1] = 1; /* Don't wait for completion */
2659 timeout = STp->device->timeout; 2735 timeout = STp->device->request_queue->rq_timeout;
2660 } 2736 }
2661 DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name)); 2737 DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name));
2662 fileno = blkno = at_sm = 0; 2738 fileno = blkno = at_sm = 0;
@@ -2669,7 +2745,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2669 cmd[0] = START_STOP; 2745 cmd[0] = START_STOP;
2670 if (STp->immediate) { 2746 if (STp->immediate) {
2671 cmd[1] = 1; /* Don't wait for completion */ 2747 cmd[1] = 1; /* Don't wait for completion */
2672 timeout = STp->device->timeout; 2748 timeout = STp->device->request_queue->rq_timeout;
2673 } 2749 }
2674 cmd[4] = 3; 2750 cmd[4] = 3;
2675 DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name)); 2751 DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name));
@@ -2702,7 +2778,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2702 cmd[1] = (arg ? 1 : 0); /* Long erase with non-zero argument */ 2778 cmd[1] = (arg ? 1 : 0); /* Long erase with non-zero argument */
2703 if (STp->immediate) { 2779 if (STp->immediate) {
2704 cmd[1] |= 2; /* Don't wait for completion */ 2780 cmd[1] |= 2; /* Don't wait for completion */
2705 timeout = STp->device->timeout; 2781 timeout = STp->device->request_queue->rq_timeout;
2706 } 2782 }
2707 else 2783 else
2708 timeout = STp->long_timeout * 8; 2784 timeout = STp->long_timeout * 8;
@@ -2754,7 +2830,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2754 (STp->buffer)->b_data[9] = (ltmp >> 16); 2830 (STp->buffer)->b_data[9] = (ltmp >> 16);
2755 (STp->buffer)->b_data[10] = (ltmp >> 8); 2831 (STp->buffer)->b_data[10] = (ltmp >> 8);
2756 (STp->buffer)->b_data[11] = ltmp; 2832 (STp->buffer)->b_data[11] = ltmp;
2757 timeout = STp->device->timeout; 2833 timeout = STp->device->request_queue->rq_timeout;
2758 DEBC( 2834 DEBC(
2759 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) 2835 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK)
2760 printk(ST_DEB_MSG 2836 printk(ST_DEB_MSG
@@ -2776,12 +2852,15 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2776 return (-ENOSYS); 2852 return (-ENOSYS);
2777 } 2853 }
2778 2854
2779 SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction, 2855 SRpnt = st_allocate_request(STp);
2780 timeout, MAX_RETRIES, 1);
2781 if (!SRpnt) 2856 if (!SRpnt)
2782 return (STp->buffer)->syscall_result; 2857 return (STp->buffer)->syscall_result;
2783 2858
2784 ioctl_result = (STp->buffer)->syscall_result; 2859 ioctl_result = st_scsi_kern_execute(SRpnt, cmd, direction,
2860 STp->buffer->b_data, datalen,
2861 timeout, MAX_RETRIES);
2862 if (!ioctl_result)
2863 ioctl_result = (STp->buffer)->syscall_result;
2785 2864
2786 if (!ioctl_result) { /* SCSI command successful */ 2865 if (!ioctl_result) { /* SCSI command successful */
2787 st_release_request(SRpnt); 2866 st_release_request(SRpnt);
@@ -2943,10 +3022,17 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
2943 if (!logical && !STp->scsi2_logical) 3022 if (!logical && !STp->scsi2_logical)
2944 scmd[1] = 1; 3023 scmd[1] = 1;
2945 } 3024 }
2946 SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE, 3025
2947 STp->device->timeout, MAX_READY_RETRIES, 1); 3026 SRpnt = st_allocate_request(STp);
2948 if (!SRpnt) 3027 if (!SRpnt)
2949 return (STp->buffer)->syscall_result; 3028 return STp->buffer->syscall_result;
3029
3030 result = st_scsi_kern_execute(SRpnt, scmd, DMA_FROM_DEVICE,
3031 STp->buffer->b_data, 20,
3032 STp->device->request_queue->rq_timeout,
3033 MAX_READY_RETRIES);
3034 if (result)
3035 goto out;
2950 3036
2951 if ((STp->buffer)->syscall_result != 0 || 3037 if ((STp->buffer)->syscall_result != 0 ||
2952 (STp->device->scsi_level >= SCSI_2 && 3038 (STp->device->scsi_level >= SCSI_2 &&
@@ -2974,6 +3060,7 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
2974 DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name, 3060 DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
2975 *block, *partition)); 3061 *block, *partition));
2976 } 3062 }
3063out:
2977 st_release_request(SRpnt); 3064 st_release_request(SRpnt);
2978 SRpnt = NULL; 3065 SRpnt = NULL;
2979 3066
@@ -3045,13 +3132,17 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
3045 } 3132 }
3046 if (STp->immediate) { 3133 if (STp->immediate) {
3047 scmd[1] |= 1; /* Don't wait for completion */ 3134 scmd[1] |= 1; /* Don't wait for completion */
3048 timeout = STp->device->timeout; 3135 timeout = STp->device->request_queue->rq_timeout;
3049 } 3136 }
3050 3137
3051 SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE, 3138 SRpnt = st_allocate_request(STp);
3052 timeout, MAX_READY_RETRIES, 1);
3053 if (!SRpnt) 3139 if (!SRpnt)
3054 return (STp->buffer)->syscall_result; 3140 return STp->buffer->syscall_result;
3141
3142 result = st_scsi_kern_execute(SRpnt, scmd, DMA_NONE, NULL, 0,
3143 timeout, MAX_READY_RETRIES);
3144 if (result)
3145 goto out;
3055 3146
3056 STps->drv_block = STps->drv_file = (-1); 3147 STps->drv_block = STps->drv_file = (-1);
3057 STps->eof = ST_NOEOF; 3148 STps->eof = ST_NOEOF;
@@ -3076,7 +3167,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
3076 STps->drv_block = STps->drv_file = 0; 3167 STps->drv_block = STps->drv_file = 0;
3077 result = 0; 3168 result = 0;
3078 } 3169 }
3079 3170out:
3080 st_release_request(SRpnt); 3171 st_release_request(SRpnt);
3081 SRpnt = NULL; 3172 SRpnt = NULL;
3082 3173
@@ -4029,7 +4120,7 @@ static int st_probe(struct device *dev)
4029 tpnt->partition = 0; 4120 tpnt->partition = 0;
4030 tpnt->new_partition = 0; 4121 tpnt->new_partition = 0;
4031 tpnt->nbr_partitions = 0; 4122 tpnt->nbr_partitions = 0;
4032 tpnt->device->timeout = ST_TIMEOUT; 4123 blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
4033 tpnt->long_timeout = ST_LONG_TIMEOUT; 4124 tpnt->long_timeout = ST_LONG_TIMEOUT;
4034 tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma; 4125 tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
4035 4126
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 2fa830c0be27..a3a18ad73125 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1108,8 +1108,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1108 goto out_scsi_host_put; 1108 goto out_scsi_host_put;
1109 } 1109 }
1110 1110
1111 hba->mmio_base = ioremap_nocache(pci_resource_start(pdev, 0), 1111 hba->mmio_base = pci_ioremap_bar(pdev, 0);
1112 pci_resource_len(pdev, 0));
1113 if ( !hba->mmio_base) { 1112 if ( !hba->mmio_base) {
1114 printk(KERN_ERR DRV_NAME "(%s): memory map failed\n", 1113 printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
1115 pci_name(pdev)); 1114 pci_name(pdev));
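
The stex.c hunk swaps the open-coded BAR mapping for pci_ioremap_bar(), which wraps the pci_resource_start()/pci_resource_len()/ioremap_nocache() sequence. A hedged sketch; the function name is illustrative.

/*
 * Hedged sketch, not part of the patch: mapping BAR 0 with the helper.
 */
#include <linux/pci.h>

static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	/* was: ioremap_nocache(pci_resource_start(pdev, 0),
	 *                      pci_resource_len(pdev, 0)); */
	return pci_ioremap_bar(pdev, 0);
}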
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index f7d279542fa5..e5c369bb568f 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -6,7 +6,7 @@
6 * Changes : 6 * Changes :
7 * 7 *
8 * Marcelo Tosatti <marcelo@conectiva.com.br> : Added io_request_lock locking 8 * Marcelo Tosatti <marcelo@conectiva.com.br> : Added io_request_lock locking
9 * Alan Cox <alan@redhat.com> : Cleaned up code formatting 9 * Alan Cox <alan@lxorguk.ukuu.org.uk> : Cleaned up code formatting
10 * Fixed an irq locking bug 10 * Fixed an irq locking bug
11 * Added ISAPnP support 11 * Added ISAPnP support
12 * Bjoern A. Zeeb <bzeeb@zabbadoz.net> : Initial irq locking updates 12 * Bjoern A. Zeeb <bzeeb@zabbadoz.net> : Initial irq locking updates
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 69ac6e590f1d..9a4273445c0d 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2572,9 +2572,10 @@ static struct pci_driver dc390_driver = {
2572 2572
2573static int __init dc390_module_init(void) 2573static int __init dc390_module_init(void)
2574{ 2574{
2575 if (!disable_clustering) 2575 if (!disable_clustering) {
2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n"); 2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
2577 printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n"); 2577 printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
2578 }
2578 2579
2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) { 2580 if (tmscsim[0] == -1 || tmscsim[0] > 15) {
2580 tmscsim[0] = 7; 2581 tmscsim[0] = 7;
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 329eb8780e74..601e95141cbe 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1111,7 +1111,8 @@ static int u14_34f_detect(struct scsi_host_template *tpnt) {
1111 1111
1112static void map_dma(unsigned int i, unsigned int j) { 1112static void map_dma(unsigned int i, unsigned int j) {
1113 unsigned int data_len = 0; 1113 unsigned int data_len = 0;
1114 unsigned int k, count, pci_dir; 1114 unsigned int k, pci_dir;
1115 int count;
1115 struct scatterlist *sg; 1116 struct scatterlist *sg;
1116 struct mscp *cpp; 1117 struct mscp *cpp;
1117 struct scsi_cmnd *SCpnt; 1118 struct scsi_cmnd *SCpnt;
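
The u14-34f.c change makes count a signed int, presumably so that negative error returns (for example from scsi_dma_map()) compare correctly. A hedged sketch of the idea; the wrapper function is illustrative and the motivation is inferred, not stated in the patch.

/*
 * Hedged sketch, not part of the patch: scsi_dma_map() returns a
 * negative value when mapping fails, and that check only works if the
 * count is stored in a signed type.
 */
#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>

static int example_map_segments(struct scsi_cmnd *SCpnt)
{
	int count = scsi_dma_map(SCpnt);	/* signed on purpose */

	if (count < 0)				/* never true for an unsigned count */
		return -ENOMEM;

	return count;				/* number of mapped sg entries */
}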
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d4c13561f4a6..093610bcfcce 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -146,13 +146,13 @@
146 * 146 *
147 * use host->host_lock, not io_request_lock, cleanups 147 * use host->host_lock, not io_request_lock, cleanups
148 * 148 *
149 * 2002/10/04 - Alan Cox <alan@redhat.com> 149 * 2002/10/04 - Alan Cox <alan@lxorguk.ukuu.org.uk>
150 * 150 *
151 * Use dev_id for interrupts, kill __func__ pasting 151 * Use dev_id for interrupts, kill __func__ pasting
152 * Add a lock for the scb pool, clean up all other cli/sti usage stuff 152 * Add a lock for the scb pool, clean up all other cli/sti usage stuff
153 * Use the adapter lock for the other places we had the cli's 153 * Use the adapter lock for the other places we had the cli's
154 * 154 *
155 * 2002/10/06 - Alan Cox <alan@redhat.com> 155 * 2002/10/06 - Alan Cox <alan@lxorguk.ukuu.org.uk>
156 * 156 *
157 * Switch to new style error handling 157 * Switch to new style error handling
158 * Clean up delay to udelay, and yielding sleeps 158 * Clean up delay to udelay, and yielding sleeps
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 579d63a81aa2..b695ab3142d8 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -447,7 +447,7 @@ config SERIAL_CLPS711X_CONSOLE
447 447
448config SERIAL_SAMSUNG 448config SERIAL_SAMSUNG
449 tristate "Samsung SoC serial support" 449 tristate "Samsung SoC serial support"
450 depends on ARM && PLAT_S3C24XX 450 depends on ARM && PLAT_S3C
451 select SERIAL_CORE 451 select SERIAL_CORE
452 help 452 help
453 Support for the on-chip UARTs on the Samsung S3C24XX series CPUs, 453 Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
@@ -455,6 +455,16 @@ config SERIAL_SAMSUNG
455 provide all of these ports, depending on how the serial port 455 provide all of these ports, depending on how the serial port
456 pins are configured. 456 pins are configured.
457 457
458config SERIAL_SAMSUNG_UARTS
459 int
460 depends on SERIAL_SAMSUNG
461 default 2 if ARCH_S3C2400
462 default 4 if ARCH_S3C64XX || CPU_S3C2443
463 default 3
464 help
465 Select the number of available UART ports for the Samsung S3C
466 serial driver
467
458config SERIAL_SAMSUNG_DEBUG 468config SERIAL_SAMSUNG_DEBUG
459 bool "Samsung SoC serial debug" 469 bool "Samsung SoC serial debug"
460 depends on SERIAL_SAMSUNG && DEBUG_LL 470 depends on SERIAL_SAMSUNG && DEBUG_LL
@@ -508,7 +518,20 @@ config SERIAL_S3C2440
508 help 518 help
509 Serial port support for the Samsung S3C2440 and S3C2442 SoC 519 Serial port support for the Samsung S3C2440 and S3C2442 SoC
510 520
521config SERIAL_S3C24A0
522 tristate "Samsung S3C24A0 Serial port support"
523 depends on SERIAL_SAMSUNG && CPU_S3C24A0
524 default y if CPU_S3C24A0
525 help
526 Serial port support for the Samsung S3C24A0 SoC
511 527
528config SERIAL_S3C6400
529 tristate "Samsung S3C6400/S3C6410 Serial port support"
 530 depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410)
531 default y
532 help
533 Serial port support for the Samsung S3C6400 and S3C6410
534 SoCs
512 535
513config SERIAL_DZ 536config SERIAL_DZ
514 bool "DECstation DZ serial driver" 537 bool "DECstation DZ serial driver"
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 0c17c8ddb19d..dfe775ac45b2 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -41,6 +41,8 @@ obj-$(CONFIG_SERIAL_S3C2400) += s3c2400.o
41obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o 41obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
42obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o 42obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
43obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o 43obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
44obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
45obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
44obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o 46obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
45obj-$(CONFIG_SERIAL_MUX) += mux.o 47obj-$(CONFIG_SERIAL_MUX) += mux.o
46obj-$(CONFIG_SERIAL_68328) += 68328serial.o 48obj-$(CONFIG_SERIAL_68328) += 68328serial.o
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 71562689116f..e3a5ad5ef1d6 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -692,7 +692,7 @@ static int pl010_probe(struct amba_device *dev, void *id)
692 goto free; 692 goto free;
693 } 693 }
694 694
695 uap->clk = clk_get(&dev->dev, "UARTCLK"); 695 uap->clk = clk_get(&dev->dev, NULL);
696 if (IS_ERR(uap->clk)) { 696 if (IS_ERR(uap->clk)) {
697 ret = PTR_ERR(uap->clk); 697 ret = PTR_ERR(uap->clk);
698 goto unmap; 698 goto unmap;
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index b7180046f8db..8b2b9700f3e4 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -756,7 +756,7 @@ static int pl011_probe(struct amba_device *dev, void *id)
756 goto free; 756 goto free;
757 } 757 }
758 758
759 uap->clk = clk_get(&dev->dev, "UARTCLK"); 759 uap->clk = clk_get(&dev->dev, NULL);
760 if (IS_ERR(uap->clk)) { 760 if (IS_ERR(uap->clk)) {
761 ret = PTR_ERR(uap->clk); 761 ret = PTR_ERR(uap->clk);
762 goto unmap; 762 goto unmap;
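
Several drivers in this series (amba-pl010, amba-pl011, pxa serial, the pxa UDCs) drop the named consumer clock ("UARTCLK", "UDCCLK") and call clk_get() with a NULL connection id, so the clock is looked up by device alone. A hedged sketch of the consumer side; the wrapper function is illustrative.

/*
 * Hedged sketch, not part of the patch: requesting and enabling a
 * device's clock by device match only.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_devclk(struct device *dev, struct clk **clkp)
{
	struct clk *clk = clk_get(dev, NULL);	/* was clk_get(dev, "UARTCLK") */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*clkp = clk;
	return 0;
}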
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 3f90f1bbbbcd..a50954612b60 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -66,7 +66,7 @@
66#define ONEMS 0xb0 /* One Millisecond register */ 66#define ONEMS 0xb0 /* One Millisecond register */
67#define UTS 0xb4 /* UART Test Register */ 67#define UTS 0xb4 /* UART Test Register */
68#endif 68#endif
69#ifdef CONFIG_ARCH_IMX 69#if defined(CONFIG_ARCH_IMX) || defined(CONFIG_ARCH_MX1)
70#define BIPR1 0xb0 /* Incremental Preset Register 1 */ 70#define BIPR1 0xb0 /* Incremental Preset Register 1 */
71#define BIPR2 0xb4 /* Incremental Preset Register 2 */ 71#define BIPR2 0xb4 /* Incremental Preset Register 2 */
72#define BIPR3 0xb8 /* Incremental Preset Register 3 */ 72#define BIPR3 0xb8 /* Incremental Preset Register 3 */
@@ -96,7 +96,7 @@
96#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */ 96#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
97#define UCR1_SNDBRK (1<<4) /* Send break */ 97#define UCR1_SNDBRK (1<<4) /* Send break */
98#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */ 98#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
99#ifdef CONFIG_ARCH_IMX 99#if defined(CONFIG_ARCH_IMX) || defined(CONFIG_ARCH_MX1)
100#define UCR1_UARTCLKEN (1<<2) /* UART clock enabled */ 100#define UCR1_UARTCLKEN (1<<2) /* UART clock enabled */
101#endif 101#endif
102#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2 102#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2
@@ -187,11 +187,11 @@
187#define MAX_INTERNAL_IRQ IMX_IRQS 187#define MAX_INTERNAL_IRQ IMX_IRQS
188#endif 188#endif
189 189
190#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2 190#ifdef CONFIG_ARCH_MXC
191#define SERIAL_IMX_MAJOR 207 191#define SERIAL_IMX_MAJOR 207
192#define MINOR_START 16 192#define MINOR_START 16
193#define DEV_NAME "ttymxc" 193#define DEV_NAME "ttymxc"
194#define MAX_INTERNAL_IRQ MXC_MAX_INT_LINES 194#define MAX_INTERNAL_IRQ MXC_INTERNAL_IRQS
195#endif 195#endif
196 196
197/* 197/*
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index abc00be55433..f6e3b86bb0be 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -48,6 +48,7 @@
48#include <mach/hardware.h> 48#include <mach/hardware.h>
49#include <asm/irq.h> 49#include <asm/irq.h>
50#include <mach/pxa-regs.h> 50#include <mach/pxa-regs.h>
51#include <mach/regs-uart.h>
51 52
52 53
53struct uart_pxa_port { 54struct uart_pxa_port {
@@ -766,7 +767,7 @@ static int serial_pxa_probe(struct platform_device *dev)
766 if (!sport) 767 if (!sport)
767 return -ENOMEM; 768 return -ENOMEM;
768 769
769 sport->clk = clk_get(&dev->dev, "UARTCLK"); 770 sport->clk = clk_get(&dev->dev, NULL);
770 if (IS_ERR(sport->clk)) { 771 if (IS_ERR(sport->clk)) {
771 ret = PTR_ERR(sport->clk); 772 ret = PTR_ERR(sport->clk);
772 goto err_free; 773 goto err_free;
diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c
new file mode 100644
index 000000000000..ebf2fd3c8f7d
--- /dev/null
+++ b/drivers/serial/s3c24a0.c
@@ -0,0 +1,118 @@
1/* linux/drivers/serial/s3c24a0.c
2 *
3 * Driver for Samsung S3C24A0 SoC onboard UARTs.
4 *
5 * Based on drivers/serial/s3c2410.c
6 *
7 * Author: Sandeep Patil <sandeep.patil@azingo.com>
8 *
9 * Ben Dooks, Copyright (c) 2003-2005,2008 Simtec Electronics
10 * http://armlinux.simtec.co.uk/
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15*/
16
17#include <linux/module.h>
18#include <linux/ioport.h>
19#include <linux/platform_device.h>
20#include <linux/init.h>
21#include <linux/serial_core.h>
22#include <linux/serial.h>
23#include <linux/io.h>
24#include <linux/irq.h>
25
26#include <mach/hardware.h>
27
28#include <plat/regs-serial.h>
29#include <mach/regs-gpio.h>
30
31#include "samsung.h"
32
33static int s3c24a0_serial_setsource(struct uart_port *port,
34 struct s3c24xx_uart_clksrc *clk)
35{
36 unsigned long ucon = rd_regl(port, S3C2410_UCON);
37
38 if (strcmp(clk->name, "uclk") == 0)
39 ucon |= S3C2410_UCON_UCLK;
40 else
41 ucon &= ~S3C2410_UCON_UCLK;
42
43 wr_regl(port, S3C2410_UCON, ucon);
44 return 0;
45}
46
47static int s3c24a0_serial_getsource(struct uart_port *port,
48 struct s3c24xx_uart_clksrc *clk)
49{
50 unsigned long ucon = rd_regl(port, S3C2410_UCON);
51
52 clk->divisor = 1;
53 clk->name = (ucon & S3C2410_UCON_UCLK) ? "uclk" : "pclk";
54
55 return 0;
56}
57
58static int s3c24a0_serial_resetport(struct uart_port *port,
59 struct s3c2410_uartcfg *cfg)
60{
61 dbg("s3c24a0_serial_resetport: port=%p (%08lx), cfg=%p\n",
62 port, port->mapbase, cfg);
63
64 wr_regl(port, S3C2410_UCON, cfg->ucon);
65 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
66
67 /* reset both fifos */
68
69 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
70 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
71
72 return 0;
73}
74
75static struct s3c24xx_uart_info s3c24a0_uart_inf = {
76 .name = "Samsung S3C24A0 UART",
77 .type = PORT_S3C2410,
78 .fifosize = 16,
79 .rx_fifomask = S3C24A0_UFSTAT_RXMASK,
80 .rx_fifoshift = S3C24A0_UFSTAT_RXSHIFT,
81 .rx_fifofull = S3C24A0_UFSTAT_RXFULL,
82 .tx_fifofull = S3C24A0_UFSTAT_TXFULL,
83 .tx_fifomask = S3C24A0_UFSTAT_TXMASK,
84 .tx_fifoshift = S3C24A0_UFSTAT_TXSHIFT,
85 .get_clksrc = s3c24a0_serial_getsource,
86 .set_clksrc = s3c24a0_serial_setsource,
87 .reset_port = s3c24a0_serial_resetport,
88};
89
90static int s3c24a0_serial_probe(struct platform_device *dev)
91{
92 return s3c24xx_serial_probe(dev, &s3c24a0_uart_inf);
93}
94
95static struct platform_driver s3c24a0_serial_drv = {
96 .probe = s3c24a0_serial_probe,
97 .remove = s3c24xx_serial_remove,
98 .driver = {
99 .name = "s3c24a0-uart",
100 .owner = THIS_MODULE,
101 },
102};
103
104s3c24xx_console_init(&s3c24a0_serial_drv, &s3c24a0_uart_inf);
105
106static int __init s3c24a0_serial_init(void)
107{
108 return s3c24xx_serial_init(&s3c24a0_serial_drv, &s3c24a0_uart_inf);
109}
110
111static void __exit s3c24a0_serial_exit(void)
112{
113 platform_driver_unregister(&s3c24a0_serial_drv);
114}
115
116module_init(s3c24a0_serial_init);
117module_exit(s3c24a0_serial_exit);
118
diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c
new file mode 100644
index 000000000000..06936d13393f
--- /dev/null
+++ b/drivers/serial/s3c6400.c
@@ -0,0 +1,151 @@
1/* linux/drivers/serial/s3c6400.c
2 *
3 * Driver for Samsung S3C6400 and S3C6410 SoC onboard UARTs.
4 *
5 * Copyright 2008 Openmoko, Inc.
6 * Copyright 2008 Simtec Electronics
7 * Ben Dooks <ben@simtec.co.uk>
8 * http://armlinux.simtec.co.uk/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#include <linux/module.h>
16#include <linux/ioport.h>
17#include <linux/io.h>
18#include <linux/platform_device.h>
19#include <linux/init.h>
20#include <linux/serial_core.h>
21#include <linux/serial.h>
22
23#include <asm/irq.h>
24#include <mach/hardware.h>
25
26#include <plat/regs-serial.h>
27
28#include "samsung.h"
29
30static int s3c6400_serial_setsource(struct uart_port *port,
31 struct s3c24xx_uart_clksrc *clk)
32{
33 unsigned long ucon = rd_regl(port, S3C2410_UCON);
34
35 if (strcmp(clk->name, "uclk0") == 0) {
36 ucon &= ~S3C6400_UCON_CLKMASK;
37 ucon |= S3C6400_UCON_UCLK0;
38 } else if (strcmp(clk->name, "uclk1") == 0)
39 ucon |= S3C6400_UCON_UCLK1;
40 else if (strcmp(clk->name, "pclk") == 0) {
41 /* See notes about transitioning from UCLK to PCLK */
42 ucon &= ~S3C6400_UCON_UCLK0;
43 } else {
44 printk(KERN_ERR "unknown clock source %s\n", clk->name);
45 return -EINVAL;
46 }
47
48 wr_regl(port, S3C2410_UCON, ucon);
49 return 0;
50}
51
52
53static int s3c6400_serial_getsource(struct uart_port *port,
54 struct s3c24xx_uart_clksrc *clk)
55{
56 u32 ucon = rd_regl(port, S3C2410_UCON);
57
58 clk->divisor = 1;
59
60 switch (ucon & S3C6400_UCON_CLKMASK) {
61 case S3C6400_UCON_UCLK0:
62 clk->name = "uclk0";
63 break;
64
65 case S3C6400_UCON_UCLK1:
66 clk->name = "uclk1";
67 break;
68
69 case S3C6400_UCON_PCLK:
70 case S3C6400_UCON_PCLK2:
71 clk->name = "pclk";
72 break;
73 }
74
75 return 0;
76}
77
78static int s3c6400_serial_resetport(struct uart_port *port,
79 struct s3c2410_uartcfg *cfg)
80{
81 unsigned long ucon = rd_regl(port, S3C2410_UCON);
82
83 dbg("s3c6400_serial_resetport: port=%p (%08lx), cfg=%p\n",
84 port, port->mapbase, cfg);
85
86 /* ensure we don't change the clock settings... */
87
88 ucon &= S3C6400_UCON_CLKMASK;
89
90 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
91 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
92
93 /* reset both fifos */
94
95 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
96 wr_regl(port, S3C2410_UFCON, cfg->ufcon);
97
98 return 0;
99}
100
101static struct s3c24xx_uart_info s3c6400_uart_inf = {
102 .name = "Samsung S3C6400 UART",
103 .type = PORT_S3C6400,
104 .fifosize = 64,
105 .rx_fifomask = S3C2440_UFSTAT_RXMASK,
106 .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
107 .rx_fifofull = S3C2440_UFSTAT_RXFULL,
108 .tx_fifofull = S3C2440_UFSTAT_TXFULL,
109 .tx_fifomask = S3C2440_UFSTAT_TXMASK,
110 .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
111 .get_clksrc = s3c6400_serial_getsource,
112 .set_clksrc = s3c6400_serial_setsource,
113 .reset_port = s3c6400_serial_resetport,
114};
115
116/* device management */
117
118static int s3c6400_serial_probe(struct platform_device *dev)
119{
120 dbg("s3c6400_serial_probe: dev=%p\n", dev);
121 return s3c24xx_serial_probe(dev, &s3c6400_uart_inf);
122}
123
124static struct platform_driver s3c6400_serial_drv = {
125 .probe = s3c6400_serial_probe,
126 .remove = s3c24xx_serial_remove,
127 .driver = {
128 .name = "s3c6400-uart",
129 .owner = THIS_MODULE,
130 },
131};
132
133s3c24xx_console_init(&s3c6400_serial_drv, &s3c6400_uart_inf);
134
135static int __init s3c6400_serial_init(void)
136{
137 return s3c24xx_serial_init(&s3c6400_serial_drv, &s3c6400_uart_inf);
138}
139
140static void __exit s3c6400_serial_exit(void)
141{
142 platform_driver_unregister(&s3c6400_serial_drv);
143}
144
145module_init(s3c6400_serial_init);
146module_exit(s3c6400_serial_exit);
147
148MODULE_DESCRIPTION("Samsung S3C6400,S3C6410 SoC Serial port driver");
149MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
150MODULE_LICENSE("GPL v2");
151MODULE_ALIAS("platform:s3c6400-uart");
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 1e219d3d0352..41ac94872b8d 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -42,13 +42,14 @@
42#include <linux/serial.h> 42#include <linux/serial.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/clk.h> 44#include <linux/clk.h>
45#include <linux/cpufreq.h>
45 46
46#include <asm/irq.h> 47#include <asm/irq.h>
47 48
48#include <mach/hardware.h> 49#include <mach/hardware.h>
50#include <mach/map.h>
49 51
50#include <plat/regs-serial.h> 52#include <plat/regs-serial.h>
51#include <mach/regs-gpio.h>
52 53
53#include "samsung.h" 54#include "samsung.h"
54 55
@@ -58,19 +59,6 @@
58#define S3C24XX_SERIAL_MAJOR 204 59#define S3C24XX_SERIAL_MAJOR 204
59#define S3C24XX_SERIAL_MINOR 64 60#define S3C24XX_SERIAL_MINOR 64
60 61
61/* we can support 3 uarts, but not always use them */
62
63#ifdef CONFIG_CPU_S3C2400
64#define NR_PORTS (2)
65#else
66#define NR_PORTS (3)
67#endif
68
69/* port irq numbers */
70
71#define TX_IRQ(port) ((port)->irq + 1)
72#define RX_IRQ(port) ((port)->irq)
73
74/* macros to change one thing to another */ 62/* macros to change one thing to another */
75 63
76#define tx_enabled(port) ((port)->unused[0]) 64#define tx_enabled(port) ((port)->unused[0])
@@ -136,8 +124,10 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
136 124
137static void s3c24xx_serial_stop_tx(struct uart_port *port) 125static void s3c24xx_serial_stop_tx(struct uart_port *port)
138{ 126{
127 struct s3c24xx_uart_port *ourport = to_ourport(port);
128
139 if (tx_enabled(port)) { 129 if (tx_enabled(port)) {
140 disable_irq(TX_IRQ(port)); 130 disable_irq(ourport->tx_irq);
141 tx_enabled(port) = 0; 131 tx_enabled(port) = 0;
142 if (port->flags & UPF_CONS_FLOW) 132 if (port->flags & UPF_CONS_FLOW)
143 s3c24xx_serial_rx_enable(port); 133 s3c24xx_serial_rx_enable(port);
@@ -146,11 +136,13 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
146 136
147static void s3c24xx_serial_start_tx(struct uart_port *port) 137static void s3c24xx_serial_start_tx(struct uart_port *port)
148{ 138{
139 struct s3c24xx_uart_port *ourport = to_ourport(port);
140
149 if (!tx_enabled(port)) { 141 if (!tx_enabled(port)) {
150 if (port->flags & UPF_CONS_FLOW) 142 if (port->flags & UPF_CONS_FLOW)
151 s3c24xx_serial_rx_disable(port); 143 s3c24xx_serial_rx_disable(port);
152 144
153 enable_irq(TX_IRQ(port)); 145 enable_irq(ourport->tx_irq);
154 tx_enabled(port) = 1; 146 tx_enabled(port) = 1;
155 } 147 }
156} 148}
@@ -158,9 +150,11 @@ static void s3c24xx_serial_start_tx(struct uart_port *port)
158 150
159static void s3c24xx_serial_stop_rx(struct uart_port *port) 151static void s3c24xx_serial_stop_rx(struct uart_port *port)
160{ 152{
153 struct s3c24xx_uart_port *ourport = to_ourport(port);
154
161 if (rx_enabled(port)) { 155 if (rx_enabled(port)) {
162 dbg("s3c24xx_serial_stop_rx: port=%p\n", port); 156 dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
163 disable_irq(RX_IRQ(port)); 157 disable_irq(ourport->rx_irq);
164 rx_enabled(port) = 0; 158 rx_enabled(port) = 0;
165 } 159 }
166} 160}
@@ -384,13 +378,13 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
384 struct s3c24xx_uart_port *ourport = to_ourport(port); 378 struct s3c24xx_uart_port *ourport = to_ourport(port);
385 379
386 if (ourport->tx_claimed) { 380 if (ourport->tx_claimed) {
387 free_irq(TX_IRQ(port), ourport); 381 free_irq(ourport->tx_irq, ourport);
388 tx_enabled(port) = 0; 382 tx_enabled(port) = 0;
389 ourport->tx_claimed = 0; 383 ourport->tx_claimed = 0;
390 } 384 }
391 385
392 if (ourport->rx_claimed) { 386 if (ourport->rx_claimed) {
393 free_irq(RX_IRQ(port), ourport); 387 free_irq(ourport->rx_irq, ourport);
394 ourport->rx_claimed = 0; 388 ourport->rx_claimed = 0;
395 rx_enabled(port) = 0; 389 rx_enabled(port) = 0;
396 } 390 }
@@ -407,12 +401,11 @@ static int s3c24xx_serial_startup(struct uart_port *port)
407 401
408 rx_enabled(port) = 1; 402 rx_enabled(port) = 1;
409 403
410 ret = request_irq(RX_IRQ(port), 404 ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_chars, 0,
411 s3c24xx_serial_rx_chars, 0,
412 s3c24xx_serial_portname(port), ourport); 405 s3c24xx_serial_portname(port), ourport);
413 406
414 if (ret != 0) { 407 if (ret != 0) {
415 printk(KERN_ERR "cannot get irq %d\n", RX_IRQ(port)); 408 printk(KERN_ERR "cannot get irq %d\n", ourport->rx_irq);
416 return ret; 409 return ret;
417 } 410 }
418 411
@@ -422,12 +415,11 @@ static int s3c24xx_serial_startup(struct uart_port *port)
422 415
423 tx_enabled(port) = 1; 416 tx_enabled(port) = 1;
424 417
425 ret = request_irq(TX_IRQ(port), 418 ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_chars, 0,
426 s3c24xx_serial_tx_chars, 0,
427 s3c24xx_serial_portname(port), ourport); 419 s3c24xx_serial_portname(port), ourport);
428 420
429 if (ret) { 421 if (ret) {
430 printk(KERN_ERR "cannot get irq %d\n", TX_IRQ(port)); 422 printk(KERN_ERR "cannot get irq %d\n", ourport->tx_irq);
431 goto err; 423 goto err;
432 } 424 }
433 425
@@ -452,6 +444,8 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
452{ 444{
453 struct s3c24xx_uart_port *ourport = to_ourport(port); 445 struct s3c24xx_uart_port *ourport = to_ourport(port);
454 446
447 ourport->pm_level = level;
448
455 switch (level) { 449 switch (level) {
456 case 3: 450 case 3:
457 if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL) 451 if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
@@ -661,6 +655,7 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
661 655
662 ourport->clksrc = clksrc; 656 ourport->clksrc = clksrc;
663 ourport->baudclk = clk; 657 ourport->baudclk = clk;
658 ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
664 } 659 }
665 660
666 switch (termios->c_cflag & CSIZE) { 661 switch (termios->c_cflag & CSIZE) {
@@ -752,6 +747,8 @@ static const char *s3c24xx_serial_type(struct uart_port *port)
752 return "S3C2440"; 747 return "S3C2440";
753 case PORT_S3C2412: 748 case PORT_S3C2412:
754 return "S3C2412"; 749 return "S3C2412";
750 case PORT_S3C6400:
751 return "S3C6400/10";
755 default: 752 default:
756 return NULL; 753 return NULL;
757 } 754 }
@@ -827,14 +824,14 @@ static struct uart_ops s3c24xx_serial_ops = {
827static struct uart_driver s3c24xx_uart_drv = { 824static struct uart_driver s3c24xx_uart_drv = {
828 .owner = THIS_MODULE, 825 .owner = THIS_MODULE,
829 .dev_name = "s3c2410_serial", 826 .dev_name = "s3c2410_serial",
830 .nr = 3, 827 .nr = CONFIG_SERIAL_SAMSUNG_UARTS,
831 .cons = S3C24XX_SERIAL_CONSOLE, 828 .cons = S3C24XX_SERIAL_CONSOLE,
832 .driver_name = S3C24XX_SERIAL_NAME, 829 .driver_name = S3C24XX_SERIAL_NAME,
833 .major = S3C24XX_SERIAL_MAJOR, 830 .major = S3C24XX_SERIAL_MAJOR,
834 .minor = S3C24XX_SERIAL_MINOR, 831 .minor = S3C24XX_SERIAL_MINOR,
835}; 832};
836 833
837static struct s3c24xx_uart_port s3c24xx_serial_ports[NR_PORTS] = { 834static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
838 [0] = { 835 [0] = {
839 .port = { 836 .port = {
840 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock), 837 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock),
@@ -859,7 +856,7 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[NR_PORTS] = {
859 .line = 1, 856 .line = 1,
860 } 857 }
861 }, 858 },
862#if NR_PORTS > 2 859#if CONFIG_SERIAL_SAMSUNG_UARTS > 2
863 860
864 [2] = { 861 [2] = {
865 .port = { 862 .port = {
@@ -872,6 +869,20 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[NR_PORTS] = {
872 .flags = UPF_BOOT_AUTOCONF, 869 .flags = UPF_BOOT_AUTOCONF,
873 .line = 2, 870 .line = 2,
874 } 871 }
872 },
873#endif
874#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
875 [3] = {
876 .port = {
877 .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[3].port.lock),
878 .iotype = UPIO_MEM,
879 .irq = IRQ_S3CUART_RX3,
880 .uartclk = 0,
881 .fifosize = 16,
882 .ops = &s3c24xx_serial_ops,
883 .flags = UPF_BOOT_AUTOCONF,
884 .line = 3,
885 }
875 } 886 }
876#endif 887#endif
877}; 888};
@@ -890,6 +901,89 @@ static inline int s3c24xx_serial_resetport(struct uart_port *port,
890 return (info->reset_port)(port, cfg); 901 return (info->reset_port)(port, cfg);
891} 902}
892 903
904
905#ifdef CONFIG_CPU_FREQ
906
907static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
908 unsigned long val, void *data)
909{
910 struct s3c24xx_uart_port *port;
911 struct uart_port *uport;
912
913 port = container_of(nb, struct s3c24xx_uart_port, freq_transition);
914 uport = &port->port;
915
916 /* check to see if port is enabled */
917
918 if (port->pm_level != 0)
919 return 0;
920
921 /* try and work out if the baudrate is changing, we can detect
922 * a change in rate, but we do not have support for detecting
923 * a disturbance in the clock-rate over the change.
924 */
925
926 if (IS_ERR(port->clk))
927 goto exit;
928
929 if (port->baudclk_rate == clk_get_rate(port->clk))
930 goto exit;
931
932 if (val == CPUFREQ_PRECHANGE) {
933 /* we should really shut the port down whilst the
934 * frequency change is in progress. */
935
936 } else if (val == CPUFREQ_POSTCHANGE) {
937 struct ktermios *termios;
938 struct tty_struct *tty;
939
940 if (uport->info == NULL)
941 goto exit;
942
943 tty = uport->info->port.tty;
944
945 if (tty == NULL)
946 goto exit;
947
948 termios = tty->termios;
949
950 if (termios == NULL) {
951 printk(KERN_WARNING "%s: no termios?\n", __func__);
952 goto exit;
953 }
954
955 s3c24xx_serial_set_termios(uport, termios, NULL);
956 }
957
958 exit:
959 return 0;
960}
961
962static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
963{
964 port->freq_transition.notifier_call = s3c24xx_serial_cpufreq_transition;
965
966 return cpufreq_register_notifier(&port->freq_transition,
967 CPUFREQ_TRANSITION_NOTIFIER);
968}
969
970static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
971{
972 cpufreq_unregister_notifier(&port->freq_transition,
973 CPUFREQ_TRANSITION_NOTIFIER);
974}
975
976#else
977static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
978{
979 return 0;
980}
981
982static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
983{
984}
985#endif
986
893/* s3c24xx_serial_init_port 987/* s3c24xx_serial_init_port
894 * 988 *
895 * initialise a single serial port from the platform device given 989 * initialise a single serial port from the platform device given
@@ -914,8 +1008,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
914 if (port->mapbase != 0) 1008 if (port->mapbase != 0)
915 return 0; 1009 return 0;
916 1010
917 if (cfg->hwport > 3) 1011 if (cfg->hwport > CONFIG_SERIAL_SAMSUNG_UARTS) {
918 return -EINVAL; 1012 printk(KERN_ERR "%s: port %d bigger than %d\n", __func__,
1013 cfg->hwport, CONFIG_SERIAL_SAMSUNG_UARTS);
1014 return -ERANGE;
1015 }
919 1016
920 /* setup info for port */ 1017 /* setup info for port */
921 port->dev = &platdev->dev; 1018 port->dev = &platdev->dev;
@@ -943,18 +1040,26 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
943 1040
944 dbg("resource %p (%lx..%lx)\n", res, res->start, res->end); 1041 dbg("resource %p (%lx..%lx)\n", res, res->start, res->end);
945 1042
946 port->mapbase = res->start; 1043 port->mapbase = res->start;
947 port->membase = S3C24XX_VA_UART + (res->start - S3C24XX_PA_UART); 1044 port->membase = S3C_VA_UART + res->start - (S3C_PA_UART & 0xfff00000);
948 ret = platform_get_irq(platdev, 0); 1045 ret = platform_get_irq(platdev, 0);
949 if (ret < 0) 1046 if (ret < 0)
950 port->irq = 0; 1047 port->irq = 0;
951 else 1048 else {
952 port->irq = ret; 1049 port->irq = ret;
1050 ourport->rx_irq = ret;
1051 ourport->tx_irq = ret + 1;
1052 }
1053
1054 ret = platform_get_irq(platdev, 1);
1055 if (ret > 0)
1056 ourport->tx_irq = ret;
953 1057
954 ourport->clk = clk_get(&platdev->dev, "uart"); 1058 ourport->clk = clk_get(&platdev->dev, "uart");
955 1059
956 dbg("port: map=%08x, mem=%08x, irq=%d, clock=%ld\n", 1060 dbg("port: map=%08x, mem=%08x, irq=%d (%d,%d), clock=%ld\n",
957 port->mapbase, port->membase, port->irq, port->uartclk); 1061 port->mapbase, port->membase, port->irq,
1062 ourport->rx_irq, ourport->tx_irq, port->uartclk);
958 1063
959 /* reset the fifos (and setup the uart) */ 1064 /* reset the fifos (and setup the uart) */
960 s3c24xx_serial_resetport(port, cfg); 1065 s3c24xx_serial_resetport(port, cfg);
@@ -1002,6 +1107,10 @@ int s3c24xx_serial_probe(struct platform_device *dev,
1002 if (ret < 0) 1107 if (ret < 0)
1003 printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__); 1108 printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
1004 1109
1110 ret = s3c24xx_serial_cpufreq_register(ourport);
1111 if (ret < 0)
1112 dev_err(&dev->dev, "failed to add cpufreq notifier\n");
1113
1005 return 0; 1114 return 0;
1006 1115
1007 probe_err: 1116 probe_err:
@@ -1015,6 +1124,7 @@ int s3c24xx_serial_remove(struct platform_device *dev)
1015 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev); 1124 struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
1016 1125
1017 if (port) { 1126 if (port) {
1127 s3c24xx_serial_cpufreq_deregister(to_ourport(port));
1018 device_remove_file(&dev->dev, &dev_attr_clock_source); 1128 device_remove_file(&dev->dev, &dev_attr_clock_source);
1019 uart_remove_one_port(&s3c24xx_uart_drv, port); 1129 uart_remove_one_port(&s3c24xx_uart_drv, port);
1020 } 1130 }
@@ -1219,7 +1329,7 @@ static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info *info)
1219 1329
1220 platdev_ptr = s3c24xx_uart_devs; 1330 platdev_ptr = s3c24xx_uart_devs;
1221 1331
1222 for (i = 0; i < NR_PORTS; i++, ptr++, platdev_ptr++) { 1332 for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++, ptr++, platdev_ptr++) {
1223 s3c24xx_serial_init_port(ptr, info, *platdev_ptr); 1333 s3c24xx_serial_init_port(ptr, info, *platdev_ptr);
1224 } 1334 }
1225 1335
@@ -1240,7 +1350,7 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
1240 1350
1241 /* is this a valid port */ 1351 /* is this a valid port */
1242 1352
1243 if (co->index == -1 || co->index >= NR_PORTS) 1353 if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
1244 co->index = 0; 1354 co->index = 0;
1245 1355
1246 port = &s3c24xx_serial_ports[co->index].port; 1356 port = &s3c24xx_serial_ports[co->index].port;
diff --git a/drivers/serial/samsung.h b/drivers/serial/samsung.h
index 5c92ebbe7d9e..571d6b90d206 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/serial/samsung.h
@@ -33,12 +33,21 @@ struct s3c24xx_uart_info {
33struct s3c24xx_uart_port { 33struct s3c24xx_uart_port {
34 unsigned char rx_claimed; 34 unsigned char rx_claimed;
35 unsigned char tx_claimed; 35 unsigned char tx_claimed;
36 unsigned int pm_level;
37 unsigned long baudclk_rate;
38
39 unsigned int rx_irq;
40 unsigned int tx_irq;
36 41
37 struct s3c24xx_uart_info *info; 42 struct s3c24xx_uart_info *info;
38 struct s3c24xx_uart_clksrc *clksrc; 43 struct s3c24xx_uart_clksrc *clksrc;
39 struct clk *clk; 44 struct clk *clk;
40 struct clk *baudclk; 45 struct clk *baudclk;
41 struct uart_port port; 46 struct uart_port port;
47
48#ifdef CONFIG_CPU_FREQ
49 struct notifier_block freq_transition;
50#endif
42}; 51};
43 52
44/* conversion functions */ 53/* conversion functions */
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c
index 61dc8b3daa26..a7bf024a8286 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/serial/serial_lh7a40x.c
@@ -41,9 +41,10 @@
41#include <linux/tty_flip.h> 41#include <linux/tty_flip.h>
42#include <linux/serial_core.h> 42#include <linux/serial_core.h>
43#include <linux/serial.h> 43#include <linux/serial.h>
44#include <linux/io.h>
44 45
45#include <asm/io.h>
46#include <asm/irq.h> 46#include <asm/irq.h>
47#include <mach/hardware.h>
47 48
48#define DEV_MAJOR 204 49#define DEV_MAJOR 204
49#define DEV_MINOR 16 50#define DEV_MINOR 16
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index cf12f2d84be2..6104f461a3cd 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -32,8 +32,8 @@
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/irq.h> 33#include <asm/irq.h>
34#include <asm/delay.h> 34#include <asm/delay.h>
35#include <asm/dma.h>
36 35
36#include <mach/dma.h>
37#include <mach/hardware.h> 37#include <mach/hardware.h>
38#include <mach/pxa-regs.h> 38#include <mach/pxa-regs.h>
39#include <mach/regs-ssp.h> 39#include <mach/regs-ssp.h>
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index c252cbac00f1..256d18395a23 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -28,7 +28,7 @@
28#include <mach/hardware.h> 28#include <mach/hardware.h>
29 29
30#include <mach/regs-gpio.h> 30#include <mach/regs-gpio.h>
31#include <asm/plat-s3c24xx/regs-spi.h> 31#include <plat/regs-spi.h>
32#include <mach/spi.h> 32#include <mach/spi.h>
33 33
34struct s3c24xx_spi { 34struct s3c24xx_spi {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 2dbc0db0b46c..8c5026be79d4 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2145,7 +2145,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
2145 if (irq < 0) 2145 if (irq < 0)
2146 return -ENODEV; 2146 return -ENODEV;
2147 2147
2148 dev->clk = clk_get(&pdev->dev, "UDCCLK"); 2148 dev->clk = clk_get(&pdev->dev, NULL);
2149 if (IS_ERR(dev->clk)) { 2149 if (IS_ERR(dev->clk)) {
2150 retval = PTR_ERR(dev->clk); 2150 retval = PTR_ERR(dev->clk);
2151 goto err_clk; 2151 goto err_clk;
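Annotation: this hunk, like the pxa27x_udc, ohci-pxa27x and amba-clcd hunks further down, drops the hard-coded connection id ("UDCCLK" here) and lets the clock framework resolve the clock from the device alone, which is what the per-device clock lookup tables on these platforms now expect. A generic consumer sketch of the pattern (device handling and error paths are illustrative, the clk API calls are real):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_clk_setup(struct platform_device *pdev, struct clk **out)
{
	struct clk *clk;
	int ret;

	/* NULL id: match the clock against the device itself */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*out = clk;	/* caller does clk_disable()/clk_put() on teardown */
	return 0;
}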
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index caa37c95802c..944e4ff641df 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -2226,7 +2226,7 @@ static int __init pxa_udc_probe(struct platform_device *pdev)
2226 udc->dev = &pdev->dev; 2226 udc->dev = &pdev->dev;
2227 udc->mach = pdev->dev.platform_data; 2227 udc->mach = pdev->dev.platform_data;
2228 2228
2229 udc->clk = clk_get(&pdev->dev, "UDCCLK"); 2229 udc->clk = clk_get(&pdev->dev, NULL);
2230 if (IS_ERR(udc->clk)) { 2230 if (IS_ERR(udc->clk)) {
2231 retval = PTR_ERR(udc->clk); 2231 retval = PTR_ERR(udc->clk);
2232 goto err_clk; 2232 goto err_clk;
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 00ba06b44752..8d8d65165983 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -53,8 +53,8 @@
53#include <mach/hardware.h> 53#include <mach/hardware.h>
54#include <mach/regs-gpio.h> 54#include <mach/regs-gpio.h>
55 55
56#include <asm/plat-s3c24xx/regs-udc.h> 56#include <plat/regs-udc.h>
57#include <asm/plat-s3c24xx/udc.h> 57#include <plat/udc.h>
58 58
59 59
60#include "s3c2410_udc.h" 60#include "s3c2410_udc.h"
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 5416cf969005..9d487908012e 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -33,8 +33,9 @@
33/* 33/*
34 * Implement Orion USB controller specification guidelines 34 * Implement Orion USB controller specification guidelines
35 */ 35 */
36static void orion_usb_setup(struct usb_hcd *hcd) 36static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
37{ 37{
38 /* The below GLs are according to the Orion Errata document */
38 /* 39 /*
39 * Clear interrupt cause and mask 40 * Clear interrupt cause and mask
40 */ 41 */
@@ -258,9 +259,19 @@ static int __init ehci_orion_drv_probe(struct platform_device *pdev)
258 ehci_orion_conf_mbus_windows(hcd, pd->dram); 259 ehci_orion_conf_mbus_windows(hcd, pd->dram);
259 260
260 /* 261 /*
261 * setup Orion USB controller 262 * setup Orion USB controller.
262 */ 263 */
263 orion_usb_setup(hcd); 264 switch (pd->phy_version) {
 265	case EHCI_PHY_NA:	/* don't change USB phy settings */
266 break;
267 case EHCI_PHY_ORION:
268 orion_usb_phy_v1_setup(hcd);
269 break;
270 case EHCI_PHY_DD:
271 case EHCI_PHY_KW:
272 default:
 273		printk(KERN_WARNING "Orion EHCI: USB PHY version isn't supported.\n");
274 }
264 275
265 err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED); 276 err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
266 if (err) 277 if (err)
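Annotation: the probe path now selects the PHY setup from pd->phy_version instead of unconditionally running the Orion-specific sequence, so one driver can serve controllers whose PHY must be left alone (EHCI_PHY_NA) or isn't handled yet (EHCI_PHY_DD/EHCI_PHY_KW). A rough sketch of how a board file might feed that field in through platform data; the struct layout and device name below are assumptions for illustration, only the field and constant names come from the hunk above:

#include <linux/platform_device.h>

/* assumed shape of the platform data the driver reads as "pd" above */
struct demo_ehci_data {
	int phy_version;	/* EHCI_PHY_NA, EHCI_PHY_ORION, ... */
};

static struct demo_ehci_data demo_ehci_pdata = {
	.phy_version = 1,	/* stand-in for EHCI_PHY_ORION */
};

static struct platform_device demo_ehci_device = {
	.name = "demo-ehci",	/* hypothetical device name */
	.id = -1,
	.dev = {
		.platform_data = &demo_ehci_pdata,
	},
};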
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 91697bdb399f..4bbddb73abd9 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -18,6 +18,7 @@
18#include <linux/jiffies.h> 18#include <linux/jiffies.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/gpio.h>
21 22
22#include <mach/hardware.h> 23#include <mach/hardware.h>
23#include <asm/io.h> 24#include <asm/io.h>
@@ -25,7 +26,6 @@
25 26
26#include <mach/mux.h> 27#include <mach/mux.h>
27#include <mach/irqs.h> 28#include <mach/irqs.h>
28#include <mach/gpio.h>
29#include <mach/fpga.h> 29#include <mach/fpga.h>
30#include <mach/usb.h> 30#include <mach/usb.h>
31 31
@@ -254,8 +254,8 @@ static int ohci_omap_init(struct usb_hcd *hcd)
254 254
 255	/* gpio9 for overcurrent detection */	 255	/* gpio9 for overcurrent detection */
256 omap_cfg_reg(W8_1610_GPIO9); 256 omap_cfg_reg(W8_1610_GPIO9);
257 omap_request_gpio(9); 257 gpio_request(9, "OHCI overcurrent");
258 omap_set_gpio_direction(9, 1 /* IN */); 258 gpio_direction_input(9);
259 259
260 /* for paranoia's sake: disable USB.PUEN */ 260 /* for paranoia's sake: disable USB.PUEN */
261 omap_cfg_reg(W4_USB_HIGHZ); 261 omap_cfg_reg(W4_USB_HIGHZ);
@@ -407,7 +407,7 @@ usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
407 put_device(ohci->transceiver->dev); 407 put_device(ohci->transceiver->dev);
408 } 408 }
409 if (machine_is_omap_osk()) 409 if (machine_is_omap_osk())
410 omap_free_gpio(9); 410 gpio_free(9);
411 iounmap(hcd->regs); 411 iounmap(hcd->regs);
412 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 412 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
413 usb_put_hcd(hcd); 413 usb_put_hcd(hcd);
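Annotation: the ohci-omap hunks above replace the OMAP-private GPIO helpers with generic gpiolib calls. The original code ignores the return values; a minimal sketch of the same claim with error handling looks like this (the GPIO number and label are taken from the hunk, everything else is illustrative):

#include <linux/gpio.h>

#define DEMO_OC_GPIO	9	/* overcurrent detect line used above */

static int demo_claim_overcurrent_gpio(void)
{
	int ret;

	ret = gpio_request(DEMO_OC_GPIO, "OHCI overcurrent");
	if (ret)
		return ret;

	ret = gpio_direction_input(DEMO_OC_GPIO);
	if (ret)
		gpio_free(DEMO_OC_GPIO);

	return ret;
}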
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index e294d430733b..e44dc2cbca24 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -296,7 +296,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
296 return -ENXIO; 296 return -ENXIO;
297 } 297 }
298 298
299 usb_clk = clk_get(&pdev->dev, "USBCLK"); 299 usb_clk = clk_get(&pdev->dev, NULL);
300 if (IS_ERR(usb_clk)) 300 if (IS_ERR(usb_clk))
301 return PTR_ERR(usb_clk); 301 return PTR_ERR(usb_clk);
302 302
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d0c821992a99..6372f8b17b45 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -362,7 +362,7 @@ endchoice
362 362
363config FB_ACORN 363config FB_ACORN
364 bool "Acorn VIDC support" 364 bool "Acorn VIDC support"
365 depends on (FB = y) && ARM && (ARCH_ACORN || ARCH_CLPS7500) 365 depends on (FB = y) && ARM && ARCH_ACORN
366 select FB_CFB_FILLRECT 366 select FB_CFB_FILLRECT
367 select FB_CFB_COPYAREA 367 select FB_CFB_COPYAREA
368 select FB_CFB_IMAGEBLIT 368 select FB_CFB_IMAGEBLIT
@@ -1817,6 +1817,11 @@ config FB_PXA
1817 1817
1818 If unsure, say N. 1818 If unsure, say N.
1819 1819
1820config FB_PXA_OVERLAY
1821 bool "Support PXA27x/PXA3xx Overlay(s) as framebuffer"
1822 default n
1823 depends on FB_PXA && (PXA27x || PXA3xx)
1824
1820config FB_PXA_SMARTPANEL 1825config FB_PXA_SMARTPANEL
1821 bool "PXA Smartpanel LCD support" 1826 bool "PXA Smartpanel LCD support"
1822 default n 1827 default n
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index a7a1c891bfa2..2ac52fd8cc11 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -343,14 +343,14 @@ static int clcdfb_register(struct clcd_fb *fb)
343{ 343{
344 int ret; 344 int ret;
345 345
346 fb->clk = clk_get(&fb->dev->dev, "CLCDCLK"); 346 fb->clk = clk_get(&fb->dev->dev, NULL);
347 if (IS_ERR(fb->clk)) { 347 if (IS_ERR(fb->clk)) {
348 ret = PTR_ERR(fb->clk); 348 ret = PTR_ERR(fb->clk);
349 goto out; 349 goto out;
350 } 350 }
351 351
352 fb->fb.fix.mmio_start = fb->dev->res.start; 352 fb->fb.fix.mmio_start = fb->dev->res.start;
353 fb->fb.fix.mmio_len = SZ_4K; 353 fb->fb.fix.mmio_len = 4096;
354 354
355 fb->regs = ioremap(fb->fb.fix.mmio_start, fb->fb.fix.mmio_len); 355 fb->regs = ioremap(fb->fb.fix.mmio_start, fb->fb.fix.mmio_len);
356 if (!fb->regs) { 356 if (!fb->regs) {
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 448d209a0bf2..e6210725b9ab 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -112,6 +112,23 @@ static int vga_video_font_height;
112static int vga_scan_lines __read_mostly; 112static int vga_scan_lines __read_mostly;
113static unsigned int vga_rolled_over; 113static unsigned int vga_rolled_over;
114 114
115int vgacon_text_mode_force = 0;
116
117bool vgacon_text_force(void)
118{
119 return vgacon_text_mode_force ? true : false;
120}
121EXPORT_SYMBOL(vgacon_text_force);
122
123static int __init text_mode(char *str)
124{
125 vgacon_text_mode_force = 1;
126 return 1;
127}
128
129/* force text mode - used by kernel modesetting */
130__setup("nomodeset", text_mode);
131
115static int __init no_scroll(char *str) 132static int __init no_scroll(char *str)
116{ 133{
117 /* 134 /*
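Annotation: the vgacon hunk introduces the "nomodeset" boot parameter and exports vgacon_text_force() so that kernel-modesetting drivers elsewhere in this merge can decline to load when the user insists on plain VGA text mode. A hedged sketch of the consumer side; the driver and init function names are hypothetical, and the declaration is assumed to be visible via linux/console.h:

#include <linux/console.h>
#include <linux/module.h>

static int __init demo_kms_init(void)
{
	/* honour "nomodeset": leave the console to vgacon */
	if (vgacon_text_force()) {
		printk(KERN_INFO "demo_kms: nomodeset given, not loading\n");
		return -ENODEV;
	}

	/* ... register the DRM/KMS driver here ... */
	return 0;
}
module_init(demo_kms_init);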
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 41d62632dcdb..39d5d643a50b 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1513,7 +1513,7 @@ static int cyberpro_pci_enable_mmio(struct cfb_info *cfb)
1513 1513
1514 iop = ioremap(0x3000000, 0x5000); 1514 iop = ioremap(0x3000000, 0x5000);
1515 if (iop == NULL) { 1515 if (iop == NULL) {
1516 prom_printf("iga5000: cannot map I/O\n"); 1516 printk(KERN_ERR "iga5000: cannot map I/O\n");
1517 return -ENOMEM; 1517 return -ENOMEM;
1518 } 1518 }
1519 1519
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index ccd986140c95..d58c68cd456e 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/drivers/video/imxfb.c
3 *
4 * Freescale i.MX Frame Buffer device driver 2 * Freescale i.MX Frame Buffer device driver
5 * 3 *
6 * Copyright (C) 2004 Sascha Hauer, Pengutronix 4 * Copyright (C) 2004 Sascha Hauer, Pengutronix
@@ -16,7 +14,6 @@
16 * linux-arm-kernel@lists.arm.linux.org.uk 14 * linux-arm-kernel@lists.arm.linux.org.uk
17 */ 15 */
18 16
19//#define DEBUG 1
20 17
21#include <linux/module.h> 18#include <linux/module.h>
22#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -32,9 +29,8 @@
32#include <linux/cpufreq.h> 29#include <linux/cpufreq.h>
33#include <linux/platform_device.h> 30#include <linux/platform_device.h>
34#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/io.h>
35 33
36#include <mach/hardware.h>
37#include <asm/io.h>
38#include <mach/imxfb.h> 34#include <mach/imxfb.h>
39 35
40/* 36/*
@@ -42,23 +38,150 @@
42 */ 38 */
43#define DEBUG_VAR 1 39#define DEBUG_VAR 1
44 40
45#include "imxfb.h" 41#define DRIVER_NAME "imx-fb"
42
43#define LCDC_SSA 0x00
44
45#define LCDC_SIZE 0x04
46#define SIZE_XMAX(x) ((((x) >> 4) & 0x3f) << 20)
47#define SIZE_YMAX(y) ((y) & 0x1ff)
48
49#define LCDC_VPW 0x08
50#define VPW_VPW(x) ((x) & 0x3ff)
51
52#define LCDC_CPOS 0x0C
53#define CPOS_CC1 (1<<31)
54#define CPOS_CC0 (1<<30)
55#define CPOS_OP (1<<28)
 56#define CPOS_CXP(x) (((x) & 0x3ff) << 16)
57#define CPOS_CYP(y) ((y) & 0x1ff)
58
59#define LCDC_LCWHB 0x10
60#define LCWHB_BK_EN (1<<31)
61#define LCWHB_CW(w) (((w) & 0x1f) << 24)
62#define LCWHB_CH(h) (((h) & 0x1f) << 16)
63#define LCWHB_BD(x) ((x) & 0xff)
64
65#define LCDC_LCHCC 0x14
66#define LCHCC_CUR_COL_R(r) (((r) & 0x1f) << 11)
67#define LCHCC_CUR_COL_G(g) (((g) & 0x3f) << 5)
68#define LCHCC_CUR_COL_B(b) ((b) & 0x1f)
69
70#define LCDC_PCR 0x18
71
72#define LCDC_HCR 0x1C
73#define HCR_H_WIDTH(x) (((x) & 0x3f) << 26)
74#define HCR_H_WAIT_1(x) (((x) & 0xff) << 8)
75#define HCR_H_WAIT_2(x) ((x) & 0xff)
76
77#define LCDC_VCR 0x20
78#define VCR_V_WIDTH(x) (((x) & 0x3f) << 26)
79#define VCR_V_WAIT_1(x) (((x) & 0xff) << 8)
80#define VCR_V_WAIT_2(x) ((x) & 0xff)
81
82#define LCDC_POS 0x24
 83#define POS_POS(x) ((x) & 0x1f)
84
85#define LCDC_LSCR1 0x28
86/* bit fields in imxfb.h */
87
88#define LCDC_PWMR 0x2C
89/* bit fields in imxfb.h */
90
91#define LCDC_DMACR 0x30
92/* bit fields in imxfb.h */
93
94#define LCDC_RMCR 0x34
95#define RMCR_LCDC_EN (1<<1)
96#define RMCR_SELF_REF (1<<0)
97
98#define LCDC_LCDICR 0x38
99#define LCDICR_INT_SYN (1<<2)
100#define LCDICR_INT_CON (1)
101
102#define LCDC_LCDISR 0x40
103#define LCDISR_UDR_ERR (1<<3)
104#define LCDISR_ERR_RES (1<<2)
105#define LCDISR_EOF (1<<1)
106#define LCDISR_BOF (1<<0)
107
108/*
109 * These are the bitfields for each
110 * display depth that we support.
111 */
112struct imxfb_rgb {
113 struct fb_bitfield red;
114 struct fb_bitfield green;
115 struct fb_bitfield blue;
116 struct fb_bitfield transp;
117};
118
119struct imxfb_info {
120 struct platform_device *pdev;
121 void __iomem *regs;
46 122
47static struct imxfb_rgb def_rgb_16 = { 123 u_int max_bpp;
48 .red = { .offset = 8, .length = 4, }, 124 u_int max_xres;
49 .green = { .offset = 4, .length = 4, }, 125 u_int max_yres;
50 .blue = { .offset = 0, .length = 4, }, 126
51 .transp = { .offset = 0, .length = 0, }, 127 /*
128 * These are the addresses we mapped
129 * the framebuffer memory region to.
130 */
131 dma_addr_t map_dma;
132 u_char *map_cpu;
133 u_int map_size;
134
135 u_char *screen_cpu;
136 dma_addr_t screen_dma;
137 u_int palette_size;
138
139 dma_addr_t dbar1;
140 dma_addr_t dbar2;
141
142 u_int pcr;
143 u_int pwmr;
144 u_int lscr1;
145 u_int dmacr;
146 u_int cmap_inverse:1,
147 cmap_static:1,
148 unused:30;
149
150 void (*lcd_power)(int);
151 void (*backlight_power)(int);
152};
153
154#define IMX_NAME "IMX"
155
156/*
157 * Minimum X and Y resolutions
158 */
159#define MIN_XRES 64
160#define MIN_YRES 64
161
162static struct imxfb_rgb def_rgb_16_tft = {
163 .red = {.offset = 11, .length = 5,},
164 .green = {.offset = 5, .length = 6,},
165 .blue = {.offset = 0, .length = 5,},
166 .transp = {.offset = 0, .length = 0,},
167};
168
169static struct imxfb_rgb def_rgb_16_stn = {
170 .red = {.offset = 8, .length = 4,},
171 .green = {.offset = 4, .length = 4,},
172 .blue = {.offset = 0, .length = 4,},
173 .transp = {.offset = 0, .length = 0,},
52}; 174};
53 175
54static struct imxfb_rgb def_rgb_8 = { 176static struct imxfb_rgb def_rgb_8 = {
55 .red = { .offset = 0, .length = 8, }, 177 .red = {.offset = 0, .length = 8,},
56 .green = { .offset = 0, .length = 8, }, 178 .green = {.offset = 0, .length = 8,},
57 .blue = { .offset = 0, .length = 8, }, 179 .blue = {.offset = 0, .length = 8,},
58 .transp = { .offset = 0, .length = 0, }, 180 .transp = {.offset = 0, .length = 0,},
59}; 181};
60 182
61static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *info); 183static int imxfb_activate_var(struct fb_var_screeninfo *var,
184 struct fb_info *info);
62 185
63static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf) 186static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf)
64{ 187{
@@ -67,10 +190,8 @@ static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf)
67 return chan << bf->offset; 190 return chan << bf->offset;
68} 191}
69 192
70#define LCDC_PALETTE(x) __REG2(IMX_LCDC_BASE+0x800, (x)<<2) 193static int imxfb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue,
71static int 194 u_int trans, struct fb_info *info)
72imxfb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue,
73 u_int trans, struct fb_info *info)
74{ 195{
75 struct imxfb_info *fbi = info->par; 196 struct imxfb_info *fbi = info->par;
76 u_int val, ret = 1; 197 u_int val, ret = 1;
@@ -81,14 +202,13 @@ imxfb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue,
81 (CNVT_TOHW(green,4) << 4) | 202 (CNVT_TOHW(green,4) << 4) |
82 CNVT_TOHW(blue, 4); 203 CNVT_TOHW(blue, 4);
83 204
84 LCDC_PALETTE(regno) = val; 205 writel(val, fbi->regs + 0x800 + (regno << 2));
85 ret = 0; 206 ret = 0;
86 } 207 }
87 return ret; 208 return ret;
88} 209}
89 210
90static int 211static int imxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
91imxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
92 u_int trans, struct fb_info *info) 212 u_int trans, struct fb_info *info)
93{ 213{
94 struct imxfb_info *fbi = info->par; 214 struct imxfb_info *fbi = info->par;
@@ -148,11 +268,10 @@ imxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
148 * yres, xres_virtual, yres_virtual, xoffset, yoffset, grayscale, 268 * yres, xres_virtual, yres_virtual, xoffset, yoffset, grayscale,
149 * bitfields, horizontal timing, vertical timing. 269 * bitfields, horizontal timing, vertical timing.
150 */ 270 */
151static int 271static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
152imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
153{ 272{
154 struct imxfb_info *fbi = info->par; 273 struct imxfb_info *fbi = info->par;
155 int rgbidx; 274 struct imxfb_rgb *rgb;
156 275
157 if (var->xres < MIN_XRES) 276 if (var->xres < MIN_XRES)
158 var->xres = MIN_XRES; 277 var->xres = MIN_XRES;
@@ -168,23 +287,25 @@ imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
168 pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel); 287 pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel);
169 switch (var->bits_per_pixel) { 288 switch (var->bits_per_pixel) {
170 case 16: 289 case 16:
171 rgbidx = RGB_16; 290 default:
291 if (readl(fbi->regs + LCDC_PCR) & PCR_TFT)
292 rgb = &def_rgb_16_tft;
293 else
294 rgb = &def_rgb_16_stn;
172 break; 295 break;
173 case 8: 296 case 8:
174 rgbidx = RGB_8; 297 rgb = &def_rgb_8;
175 break; 298 break;
176 default:
177 rgbidx = RGB_16;
178 } 299 }
179 300
180 /* 301 /*
181 * Copy the RGB parameters for this display 302 * Copy the RGB parameters for this display
182 * from the machine specific parameters. 303 * from the machine specific parameters.
183 */ 304 */
184 var->red = fbi->rgb[rgbidx]->red; 305 var->red = rgb->red;
185 var->green = fbi->rgb[rgbidx]->green; 306 var->green = rgb->green;
186 var->blue = fbi->rgb[rgbidx]->blue; 307 var->blue = rgb->blue;
187 var->transp = fbi->rgb[rgbidx]->transp; 308 var->transp = rgb->transp;
188 309
189 pr_debug("RGBT length = %d:%d:%d:%d\n", 310 pr_debug("RGBT length = %d:%d:%d:%d\n",
190 var->red.length, var->green.length, var->blue.length, 311 var->red.length, var->green.length, var->blue.length,
@@ -221,8 +342,7 @@ static int imxfb_set_par(struct fb_info *info)
221 info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR; 342 info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
222 } 343 }
223 344
224 info->fix.line_length = var->xres_virtual * 345 info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
225 var->bits_per_pixel / 8;
226 fbi->palette_size = var->bits_per_pixel == 8 ? 256 : 16; 346 fbi->palette_size = var->bits_per_pixel == 8 ? 256 : 16;
227 347
228 imxfb_activate_var(var, info); 348 imxfb_activate_var(var, info);
@@ -235,22 +355,27 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
235 pr_debug("Enabling LCD controller\n"); 355 pr_debug("Enabling LCD controller\n");
236 356
237 /* initialize LCDC */ 357 /* initialize LCDC */
238 LCDC_RMCR &= ~RMCR_LCDC_EN; /* just to be safe... */ 358 writel(readl(fbi->regs + LCDC_RMCR) & ~RMCR_LCDC_EN,
359 fbi->regs + LCDC_RMCR); /* just to be safe... */
360
361 writel(fbi->screen_dma, fbi->regs + LCDC_SSA);
239 362
240 LCDC_SSA = fbi->screen_dma;
241 /* physical screen start address */ 363 /* physical screen start address */
242 LCDC_VPW = VPW_VPW(fbi->max_xres * fbi->max_bpp / 8 / 4); 364 writel(VPW_VPW(fbi->max_xres * fbi->max_bpp / 8 / 4),
365 fbi->regs + LCDC_VPW);
243 366
244 LCDC_POS = 0x00000000; /* panning offset 0 (0 pixel offset) */ 367 /* panning offset 0 (0 pixel offset) */
368 writel(0x00000000, fbi->regs + LCDC_POS);
245 369
246 /* disable hardware cursor */ 370 /* disable hardware cursor */
247 LCDC_CPOS &= ~(CPOS_CC0 | CPOS_CC1); 371 writel(readl(fbi->regs + LCDC_CPOS) & ~(CPOS_CC0 | CPOS_CC1),
372 fbi->regs + LCDC_CPOS);
248 373
249 LCDC_RMCR = RMCR_LCDC_EN; 374 writel(RMCR_LCDC_EN, fbi->regs + LCDC_RMCR);
250 375
251 if(fbi->backlight_power) 376 if (fbi->backlight_power)
252 fbi->backlight_power(1); 377 fbi->backlight_power(1);
253 if(fbi->lcd_power) 378 if (fbi->lcd_power)
254 fbi->lcd_power(1); 379 fbi->lcd_power(1);
255} 380}
256 381
@@ -258,12 +383,12 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
258{ 383{
259 pr_debug("Disabling LCD controller\n"); 384 pr_debug("Disabling LCD controller\n");
260 385
261 if(fbi->backlight_power) 386 if (fbi->backlight_power)
262 fbi->backlight_power(0); 387 fbi->backlight_power(0);
263 if(fbi->lcd_power) 388 if (fbi->lcd_power)
264 fbi->lcd_power(0); 389 fbi->lcd_power(0);
265 390
266 LCDC_RMCR = 0; 391 writel(0, fbi->regs + LCDC_RMCR);
267} 392}
268 393
269static int imxfb_blank(int blank, struct fb_info *info) 394static int imxfb_blank(int blank, struct fb_info *info)
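Annotation: from here on the imxfb conversion replaces the old absolute-address register macros with accesses through an ioremap()ed window held in fbi->regs, using readl()/writel() and explicit read-modify-write where the old code used |=/&= on a macro. A condensed sketch of that idiom, reusing the LCDC_RMCR offset and RMCR_LCDC_EN bit defined earlier in this diff:

#include <linux/io.h>
#include <linux/types.h>

#define LCDC_RMCR	0x34
#define RMCR_LCDC_EN	(1 << 1)

/* clear the enable bit through the mapped register window */
static void demo_lcdc_disable(void __iomem *regs)
{
	u32 val = readl(regs + LCDC_RMCR);

	writel(val & ~RMCR_LCDC_EN, regs + LCDC_RMCR);
}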
@@ -340,74 +465,26 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
340 info->fix.id, var->lower_margin); 465 info->fix.id, var->lower_margin);
341#endif 466#endif
342 467
343 LCDC_HCR = HCR_H_WIDTH(var->hsync_len) | 468 writel(HCR_H_WIDTH(var->hsync_len) |
344 HCR_H_WAIT_1(var->left_margin) | 469 HCR_H_WAIT_1(var->right_margin) |
345 HCR_H_WAIT_2(var->right_margin); 470 HCR_H_WAIT_2(var->left_margin),
471 fbi->regs + LCDC_HCR);
346 472
347 LCDC_VCR = VCR_V_WIDTH(var->vsync_len) | 473 writel(VCR_V_WIDTH(var->vsync_len) |
348 VCR_V_WAIT_1(var->upper_margin) | 474 VCR_V_WAIT_1(var->lower_margin) |
349 VCR_V_WAIT_2(var->lower_margin); 475 VCR_V_WAIT_2(var->upper_margin),
476 fbi->regs + LCDC_VCR);
350 477
351 LCDC_SIZE = SIZE_XMAX(var->xres) | SIZE_YMAX(var->yres); 478 writel(SIZE_XMAX(var->xres) | SIZE_YMAX(var->yres),
352 LCDC_PCR = fbi->pcr; 479 fbi->regs + LCDC_SIZE);
353 LCDC_PWMR = fbi->pwmr; 480 writel(fbi->pcr, fbi->regs + LCDC_PCR);
354 LCDC_LSCR1 = fbi->lscr1; 481 writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
355 LCDC_DMACR = fbi->dmacr; 482 writel(fbi->lscr1, fbi->regs + LCDC_LSCR1);
483 writel(fbi->dmacr, fbi->regs + LCDC_DMACR);
356 484
357 return 0; 485 return 0;
358} 486}
359 487
360static void imxfb_setup_gpio(struct imxfb_info *fbi)
361{
362 int width;
363
364 LCDC_RMCR &= ~(RMCR_LCDC_EN | RMCR_SELF_REF);
365
366 if( fbi->pcr & PCR_TFT )
367 width = 16;
368 else
369 width = 1 << ((fbi->pcr >> 28) & 0x3);
370
371 switch(width) {
372 case 16:
373 imx_gpio_mode(PD30_PF_LD15);
374 imx_gpio_mode(PD29_PF_LD14);
375 imx_gpio_mode(PD28_PF_LD13);
376 imx_gpio_mode(PD27_PF_LD12);
377 imx_gpio_mode(PD26_PF_LD11);
378 imx_gpio_mode(PD25_PF_LD10);
379 imx_gpio_mode(PD24_PF_LD9);
380 imx_gpio_mode(PD23_PF_LD8);
381 case 8:
382 imx_gpio_mode(PD22_PF_LD7);
383 imx_gpio_mode(PD21_PF_LD6);
384 imx_gpio_mode(PD20_PF_LD5);
385 imx_gpio_mode(PD19_PF_LD4);
386 case 4:
387 imx_gpio_mode(PD18_PF_LD3);
388 imx_gpio_mode(PD17_PF_LD2);
389 case 2:
390 imx_gpio_mode(PD16_PF_LD1);
391 case 1:
392 imx_gpio_mode(PD15_PF_LD0);
393 }
394
395 /* initialize GPIOs */
396 imx_gpio_mode(PD6_PF_LSCLK);
397 imx_gpio_mode(PD11_PF_CONTRAST);
398 imx_gpio_mode(PD14_PF_FLM_VSYNC);
399 imx_gpio_mode(PD13_PF_LP_HSYNC);
400 imx_gpio_mode(PD12_PF_ACD_OE);
401
402 /* These are only needed for Sharp HR TFT displays */
403 if (fbi->pcr & PCR_SHARP) {
404 imx_gpio_mode(PD7_PF_REV);
405 imx_gpio_mode(PD8_PF_CLS);
406 imx_gpio_mode(PD9_PF_PS);
407 imx_gpio_mode(PD10_PF_SPL_SPR);
408 }
409}
410
411#ifdef CONFIG_PM 488#ifdef CONFIG_PM
412/* 489/*
413 * Power management hooks. Note that we won't be called from IRQ context, 490 * Power management hooks. Note that we won't be called from IRQ context,
@@ -416,7 +493,8 @@ static void imxfb_setup_gpio(struct imxfb_info *fbi)
416static int imxfb_suspend(struct platform_device *dev, pm_message_t state) 493static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
417{ 494{
418 struct imxfb_info *fbi = platform_get_drvdata(dev); 495 struct imxfb_info *fbi = platform_get_drvdata(dev);
419 pr_debug("%s\n",__func__); 496
497 pr_debug("%s\n", __func__);
420 498
421 imxfb_disable_controller(fbi); 499 imxfb_disable_controller(fbi);
422 return 0; 500 return 0;
@@ -425,7 +503,8 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
425static int imxfb_resume(struct platform_device *dev) 503static int imxfb_resume(struct platform_device *dev)
426{ 504{
427 struct imxfb_info *fbi = platform_get_drvdata(dev); 505 struct imxfb_info *fbi = platform_get_drvdata(dev);
428 pr_debug("%s\n",__func__); 506
507 pr_debug("%s\n", __func__);
429 508
430 imxfb_enable_controller(fbi); 509 imxfb_enable_controller(fbi);
431 return 0; 510 return 0;
@@ -435,149 +514,136 @@ static int imxfb_resume(struct platform_device *dev)
435#define imxfb_resume NULL 514#define imxfb_resume NULL
436#endif 515#endif
437 516
438static int __init imxfb_init_fbinfo(struct device *dev) 517static int __init imxfb_init_fbinfo(struct platform_device *pdev)
439{ 518{
440 struct imxfb_mach_info *inf = dev->platform_data; 519 struct imx_fb_platform_data *pdata = pdev->dev.platform_data;
441 struct fb_info *info = dev_get_drvdata(dev); 520 struct fb_info *info = dev_get_drvdata(&pdev->dev);
442 struct imxfb_info *fbi = info->par; 521 struct imxfb_info *fbi = info->par;
443 522
444 pr_debug("%s\n",__func__); 523 pr_debug("%s\n",__func__);
445 524
446 info->pseudo_palette = kmalloc( sizeof(u32) * 16, GFP_KERNEL); 525 info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
447 if (!info->pseudo_palette) 526 if (!info->pseudo_palette)
448 return -ENOMEM; 527 return -ENOMEM;
449 528
450 memset(fbi, 0, sizeof(struct imxfb_info)); 529 memset(fbi, 0, sizeof(struct imxfb_info));
451 fbi->dev = dev;
452 530
453 strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id)); 531 strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
454 532
455 info->fix.type = FB_TYPE_PACKED_PIXELS; 533 info->fix.type = FB_TYPE_PACKED_PIXELS;
456 info->fix.type_aux = 0; 534 info->fix.type_aux = 0;
457 info->fix.xpanstep = 0; 535 info->fix.xpanstep = 0;
458 info->fix.ypanstep = 0; 536 info->fix.ypanstep = 0;
459 info->fix.ywrapstep = 0; 537 info->fix.ywrapstep = 0;
460 info->fix.accel = FB_ACCEL_NONE; 538 info->fix.accel = FB_ACCEL_NONE;
461 539
462 info->var.nonstd = 0; 540 info->var.nonstd = 0;
463 info->var.activate = FB_ACTIVATE_NOW; 541 info->var.activate = FB_ACTIVATE_NOW;
464 info->var.height = -1; 542 info->var.height = -1;
465 info->var.width = -1; 543 info->var.width = -1;
466 info->var.accel_flags = 0; 544 info->var.accel_flags = 0;
467 info->var.vmode = FB_VMODE_NONINTERLACED; 545 info->var.vmode = FB_VMODE_NONINTERLACED;
468 546
469 info->fbops = &imxfb_ops; 547 info->fbops = &imxfb_ops;
470 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST; 548 info->flags = FBINFO_FLAG_DEFAULT |
471 549 FBINFO_READS_FAST;
472 fbi->rgb[RGB_16] = &def_rgb_16; 550
473 fbi->rgb[RGB_8] = &def_rgb_8; 551 fbi->max_xres = pdata->xres;
474 552 info->var.xres = pdata->xres;
475 fbi->max_xres = inf->xres; 553 info->var.xres_virtual = pdata->xres;
476 info->var.xres = inf->xres; 554 fbi->max_yres = pdata->yres;
477 info->var.xres_virtual = inf->xres; 555 info->var.yres = pdata->yres;
478 fbi->max_yres = inf->yres; 556 info->var.yres_virtual = pdata->yres;
479 info->var.yres = inf->yres; 557 fbi->max_bpp = pdata->bpp;
480 info->var.yres_virtual = inf->yres; 558 info->var.bits_per_pixel = pdata->bpp;
481 fbi->max_bpp = inf->bpp; 559 info->var.nonstd = pdata->nonstd;
482 info->var.bits_per_pixel = inf->bpp; 560 info->var.pixclock = pdata->pixclock;
483 info->var.nonstd = inf->nonstd; 561 info->var.hsync_len = pdata->hsync_len;
484 info->var.pixclock = inf->pixclock; 562 info->var.left_margin = pdata->left_margin;
485 info->var.hsync_len = inf->hsync_len; 563 info->var.right_margin = pdata->right_margin;
486 info->var.left_margin = inf->left_margin; 564 info->var.vsync_len = pdata->vsync_len;
487 info->var.right_margin = inf->right_margin; 565 info->var.upper_margin = pdata->upper_margin;
488 info->var.vsync_len = inf->vsync_len; 566 info->var.lower_margin = pdata->lower_margin;
489 info->var.upper_margin = inf->upper_margin; 567 info->var.sync = pdata->sync;
490 info->var.lower_margin = inf->lower_margin; 568 info->var.grayscale = pdata->cmap_greyscale;
491 info->var.sync = inf->sync; 569 fbi->cmap_inverse = pdata->cmap_inverse;
492 info->var.grayscale = inf->cmap_greyscale; 570 fbi->cmap_static = pdata->cmap_static;
493 fbi->cmap_inverse = inf->cmap_inverse; 571 fbi->pcr = pdata->pcr;
494 fbi->cmap_static = inf->cmap_static; 572 fbi->lscr1 = pdata->lscr1;
495 fbi->pcr = inf->pcr; 573 fbi->dmacr = pdata->dmacr;
496 fbi->lscr1 = inf->lscr1; 574 fbi->pwmr = pdata->pwmr;
497 fbi->dmacr = inf->dmacr; 575 fbi->lcd_power = pdata->lcd_power;
498 fbi->pwmr = inf->pwmr; 576 fbi->backlight_power = pdata->backlight_power;
499 fbi->lcd_power = inf->lcd_power;
500 fbi->backlight_power = inf->backlight_power;
501 info->fix.smem_len = fbi->max_xres * fbi->max_yres * 577 info->fix.smem_len = fbi->max_xres * fbi->max_yres *
502 fbi->max_bpp / 8; 578 fbi->max_bpp / 8;
503 579
504 return 0; 580 return 0;
505} 581}
506 582
507/*
508 * Allocates the DRAM memory for the frame buffer. This buffer is
509 * remapped into a non-cached, non-buffered, memory region to
510 * allow pixel writes to occur without flushing the cache.
511 * Once this area is remapped, all virtual memory access to the
512 * video memory should occur at the new region.
513 */
514static int __init imxfb_map_video_memory(struct fb_info *info)
515{
516 struct imxfb_info *fbi = info->par;
517
518 fbi->map_size = PAGE_ALIGN(info->fix.smem_len);
519 fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size,
520 &fbi->map_dma,GFP_KERNEL);
521
522 if (fbi->map_cpu) {
523 info->screen_base = fbi->map_cpu;
524 fbi->screen_cpu = fbi->map_cpu;
525 fbi->screen_dma = fbi->map_dma;
526 info->fix.smem_start = fbi->screen_dma;
527 }
528
529 return fbi->map_cpu ? 0 : -ENOMEM;
530}
531
532static int __init imxfb_probe(struct platform_device *pdev) 583static int __init imxfb_probe(struct platform_device *pdev)
533{ 584{
534 struct imxfb_info *fbi; 585 struct imxfb_info *fbi;
535 struct fb_info *info; 586 struct fb_info *info;
536 struct imxfb_mach_info *inf; 587 struct imx_fb_platform_data *pdata;
537 struct resource *res; 588 struct resource *res;
538 int ret; 589 int ret;
539 590
540 printk("i.MX Framebuffer driver\n"); 591 printk("i.MX Framebuffer driver\n");
541 592
542 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 593 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
543 if(!res) 594 if (!res)
544 return -ENODEV; 595 return -ENODEV;
545 596
546 inf = pdev->dev.platform_data; 597 pdata = pdev->dev.platform_data;
547 if(!inf) { 598 if (!pdata) {
548 dev_err(&pdev->dev,"No platform_data available\n"); 599 dev_err(&pdev->dev,"No platform_data available\n");
549 return -ENOMEM; 600 return -ENOMEM;
550 } 601 }
551 602
552 info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev); 603 info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev);
553 if(!info) 604 if (!info)
554 return -ENOMEM; 605 return -ENOMEM;
555 606
556 fbi = info->par; 607 fbi = info->par;
557 608
558 platform_set_drvdata(pdev, info); 609 platform_set_drvdata(pdev, info);
559 610
560 ret = imxfb_init_fbinfo(&pdev->dev); 611 ret = imxfb_init_fbinfo(pdev);
561 if( ret < 0 ) 612 if (ret < 0)
562 goto failed_init; 613 goto failed_init;
563 614
564 res = request_mem_region(res->start, res->end - res->start + 1, "IMXFB"); 615 res = request_mem_region(res->start, resource_size(res),
616 DRIVER_NAME);
565 if (!res) { 617 if (!res) {
566 ret = -EBUSY; 618 ret = -EBUSY;
567 goto failed_regs; 619 goto failed_req;
620 }
621
622 fbi->regs = ioremap(res->start, resource_size(res));
623 if (fbi->regs == NULL) {
624 printk(KERN_ERR"Cannot map frame buffer registers\n");
625 goto failed_ioremap;
568 } 626 }
569 627
570 if (!inf->fixed_screen_cpu) { 628 if (!pdata->fixed_screen_cpu) {
571 ret = imxfb_map_video_memory(info); 629 fbi->map_size = PAGE_ALIGN(info->fix.smem_len);
572 if (ret) { 630 fbi->map_cpu = dma_alloc_writecombine(&pdev->dev,
631 fbi->map_size, &fbi->map_dma, GFP_KERNEL);
632
633 if (!fbi->map_cpu) {
573 dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret); 634 dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
574 ret = -ENOMEM; 635 ret = -ENOMEM;
575 goto failed_map; 636 goto failed_map;
576 } 637 }
638
639 info->screen_base = fbi->map_cpu;
640 fbi->screen_cpu = fbi->map_cpu;
641 fbi->screen_dma = fbi->map_dma;
642 info->fix.smem_start = fbi->screen_dma;
577 } else { 643 } else {
578 /* Fixed framebuffer mapping enables location of the screen in eSRAM */ 644 /* Fixed framebuffer mapping enables location of the screen in eSRAM */
579 fbi->map_cpu = inf->fixed_screen_cpu; 645 fbi->map_cpu = pdata->fixed_screen_cpu;
580 fbi->map_dma = inf->fixed_screen_dma; 646 fbi->map_dma = pdata->fixed_screen_dma;
581 info->screen_base = fbi->map_cpu; 647 info->screen_base = fbi->map_cpu;
582 fbi->screen_cpu = fbi->map_cpu; 648 fbi->screen_cpu = fbi->map_cpu;
583 fbi->screen_dma = fbi->map_dma; 649 fbi->screen_dma = fbi->map_dma;
@@ -590,12 +656,10 @@ static int __init imxfb_probe(struct platform_device *pdev)
590 */ 656 */
591 imxfb_check_var(&info->var, info); 657 imxfb_check_var(&info->var, info);
592 658
593 ret = fb_alloc_cmap(&info->cmap, 1<<info->var.bits_per_pixel, 0); 659 ret = fb_alloc_cmap(&info->cmap, 1 << info->var.bits_per_pixel, 0);
594 if (ret < 0) 660 if (ret < 0)
595 goto failed_cmap; 661 goto failed_cmap;
596 662
597 imxfb_setup_gpio(fbi);
598
599 imxfb_set_par(info); 663 imxfb_set_par(info);
600 ret = register_framebuffer(info); 664 ret = register_framebuffer(info);
601 if (ret < 0) { 665 if (ret < 0) {
@@ -610,20 +674,22 @@ static int __init imxfb_probe(struct platform_device *pdev)
610failed_register: 674failed_register:
611 fb_dealloc_cmap(&info->cmap); 675 fb_dealloc_cmap(&info->cmap);
612failed_cmap: 676failed_cmap:
613 if (!inf->fixed_screen_cpu) 677 if (!pdata->fixed_screen_cpu)
614 dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu, 678 dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu,
615 fbi->map_dma); 679 fbi->map_dma);
616failed_map: 680failed_map:
617 kfree(info->pseudo_palette); 681 iounmap(fbi->regs);
618failed_regs: 682failed_ioremap:
619 release_mem_region(res->start, res->end - res->start); 683 release_mem_region(res->start, res->end - res->start);
684failed_req:
685 kfree(info->pseudo_palette);
620failed_init: 686failed_init:
621 platform_set_drvdata(pdev, NULL); 687 platform_set_drvdata(pdev, NULL);
622 framebuffer_release(info); 688 framebuffer_release(info);
623 return ret; 689 return ret;
624} 690}
625 691
626static int imxfb_remove(struct platform_device *pdev) 692static int __devexit imxfb_remove(struct platform_device *pdev)
627{ 693{
628 struct fb_info *info = platform_get_drvdata(pdev); 694 struct fb_info *info = platform_get_drvdata(pdev);
629 struct imxfb_info *fbi = info->par; 695 struct imxfb_info *fbi = info->par;
@@ -639,6 +705,7 @@ static int imxfb_remove(struct platform_device *pdev)
639 kfree(info->pseudo_palette); 705 kfree(info->pseudo_palette);
640 framebuffer_release(info); 706 framebuffer_release(info);
641 707
708 iounmap(fbi->regs);
642 release_mem_region(res->start, res->end - res->start + 1); 709 release_mem_region(res->start, res->end - res->start + 1);
643 platform_set_drvdata(pdev, NULL); 710 platform_set_drvdata(pdev, NULL);
644 711
@@ -653,19 +720,18 @@ void imxfb_shutdown(struct platform_device * dev)
653} 720}
654 721
655static struct platform_driver imxfb_driver = { 722static struct platform_driver imxfb_driver = {
656 .probe = imxfb_probe,
657 .suspend = imxfb_suspend, 723 .suspend = imxfb_suspend,
658 .resume = imxfb_resume, 724 .resume = imxfb_resume,
659 .remove = imxfb_remove, 725 .remove = __devexit_p(imxfb_remove),
660 .shutdown = imxfb_shutdown, 726 .shutdown = imxfb_shutdown,
661 .driver = { 727 .driver = {
662 .name = "imx-fb", 728 .name = DRIVER_NAME,
663 }, 729 },
664}; 730};
665 731
666int __init imxfb_init(void) 732int __init imxfb_init(void)
667{ 733{
668 return platform_driver_register(&imxfb_driver); 734 return platform_driver_probe(&imxfb_driver, imxfb_probe);
669} 735}
670 736
671static void __exit imxfb_cleanup(void) 737static void __exit imxfb_cleanup(void)
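Annotation: the registration change at the end of the imxfb diff is worth noting — .probe is dropped from the platform_driver and imxfb_init() switches to platform_driver_probe(), which binds only the devices already registered at init time and lets the __init probe code be discarded afterwards. A generic sketch of that pattern (names hypothetical, the API is real):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init demo_probe(struct platform_device *pdev)
{
	/* one-shot probe: no hotplug binding after init */
	return 0;
}

static struct platform_driver demo_driver = {
	/* note: no .probe member here */
	.driver = {
		.name = "demo-fb",
	},
};

static int __init demo_init(void)
{
	/* the probe callback is passed here instead */
	return platform_driver_probe(&demo_driver, demo_probe);
}
module_init(demo_init);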
diff --git a/drivers/video/imxfb.h b/drivers/video/imxfb.h
deleted file mode 100644
index e837a8b48eb8..000000000000
--- a/drivers/video/imxfb.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * linux/drivers/video/imxfb.h
3 *
4 * Freescale i.MX Frame Buffer device driver
5 *
6 * Copyright (C) 2004 S.Hauer, Pengutronix
7 *
8 * Copyright (C) 1999 Eric A. Thomas
9 * Based on acornfb.c Copyright (C) Russell King.
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive
13 * for more details.
14 */
15
16/*
17 * These are the bitfields for each
18 * display depth that we support.
19 */
20struct imxfb_rgb {
21 struct fb_bitfield red;
22 struct fb_bitfield green;
23 struct fb_bitfield blue;
24 struct fb_bitfield transp;
25};
26
27#define RGB_16 (0)
28#define RGB_8 (1)
29#define NR_RGB 2
30
31struct imxfb_info {
32 struct device *dev;
33 struct imxfb_rgb *rgb[NR_RGB];
34
35 u_int max_bpp;
36 u_int max_xres;
37 u_int max_yres;
38
39 /*
40 * These are the addresses we mapped
41 * the framebuffer memory region to.
42 */
43 dma_addr_t map_dma;
44 u_char * map_cpu;
45 u_int map_size;
46
47 u_char * screen_cpu;
48 dma_addr_t screen_dma;
49 u_int palette_size;
50
51 dma_addr_t dbar1;
52 dma_addr_t dbar2;
53
54 u_int pcr;
55 u_int pwmr;
56 u_int lscr1;
57 u_int dmacr;
58 u_int cmap_inverse:1,
59 cmap_static:1,
60 unused:30;
61
62 void (*lcd_power)(int);
63 void (*backlight_power)(int);
64};
65
66#define IMX_NAME "IMX"
67
68/*
69 * Minimum X and Y resolutions
70 */
71#define MIN_XRES 64
72#define MIN_YRES 64
73
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index cc59c52e1103..48ff701d3a72 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -20,6 +20,16 @@
20 * 20 *
21 * linux-arm-kernel@lists.arm.linux.org.uk 21 * linux-arm-kernel@lists.arm.linux.org.uk
22 * 22 *
23 * Add support for overlay1 and overlay2 based on pxafb_overlay.c:
24 *
25 * Copyright (C) 2004, Intel Corporation
26 *
27 * 2003/08/27: <yu.tang@intel.com>
28 * 2004/03/10: <stanley.cai@intel.com>
29 * 2004/10/28: <yan.yin@intel.com>
30 *
31 * Copyright (C) 2006-2008 Marvell International Ltd.
32 * All Rights Reserved
23 */ 33 */
24 34
25#include <linux/module.h> 35#include <linux/module.h>
@@ -50,7 +60,6 @@
50#include <asm/irq.h> 60#include <asm/irq.h>
51#include <asm/div64.h> 61#include <asm/div64.h>
52#include <mach/pxa-regs.h> 62#include <mach/pxa-regs.h>
53#include <mach/pxa2xx-gpio.h>
54#include <mach/bitfield.h> 63#include <mach/bitfield.h>
55#include <mach/pxafb.h> 64#include <mach/pxafb.h>
56 65
@@ -67,14 +76,16 @@
67 LCCR0_SFM | LCCR0_LDM | LCCR0_ENB) 76 LCCR0_SFM | LCCR0_LDM | LCCR0_ENB)
68 77
69#define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP | LCCR3_VSP |\ 78#define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP | LCCR3_VSP |\
70 LCCR3_PCD | LCCR3_BPP) 79 LCCR3_PCD | LCCR3_BPP(0xf))
71
72static void (*pxafb_backlight_power)(int);
73static void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *);
74 80
75static int pxafb_activate_var(struct fb_var_screeninfo *var, 81static int pxafb_activate_var(struct fb_var_screeninfo *var,
76 struct pxafb_info *); 82 struct pxafb_info *);
77static void set_ctrlr_state(struct pxafb_info *fbi, u_int state); 83static void set_ctrlr_state(struct pxafb_info *fbi, u_int state);
84static void setup_base_frame(struct pxafb_info *fbi, int branch);
85static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal,
86 unsigned long offset, size_t size);
87
88static unsigned long video_mem_size = 0;
78 89
79static inline unsigned long 90static inline unsigned long
80lcd_readl(struct pxafb_info *fbi, unsigned int off) 91lcd_readl(struct pxafb_info *fbi, unsigned int off)
@@ -156,6 +167,12 @@ pxafb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue,
156 val |= ((blue >> 8) & 0x000000fc); 167 val |= ((blue >> 8) & 0x000000fc);
157 ((u32 *)(fbi->palette_cpu))[regno] = val; 168 ((u32 *)(fbi->palette_cpu))[regno] = val;
158 break; 169 break;
170 case LCCR4_PAL_FOR_3:
171 val = ((red << 8) & 0x00ff0000);
172 val |= ((green >> 0) & 0x0000ff00);
173 val |= ((blue >> 8) & 0x000000ff);
174 ((u32 *)(fbi->palette_cpu))[regno] = val;
175 break;
159 } 176 }
160 177
161 return 0; 178 return 0;
@@ -216,37 +233,110 @@ pxafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
216 return ret; 233 return ret;
217} 234}
218 235
219/* 236/* calculate pixel depth, transparency bit included, >=16bpp formats _only_ */
220 * pxafb_bpp_to_lccr3(): 237static inline int var_to_depth(struct fb_var_screeninfo *var)
221 * Convert a bits per pixel value to the correct bit pattern for LCCR3
222 */
223static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
224{ 238{
225 int ret = 0; 239 return var->red.length + var->green.length +
240 var->blue.length + var->transp.length;
241}
242
243/* calculate 4-bit BPP value for LCCR3 and OVLxC1 */
244static int pxafb_var_to_bpp(struct fb_var_screeninfo *var)
245{
246 int bpp = -EINVAL;
247
226 switch (var->bits_per_pixel) { 248 switch (var->bits_per_pixel) {
227 case 1: ret = LCCR3_1BPP; break; 249 case 1: bpp = 0; break;
228 case 2: ret = LCCR3_2BPP; break; 250 case 2: bpp = 1; break;
229 case 4: ret = LCCR3_4BPP; break; 251 case 4: bpp = 2; break;
230 case 8: ret = LCCR3_8BPP; break; 252 case 8: bpp = 3; break;
231 case 16: ret = LCCR3_16BPP; break; 253 case 16: bpp = 4; break;
232 case 24: 254 case 24:
233 switch (var->red.length + var->green.length + 255 switch (var_to_depth(var)) {
234 var->blue.length + var->transp.length) { 256 case 18: bpp = 6; break; /* 18-bits/pixel packed */
235 case 18: ret = LCCR3_18BPP_P | LCCR3_PDFOR_3; break; 257 case 19: bpp = 8; break; /* 19-bits/pixel packed */
236 case 19: ret = LCCR3_19BPP_P; break; 258 case 24: bpp = 9; break;
237 } 259 }
238 break; 260 break;
239 case 32: 261 case 32:
240 switch (var->red.length + var->green.length + 262 switch (var_to_depth(var)) {
241 var->blue.length + var->transp.length) { 263 case 18: bpp = 5; break; /* 18-bits/pixel unpacked */
242 case 18: ret = LCCR3_18BPP | LCCR3_PDFOR_3; break; 264 case 19: bpp = 7; break; /* 19-bits/pixel unpacked */
243 case 19: ret = LCCR3_19BPP; break; 265 case 25: bpp = 10; break;
244 case 24: ret = LCCR3_24BPP | LCCR3_PDFOR_3; break;
245 case 25: ret = LCCR3_25BPP; break;
246 } 266 }
247 break; 267 break;
248 } 268 }
249 return ret; 269 return bpp;
270}
271
272/*
273 * pxafb_var_to_lccr3():
274 * Convert a bits per pixel value to the correct bit pattern for LCCR3
275 *
 276 * NOTE: for PXA27x with overlay support, the LCCR3_PDFOR_x bits have an
 277 * implication on the actual use of the transparency bit, which we handle
 278 * separately here. See PXA27x Developer's Manual, Section <<7.4.6 Pixel
279 * Formats>> for the valid combination of PDFOR, PAL_FOR for various BPP.
280 *
281 * Transparency for palette pixel formats is not supported at the moment.
282 */
283static uint32_t pxafb_var_to_lccr3(struct fb_var_screeninfo *var)
284{
285 int bpp = pxafb_var_to_bpp(var);
286 uint32_t lccr3;
287
288 if (bpp < 0)
289 return 0;
290
291 lccr3 = LCCR3_BPP(bpp);
292
293 switch (var_to_depth(var)) {
294 case 16: lccr3 |= var->transp.length ? LCCR3_PDFOR_3 : 0; break;
295 case 18: lccr3 |= LCCR3_PDFOR_3; break;
296 case 24: lccr3 |= var->transp.length ? LCCR3_PDFOR_2 : LCCR3_PDFOR_3;
297 break;
298 case 19:
299 case 25: lccr3 |= LCCR3_PDFOR_0; break;
300 }
301 return lccr3;
302}
303
304#define SET_PIXFMT(v, r, g, b, t) \
305({ \
306 (v)->transp.offset = (t) ? (r) + (g) + (b) : 0; \
307 (v)->transp.length = (t) ? (t) : 0; \
308 (v)->blue.length = (b); (v)->blue.offset = 0; \
309 (v)->green.length = (g); (v)->green.offset = (b); \
310 (v)->red.length = (r); (v)->red.offset = (b) + (g); \
311})
312
313/* set the RGBT bitfields of fb_var_screeninf according to
314 * var->bits_per_pixel and given depth
315 */
316static void pxafb_set_pixfmt(struct fb_var_screeninfo *var, int depth)
317{
318 if (depth == 0)
319 depth = var->bits_per_pixel;
320
321 if (var->bits_per_pixel < 16) {
322 /* indexed pixel formats */
323 var->red.offset = 0; var->red.length = 8;
324 var->green.offset = 0; var->green.length = 8;
325 var->blue.offset = 0; var->blue.length = 8;
326 var->transp.offset = 0; var->transp.length = 8;
327 }
328
329 switch (depth) {
330 case 16: var->transp.length ?
331 SET_PIXFMT(var, 5, 5, 5, 1) : /* RGBT555 */
332 SET_PIXFMT(var, 5, 6, 5, 0); break; /* RGB565 */
333 case 18: SET_PIXFMT(var, 6, 6, 6, 0); break; /* RGB666 */
334 case 19: SET_PIXFMT(var, 6, 6, 6, 1); break; /* RGBT666 */
335 case 24: var->transp.length ?
336 SET_PIXFMT(var, 8, 8, 7, 1) : /* RGBT887 */
337 SET_PIXFMT(var, 8, 8, 8, 0); break; /* RGB888 */
338 case 25: SET_PIXFMT(var, 8, 8, 8, 1); break; /* RGBT888 */
339 }
250} 340}
251 341
252#ifdef CONFIG_CPU_FREQ 342#ifdef CONFIG_CPU_FREQ
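Annotation: the SET_PIXFMT() macro added above packs the colour channels from blue upwards and places the optional transparency bit on top, so a single table in pxafb_set_pixfmt() covers RGB565 through RGBT888. As a quick sanity check, the RGB565 case expands to the familiar 5/6/5 layout; the small userspace program below (macro copied verbatim from the hunk, everything else illustrative) prints red 5@11, green 6@5, blue 5@0:

#include <linux/fb.h>
#include <stdio.h>

/* copy of the macro from the hunk above */
#define SET_PIXFMT(v, r, g, b, t) \
({ \
	(v)->transp.offset = (t) ? (r) + (g) + (b) : 0; \
	(v)->transp.length = (t) ? (t) : 0; \
	(v)->blue.length = (b); (v)->blue.offset = 0; \
	(v)->green.length = (g); (v)->green.offset = (b); \
	(v)->red.length = (r); (v)->red.offset = (b) + (g); \
})

int main(void)
{
	struct fb_var_screeninfo var = { 0 };

	SET_PIXFMT(&var, 5, 6, 5, 0);	/* RGB565, no transparency */
	printf("red %u@%u  green %u@%u  blue %u@%u\n",
	       var.red.length, var.red.offset,
	       var.green.length, var.green.offset,
	       var.blue.length, var.blue.offset);
	return 0;
}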
@@ -308,8 +398,49 @@ static void pxafb_setmode(struct fb_var_screeninfo *var,
308 var->lower_margin = mode->lower_margin; 398 var->lower_margin = mode->lower_margin;
309 var->sync = mode->sync; 399 var->sync = mode->sync;
310 var->grayscale = mode->cmap_greyscale; 400 var->grayscale = mode->cmap_greyscale;
311 var->xres_virtual = var->xres; 401
312 var->yres_virtual = var->yres; 402 /* set the initial RGBA bitfields */
403 pxafb_set_pixfmt(var, mode->depth);
404}
405
406static int pxafb_adjust_timing(struct pxafb_info *fbi,
407 struct fb_var_screeninfo *var)
408{
409 int line_length;
410
411 var->xres = max_t(int, var->xres, MIN_XRES);
412 var->yres = max_t(int, var->yres, MIN_YRES);
413
414 if (!(fbi->lccr0 & LCCR0_LCDT)) {
415 clamp_val(var->hsync_len, 1, 64);
416 clamp_val(var->vsync_len, 1, 64);
417 clamp_val(var->left_margin, 1, 255);
418 clamp_val(var->right_margin, 1, 255);
419 clamp_val(var->upper_margin, 1, 255);
420 clamp_val(var->lower_margin, 1, 255);
421 }
422
423 /* make sure each line is aligned on word boundary */
424 line_length = var->xres * var->bits_per_pixel / 8;
425 line_length = ALIGN(line_length, 4);
426 var->xres = line_length * 8 / var->bits_per_pixel;
427
428 /* we don't support xpan, force xres_virtual to be equal to xres */
429 var->xres_virtual = var->xres;
430
431 if (var->accel_flags & FB_ACCELF_TEXT)
432 var->yres_virtual = fbi->fb.fix.smem_len / line_length;
433 else
434 var->yres_virtual = max(var->yres_virtual, var->yres);
435
436 /* check for limits */
437 if (var->xres > MAX_XRES || var->yres > MAX_YRES)
438 return -EINVAL;
439
440 if (var->yres > var->yres_virtual)
441 return -EINVAL;
442
443 return 0;
313} 444}
314 445
315/* 446/*
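Annotation: pxafb_adjust_timing() above clamps the sync and porch values and, more importantly, rounds each scanline up to a 32-bit boundary, then recomputes xres from the aligned byte length. Worked through for a hypothetical 239-pixel-wide panel at 16 bpp: 239 * 16 / 8 = 478 bytes, aligned up to 480, giving xres = 480 * 8 / 16 = 240 pixels. The standalone check below reproduces that arithmetic (ALIGN_UP mimics the kernel's ALIGN() for power-of-two alignments):

#include <stdio.h>

/* same rounding as the kernel's ALIGN() for power-of-two alignments */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int xres = 239, bpp = 16;
	int line_length = xres * bpp / 8;	/* 478 bytes */

	line_length = ALIGN_UP(line_length, 4);	/* 480 bytes */
	xres = line_length * 8 / bpp;		/* back to pixels: 240 */

	printf("line_length=%d bytes, xres=%d pixels\n", line_length, xres);
	return 0;
}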
@@ -325,11 +456,7 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
325{ 456{
326 struct pxafb_info *fbi = (struct pxafb_info *)info; 457 struct pxafb_info *fbi = (struct pxafb_info *)info;
327 struct pxafb_mach_info *inf = fbi->dev->platform_data; 458 struct pxafb_mach_info *inf = fbi->dev->platform_data;
328 459 int err;
329 if (var->xres < MIN_XRES)
330 var->xres = MIN_XRES;
331 if (var->yres < MIN_YRES)
332 var->yres = MIN_YRES;
333 460
334 if (inf->fixed_modes) { 461 if (inf->fixed_modes) {
335 struct pxafb_mode_info *mode; 462 struct pxafb_mode_info *mode;
@@ -338,74 +465,18 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
338 if (!mode) 465 if (!mode)
339 return -EINVAL; 466 return -EINVAL;
340 pxafb_setmode(var, mode); 467 pxafb_setmode(var, mode);
341 } else {
342 if (var->xres > inf->modes->xres)
343 return -EINVAL;
344 if (var->yres > inf->modes->yres)
345 return -EINVAL;
346 if (var->bits_per_pixel > inf->modes->bpp)
347 return -EINVAL;
348 } 468 }
349 469
350 var->xres_virtual = 470 /* do a test conversion to BPP fields to check the color formats */
351 max(var->xres_virtual, var->xres); 471 err = pxafb_var_to_bpp(var);
352 var->yres_virtual = 472 if (err < 0)
353 max(var->yres_virtual, var->yres); 473 return err;
354 474
355 /* 475 pxafb_set_pixfmt(var, var_to_depth(var));
356 * Setup the RGB parameters for this display.
357 *
358 * The pixel packing format is described on page 7-11 of the
359 * PXA2XX Developer's Manual.
360 */
361 if (var->bits_per_pixel == 16) {
362 var->red.offset = 11; var->red.length = 5;
363 var->green.offset = 5; var->green.length = 6;
364 var->blue.offset = 0; var->blue.length = 5;
365 var->transp.offset = var->transp.length = 0;
366 } else if (var->bits_per_pixel > 16) {
367 struct pxafb_mode_info *mode;
368 476
369 mode = pxafb_getmode(inf, var); 477 err = pxafb_adjust_timing(fbi, var);
370 if (!mode) 478 if (err)
371 return -EINVAL; 479 return err;
372
373 switch (mode->depth) {
374 case 18: /* RGB666 */
375 var->transp.offset = var->transp.length = 0;
376 var->red.offset = 12; var->red.length = 6;
377 var->green.offset = 6; var->green.length = 6;
378 var->blue.offset = 0; var->blue.length = 6;
379 break;
380 case 19: /* RGBT666 */
381 var->transp.offset = 18; var->transp.length = 1;
382 var->red.offset = 12; var->red.length = 6;
383 var->green.offset = 6; var->green.length = 6;
384 var->blue.offset = 0; var->blue.length = 6;
385 break;
386 case 24: /* RGB888 */
387 var->transp.offset = var->transp.length = 0;
388 var->red.offset = 16; var->red.length = 8;
389 var->green.offset = 8; var->green.length = 8;
390 var->blue.offset = 0; var->blue.length = 8;
391 break;
392 case 25: /* RGBT888 */
393 var->transp.offset = 24; var->transp.length = 1;
394 var->red.offset = 16; var->red.length = 8;
395 var->green.offset = 8; var->green.length = 8;
396 var->blue.offset = 0; var->blue.length = 8;
397 break;
398 default:
399 return -EINVAL;
400 }
401 } else {
402 var->red.offset = var->green.offset = 0;
403 var->blue.offset = var->transp.offset = 0;
404 var->red.length = 8;
405 var->green.length = 8;
406 var->blue.length = 8;
407 var->transp.length = 0;
408 }
409 480
410#ifdef CONFIG_CPU_FREQ 481#ifdef CONFIG_CPU_FREQ
411 pr_debug("pxafb: dma period = %d ps\n", 482 pr_debug("pxafb: dma period = %d ps\n",
@@ -415,11 +486,6 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
415 return 0; 486 return 0;
416} 487}
417 488
418static inline void pxafb_set_truecolor(u_int is_true_color)
419{
420 /* do your machine-specific setup if needed */
421}
422
423/* 489/*
424 * pxafb_set_par(): 490 * pxafb_set_par():
425 * Set the user defined part of the display for the specified console 491 * Set the user defined part of the display for the specified console
@@ -452,11 +518,6 @@ static int pxafb_set_par(struct fb_info *info)
452 518
453 fbi->palette_cpu = (u16 *)&fbi->dma_buff->palette[0]; 519 fbi->palette_cpu = (u16 *)&fbi->dma_buff->palette[0];
454 520
455 /*
456 * Set (any) board control register to handle new color depth
457 */
458 pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR);
459
460 if (fbi->fb.var.bits_per_pixel >= 16) 521 if (fbi->fb.var.bits_per_pixel >= 16)
461 fb_dealloc_cmap(&fbi->fb.cmap); 522 fb_dealloc_cmap(&fbi->fb.cmap);
462 else 523 else
@@ -467,6 +528,24 @@ static int pxafb_set_par(struct fb_info *info)
467 return 0; 528 return 0;
468} 529}
469 530
531static int pxafb_pan_display(struct fb_var_screeninfo *var,
532 struct fb_info *info)
533{
534 struct pxafb_info *fbi = (struct pxafb_info *)info;
535 int dma = DMA_MAX + DMA_BASE;
536
537 if (fbi->state != C_ENABLE)
538 return 0;
539
540 setup_base_frame(fbi, 1);
541
542 if (fbi->lccr0 & LCCR0_SDS)
543 lcd_writel(fbi, FBR1, fbi->fdadr[dma + 1] | 0x1);
544
545 lcd_writel(fbi, FBR0, fbi->fdadr[dma] | 0x1);
546 return 0;
547}
548
470/* 549/*
471 * pxafb_blank(): 550 * pxafb_blank():
472 * Blank the display by setting all palette values to zero. Note, the 551 * Blank the display by setting all palette values to zero. Note, the
@@ -502,32 +581,342 @@ static int pxafb_blank(int blank, struct fb_info *info)
502 return 0; 581 return 0;
503} 582}
504 583
505static int pxafb_mmap(struct fb_info *info,
506 struct vm_area_struct *vma)
507{
508 struct pxafb_info *fbi = (struct pxafb_info *)info;
509 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
510
511 if (off < info->fix.smem_len) {
512 vma->vm_pgoff += fbi->video_offset / PAGE_SIZE;
513 return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu,
514 fbi->map_dma, fbi->map_size);
515 }
516 return -EINVAL;
517}
518
519static struct fb_ops pxafb_ops = { 584static struct fb_ops pxafb_ops = {
520 .owner = THIS_MODULE, 585 .owner = THIS_MODULE,
521 .fb_check_var = pxafb_check_var, 586 .fb_check_var = pxafb_check_var,
522 .fb_set_par = pxafb_set_par, 587 .fb_set_par = pxafb_set_par,
588 .fb_pan_display = pxafb_pan_display,
523 .fb_setcolreg = pxafb_setcolreg, 589 .fb_setcolreg = pxafb_setcolreg,
524 .fb_fillrect = cfb_fillrect, 590 .fb_fillrect = cfb_fillrect,
525 .fb_copyarea = cfb_copyarea, 591 .fb_copyarea = cfb_copyarea,
526 .fb_imageblit = cfb_imageblit, 592 .fb_imageblit = cfb_imageblit,
527 .fb_blank = pxafb_blank, 593 .fb_blank = pxafb_blank,
528 .fb_mmap = pxafb_mmap,
529}; 594};
530 595
596#ifdef CONFIG_FB_PXA_OVERLAY
597static void overlay1fb_setup(struct pxafb_layer *ofb)
598{
599 int size = ofb->fb.fix.line_length * ofb->fb.var.yres_virtual;
600 unsigned long start = ofb->video_mem_phys;
601 setup_frame_dma(ofb->fbi, DMA_OV1, PAL_NONE, start, size);
602}
603
604/* Depending on the enable status of overlay1/2, the DMA should be
605 * updated from FDADRx (when disabled) or FBRx (when enabled).
606 */
607static void overlay1fb_enable(struct pxafb_layer *ofb)
608{
609 int enabled = lcd_readl(ofb->fbi, OVL1C1) & OVLxC1_OEN;
610 uint32_t fdadr1 = ofb->fbi->fdadr[DMA_OV1] | (enabled ? 0x1 : 0);
611
612 lcd_writel(ofb->fbi, enabled ? FBR1 : FDADR1, fdadr1);
613 lcd_writel(ofb->fbi, OVL1C2, ofb->control[1]);
614 lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] | OVLxC1_OEN);
615}
616
617static void overlay1fb_disable(struct pxafb_layer *ofb)
618{
619 uint32_t lccr5 = lcd_readl(ofb->fbi, LCCR5);
620
621 lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] & ~OVLxC1_OEN);
622
623 lcd_writel(ofb->fbi, LCSR1, LCSR1_BS(1));
624 lcd_writel(ofb->fbi, LCCR5, lccr5 & ~LCSR1_BS(1));
625 lcd_writel(ofb->fbi, FBR1, ofb->fbi->fdadr[DMA_OV1] | 0x3);
626
627 if (wait_for_completion_timeout(&ofb->branch_done, 1 * HZ) == 0)
628 pr_warning("%s: timeout disabling overlay1\n", __func__);
629
630 lcd_writel(ofb->fbi, LCCR5, lccr5);
631}
632
633static void overlay2fb_setup(struct pxafb_layer *ofb)
634{
635 int size, div = 1, pfor = NONSTD_TO_PFOR(ofb->fb.var.nonstd);
636 unsigned long start[3] = { ofb->video_mem_phys, 0, 0 };
637
638 if (pfor == OVERLAY_FORMAT_RGB || pfor == OVERLAY_FORMAT_YUV444_PACKED) {
639 size = ofb->fb.fix.line_length * ofb->fb.var.yres_virtual;
640 setup_frame_dma(ofb->fbi, DMA_OV2_Y, -1, start[0], size);
641 } else {
642 size = ofb->fb.var.xres_virtual * ofb->fb.var.yres_virtual;
643 switch (pfor) {
644 case OVERLAY_FORMAT_YUV444_PLANAR: div = 1; break;
645 case OVERLAY_FORMAT_YUV422_PLANAR: div = 2; break;
646 case OVERLAY_FORMAT_YUV420_PLANAR: div = 4; break;
647 }
648 start[1] = start[0] + size;
649 start[2] = start[1] + size / div;
650 setup_frame_dma(ofb->fbi, DMA_OV2_Y, -1, start[0], size);
651 setup_frame_dma(ofb->fbi, DMA_OV2_Cb, -1, start[1], size / div);
652 setup_frame_dma(ofb->fbi, DMA_OV2_Cr, -1, start[2], size / div);
653 }
654}
655
656static void overlay2fb_enable(struct pxafb_layer *ofb)
657{
658 int pfor = NONSTD_TO_PFOR(ofb->fb.var.nonstd);
659 int enabled = lcd_readl(ofb->fbi, OVL2C1) & OVLxC1_OEN;
660 uint32_t fdadr2 = ofb->fbi->fdadr[DMA_OV2_Y] | (enabled ? 0x1 : 0);
661 uint32_t fdadr3 = ofb->fbi->fdadr[DMA_OV2_Cb] | (enabled ? 0x1 : 0);
662 uint32_t fdadr4 = ofb->fbi->fdadr[DMA_OV2_Cr] | (enabled ? 0x1 : 0);
663
664 if (pfor == OVERLAY_FORMAT_RGB || pfor == OVERLAY_FORMAT_YUV444_PACKED)
665 lcd_writel(ofb->fbi, enabled ? FBR2 : FDADR2, fdadr2);
666 else {
667 lcd_writel(ofb->fbi, enabled ? FBR2 : FDADR2, fdadr2);
668 lcd_writel(ofb->fbi, enabled ? FBR3 : FDADR3, fdadr3);
669 lcd_writel(ofb->fbi, enabled ? FBR4 : FDADR4, fdadr4);
670 }
671 lcd_writel(ofb->fbi, OVL2C2, ofb->control[1]);
672 lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] | OVLxC1_OEN);
673}
674
675static void overlay2fb_disable(struct pxafb_layer *ofb)
676{
677 uint32_t lccr5 = lcd_readl(ofb->fbi, LCCR5);
678
679 lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] & ~OVLxC1_OEN);
680
681 lcd_writel(ofb->fbi, LCSR1, LCSR1_BS(2));
682 lcd_writel(ofb->fbi, LCCR5, lccr5 & ~LCSR1_BS(2));
683 lcd_writel(ofb->fbi, FBR2, ofb->fbi->fdadr[DMA_OV2_Y] | 0x3);
684 lcd_writel(ofb->fbi, FBR3, ofb->fbi->fdadr[DMA_OV2_Cb] | 0x3);
685 lcd_writel(ofb->fbi, FBR4, ofb->fbi->fdadr[DMA_OV2_Cr] | 0x3);
686
687 if (wait_for_completion_timeout(&ofb->branch_done, 1 * HZ) == 0)
688 pr_warning("%s: timeout disabling overlay2\n", __func__);
689}
690
691static struct pxafb_layer_ops ofb_ops[] = {
692 [0] = {
693 .enable = overlay1fb_enable,
694 .disable = overlay1fb_disable,
695 .setup = overlay1fb_setup,
696 },
697 [1] = {
698 .enable = overlay2fb_enable,
699 .disable = overlay2fb_disable,
700 .setup = overlay2fb_setup,
701 },
702};
703
704static int overlayfb_open(struct fb_info *info, int user)
705{
706 struct pxafb_layer *ofb = (struct pxafb_layer *)info;
707
708 /* no support for framebuffer console on overlay */
709 if (user == 0)
710 return -ENODEV;
711
712 /* allow only one user at a time */
713 if (atomic_inc_and_test(&ofb->usage))
714 return -EBUSY;
715
716 /* unblank the base framebuffer */
717 fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK);
718 return 0;
719}
720
721static int overlayfb_release(struct fb_info *info, int user)
722{
723 struct pxafb_layer *ofb = (struct pxafb_layer*) info;
724
725 atomic_dec(&ofb->usage);
726 ofb->ops->disable(ofb);
727
728 free_pages_exact(ofb->video_mem, ofb->video_mem_size);
729 ofb->video_mem = NULL;
730 ofb->video_mem_size = 0;
731 return 0;
732}
733
734static int overlayfb_check_var(struct fb_var_screeninfo *var,
735 struct fb_info *info)
736{
737 struct pxafb_layer *ofb = (struct pxafb_layer *)info;
738 struct fb_var_screeninfo *base_var = &ofb->fbi->fb.var;
739 int xpos, ypos, pfor, bpp;
740
741 xpos = NONSTD_TO_XPOS(var->nonstd);
 742 ypos = NONSTD_TO_YPOS(var->nonstd);
743 pfor = NONSTD_TO_PFOR(var->nonstd);
744
745 bpp = pxafb_var_to_bpp(var);
746 if (bpp < 0)
747 return -EINVAL;
748
749 /* no support for YUV format on overlay1 */
750 if (ofb->id == OVERLAY1 && pfor != 0)
751 return -EINVAL;
752
753 /* for YUV packed formats, bpp = 'minimum bpp of YUV components' */
754 switch (pfor) {
755 case OVERLAY_FORMAT_RGB:
756 bpp = pxafb_var_to_bpp(var);
757 if (bpp < 0)
758 return -EINVAL;
759
760 pxafb_set_pixfmt(var, var_to_depth(var));
761 break;
762 case OVERLAY_FORMAT_YUV444_PACKED: bpp = 24; break;
763 case OVERLAY_FORMAT_YUV444_PLANAR: bpp = 8; break;
764 case OVERLAY_FORMAT_YUV422_PLANAR: bpp = 4; break;
765 case OVERLAY_FORMAT_YUV420_PLANAR: bpp = 2; break;
766 default:
767 return -EINVAL;
768 }
769
770 /* each line must start at a 32-bit word boundary */
771 if ((xpos * bpp) % 32)
772 return -EINVAL;
773
774 /* xres must align on 32-bit word boundary */
775 var->xres = roundup(var->xres * bpp, 32) / bpp;
776
777 if ((xpos + var->xres > base_var->xres) ||
778 (ypos + var->yres > base_var->yres))
779 return -EINVAL;
780
781 var->xres_virtual = var->xres;
782 var->yres_virtual = max(var->yres, var->yres_virtual);
783 return 0;
784}
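To make the 32-bit alignment rule above concrete (hypothetical numbers): with the YUV420 minimum of bpp = 2, a requested xres of 321 becomes roundup(321 * 2, 32) / 2 = 336 pixels, so each scanline still occupies a whole number of 32-bit words.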
785
786static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
787{
788 struct fb_var_screeninfo *var = &ofb->fb.var;
789 int pfor = NONSTD_TO_PFOR(var->nonstd);
790 int size, bpp = 0;
791
792 switch (pfor) {
793 case OVERLAY_FORMAT_RGB: bpp = var->bits_per_pixel; break;
794 case OVERLAY_FORMAT_YUV444_PACKED: bpp = 24; break;
795 case OVERLAY_FORMAT_YUV444_PLANAR: bpp = 24; break;
796 case OVERLAY_FORMAT_YUV422_PLANAR: bpp = 16; break;
797 case OVERLAY_FORMAT_YUV420_PLANAR: bpp = 12; break;
798 }
799
800 ofb->fb.fix.line_length = var->xres_virtual * bpp / 8;
801
802 size = PAGE_ALIGN(ofb->fb.fix.line_length * var->yres_virtual);
803
804 /* don't re-allocate if the original video memory is enough */
805 if (ofb->video_mem) {
806 if (ofb->video_mem_size >= size)
807 return 0;
808
809 free_pages_exact(ofb->video_mem, ofb->video_mem_size);
810 }
811
812 ofb->video_mem = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
813 if (ofb->video_mem == NULL)
814 return -ENOMEM;
815
816 ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
817 ofb->video_mem_size = size;
818
819 ofb->fb.fix.smem_start = ofb->video_mem_phys;
820 ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
821 ofb->fb.screen_base = ofb->video_mem;
822 return 0;
823}
824
825static int overlayfb_set_par(struct fb_info *info)
826{
827 struct pxafb_layer *ofb = (struct pxafb_layer *)info;
828 struct fb_var_screeninfo *var = &info->var;
829 int xpos, ypos, pfor, bpp, ret;
830
831 ret = overlayfb_map_video_memory(ofb);
832 if (ret)
833 return ret;
834
835 bpp = pxafb_var_to_bpp(var);
836 xpos = NONSTD_TO_XPOS(var->nonstd);
 837 ypos = NONSTD_TO_YPOS(var->nonstd);
838 pfor = NONSTD_TO_PFOR(var->nonstd);
839
840 ofb->control[0] = OVLxC1_PPL(var->xres) | OVLxC1_LPO(var->yres) |
841 OVLxC1_BPP(bpp);
842 ofb->control[1] = OVLxC2_XPOS(xpos) | OVLxC2_YPOS(ypos);
843
844 if (ofb->id == OVERLAY2)
845 ofb->control[1] |= OVL2C2_PFOR(pfor);
846
847 ofb->ops->setup(ofb);
848 ofb->ops->enable(ofb);
849 return 0;
850}
851
852static struct fb_ops overlay_fb_ops = {
853 .owner = THIS_MODULE,
854 .fb_open = overlayfb_open,
855 .fb_release = overlayfb_release,
856 .fb_check_var = overlayfb_check_var,
857 .fb_set_par = overlayfb_set_par,
858};
859
860static void __devinit init_pxafb_overlay(struct pxafb_info *fbi,
861 struct pxafb_layer *ofb, int id)
862{
863 sprintf(ofb->fb.fix.id, "overlay%d", id + 1);
864
865 ofb->fb.fix.type = FB_TYPE_PACKED_PIXELS;
866 ofb->fb.fix.xpanstep = 0;
867 ofb->fb.fix.ypanstep = 1;
868
869 ofb->fb.var.activate = FB_ACTIVATE_NOW;
870 ofb->fb.var.height = -1;
871 ofb->fb.var.width = -1;
872 ofb->fb.var.vmode = FB_VMODE_NONINTERLACED;
873
874 ofb->fb.fbops = &overlay_fb_ops;
875 ofb->fb.flags = FBINFO_FLAG_DEFAULT;
876 ofb->fb.node = -1;
877 ofb->fb.pseudo_palette = NULL;
878
879 ofb->id = id;
880 ofb->ops = &ofb_ops[id];
881 atomic_set(&ofb->usage, 0);
882 ofb->fbi = fbi;
883 init_completion(&ofb->branch_done);
884}
885
886static int __devinit pxafb_overlay_init(struct pxafb_info *fbi)
887{
888 int i, ret;
889
890 for (i = 0; i < 2; i++) {
891 init_pxafb_overlay(fbi, &fbi->overlay[i], i);
892 ret = register_framebuffer(&fbi->overlay[i].fb);
893 if (ret) {
894 dev_err(fbi->dev, "failed to register overlay %d\n", i);
895 return ret;
896 }
897 }
898
899 /* mask all IU/BS/EOF/SOF interrupts */
900 lcd_writel(fbi, LCCR5, ~0);
901
902 /* place overlay(s) on top of base */
903 fbi->lccr0 |= LCCR0_OUC;
904 pr_info("PXA Overlay driver loaded successfully!\n");
905 return 0;
906}
907
908static void __devexit pxafb_overlay_exit(struct pxafb_info *fbi)
909{
910 int i;
911
912 for (i = 0; i < 2; i++)
913 unregister_framebuffer(&fbi->overlay[i].fb);
914}
915#else
916static inline void pxafb_overlay_init(struct pxafb_info *fbi) {}
917static inline void pxafb_overlay_exit(struct pxafb_info *fbi) {}
918#endif /* CONFIG_FB_PXA_OVERLAY */
919
531/* 920/*
532 * Calculate the PCD value from the clock rate (in picoseconds). 921 * Calculate the PCD value from the clock rate (in picoseconds).
533 * We take account of the PPCR clock setting. 922 * We take account of the PPCR clock setting.
@@ -607,22 +996,22 @@ unsigned long pxafb_get_hsync_time(struct device *dev)
607EXPORT_SYMBOL(pxafb_get_hsync_time); 996EXPORT_SYMBOL(pxafb_get_hsync_time);
608 997
609static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal, 998static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal,
610 unsigned int offset, size_t size) 999 unsigned long start, size_t size)
611{ 1000{
612 struct pxafb_dma_descriptor *dma_desc, *pal_desc; 1001 struct pxafb_dma_descriptor *dma_desc, *pal_desc;
613 unsigned int dma_desc_off, pal_desc_off; 1002 unsigned int dma_desc_off, pal_desc_off;
614 1003
615 if (dma < 0 || dma >= DMA_MAX) 1004 if (dma < 0 || dma >= DMA_MAX * 2)
616 return -EINVAL; 1005 return -EINVAL;
617 1006
618 dma_desc = &fbi->dma_buff->dma_desc[dma]; 1007 dma_desc = &fbi->dma_buff->dma_desc[dma];
619 dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[dma]); 1008 dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[dma]);
620 1009
621 dma_desc->fsadr = fbi->screen_dma + offset; 1010 dma_desc->fsadr = start;
622 dma_desc->fidr = 0; 1011 dma_desc->fidr = 0;
623 dma_desc->ldcmd = size; 1012 dma_desc->ldcmd = size;
624 1013
625 if (pal < 0 || pal >= PAL_MAX) { 1014 if (pal < 0 || pal >= PAL_MAX * 2) {
626 dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; 1015 dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off;
627 fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off; 1016 fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off;
628 } else { 1017 } else {
@@ -648,6 +1037,27 @@ static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal,
648 return 0; 1037 return 0;
649} 1038}
650 1039
1040static void setup_base_frame(struct pxafb_info *fbi, int branch)
1041{
1042 struct fb_var_screeninfo *var = &fbi->fb.var;
1043 struct fb_fix_screeninfo *fix = &fbi->fb.fix;
1044 int nbytes, dma, pal, bpp = var->bits_per_pixel;
1045 unsigned long offset;
1046
1047 dma = DMA_BASE + (branch ? DMA_MAX : 0);
1048 pal = (bpp >= 16) ? PAL_NONE : PAL_BASE + (branch ? PAL_MAX : 0);
1049
1050 nbytes = fix->line_length * var->yres;
1051 offset = fix->line_length * var->yoffset + fbi->video_mem_phys;
1052
1053 if (fbi->lccr0 & LCCR0_SDS) {
1054 nbytes = nbytes / 2;
1055 setup_frame_dma(fbi, dma + 1, PAL_NONE, offset + nbytes, nbytes);
1056 }
1057
1058 setup_frame_dma(fbi, dma, pal, offset, nbytes);
1059}
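As an example with assumed figures: a 640x480 panel at 16 bpp has line_length = 1280, so nbytes = 614400; on a dual-scan panel (LCCR0_SDS set) that is halved into two 307200-byte transfers, the upper half on channel dma and the lower half on dma + 1 starting at offset + 307200.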
1060
651#ifdef CONFIG_FB_PXA_SMARTPANEL 1061#ifdef CONFIG_FB_PXA_SMARTPANEL
652static int setup_smart_dma(struct pxafb_info *fbi) 1062static int setup_smart_dma(struct pxafb_info *fbi)
653{ 1063{
@@ -701,6 +1111,7 @@ int pxafb_smart_flush(struct fb_info *info)
701 lcd_writel(fbi, LCCR1, fbi->reg_lccr1); 1111 lcd_writel(fbi, LCCR1, fbi->reg_lccr1);
702 lcd_writel(fbi, LCCR2, fbi->reg_lccr2); 1112 lcd_writel(fbi, LCCR2, fbi->reg_lccr2);
703 lcd_writel(fbi, LCCR3, fbi->reg_lccr3); 1113 lcd_writel(fbi, LCCR3, fbi->reg_lccr3);
1114 lcd_writel(fbi, LCCR4, fbi->reg_lccr4);
704 lcd_writel(fbi, FDADR0, fbi->fdadr[0]); 1115 lcd_writel(fbi, FDADR0, fbi->fdadr[0]);
705 lcd_writel(fbi, FDADR6, fbi->fdadr[6]); 1116 lcd_writel(fbi, FDADR6, fbi->fdadr[6]);
706 1117
@@ -727,12 +1138,19 @@ int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds)
727 int i; 1138 int i;
728 struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); 1139 struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb);
729 1140
730 /* leave 2 commands for INTERRUPT and WAIT_FOR_SYNC */ 1141 for (i = 0; i < n_cmds; i++, cmds++) {
731 for (i = 0; i < n_cmds; i++) { 1142 /* if it is a software delay, flush and delay */
1143 if ((*cmds & 0xff00) == SMART_CMD_DELAY) {
1144 pxafb_smart_flush(info);
1145 mdelay(*cmds & 0xff);
1146 continue;
1147 }
1148
1149 /* leave 2 commands for INTERRUPT and WAIT_FOR_SYNC */
732 if (fbi->n_smart_cmds == CMD_BUFF_SIZE - 8) 1150 if (fbi->n_smart_cmds == CMD_BUFF_SIZE - 8)
733 pxafb_smart_flush(info); 1151 pxafb_smart_flush(info);
734 1152
735 fbi->smart_cmds[fbi->n_smart_cmds++] = *cmds++; 1153 fbi->smart_cmds[fbi->n_smart_cmds++] = *cmds;
736 } 1154 }
737 1155
738 return 0; 1156 return 0;
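In short, the reworked queue now handles SMART_CMD_DELAY entries in-line: the buffered commands are flushed to the panel first, then mdelay() sleeps for the number of milliseconds encoded in the command's low byte, instead of the delay being pushed into the command buffer like an ordinary word.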
@@ -764,7 +1182,9 @@ static void setup_smart_timing(struct pxafb_info *fbi,
764 LCCR1_HorSnchWdth(__smart_timing(t3, lclk)); 1182 LCCR1_HorSnchWdth(__smart_timing(t3, lclk));
765 1183
766 fbi->reg_lccr2 = LCCR2_DisHght(var->yres); 1184 fbi->reg_lccr2 = LCCR2_DisHght(var->yres);
767 fbi->reg_lccr3 = LCCR3_PixClkDiv(__smart_timing(t4, lclk)); 1185 fbi->reg_lccr3 = fbi->lccr3 | LCCR3_PixClkDiv(__smart_timing(t4, lclk));
1186 fbi->reg_lccr3 |= (var->sync & FB_SYNC_HOR_HIGH_ACT) ? LCCR3_HSP : 0;
1187 fbi->reg_lccr3 |= (var->sync & FB_SYNC_VERT_HIGH_ACT) ? LCCR3_VSP : 0;
768 1188
769 /* FIXME: make this configurable */ 1189 /* FIXME: make this configurable */
770 fbi->reg_cmdcr = 1; 1190 fbi->reg_cmdcr = 1;
@@ -789,11 +1209,15 @@ static int pxafb_smart_thread(void *arg)
789 if (try_to_freeze()) 1209 if (try_to_freeze())
790 continue; 1210 continue;
791 1211
1212 mutex_lock(&fbi->ctrlr_lock);
1213
792 if (fbi->state == C_ENABLE) { 1214 if (fbi->state == C_ENABLE) {
793 inf->smart_update(&fbi->fb); 1215 inf->smart_update(&fbi->fb);
794 complete(&fbi->refresh_done); 1216 complete(&fbi->refresh_done);
795 } 1217 }
796 1218
1219 mutex_unlock(&fbi->ctrlr_lock);
1220
797 set_current_state(TASK_INTERRUPTIBLE); 1221 set_current_state(TASK_INTERRUPTIBLE);
798 schedule_timeout(30 * HZ / 1000); 1222 schedule_timeout(30 * HZ / 1000);
799 } 1223 }
@@ -804,16 +1228,22 @@ static int pxafb_smart_thread(void *arg)
804 1228
805static int pxafb_smart_init(struct pxafb_info *fbi) 1229static int pxafb_smart_init(struct pxafb_info *fbi)
806{ 1230{
807 if (!(fbi->lccr0 | LCCR0_LCDT)) 1231 if (!(fbi->lccr0 & LCCR0_LCDT))
808 return 0; 1232 return 0;
809 1233
1234 fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff;
1235 fbi->n_smart_cmds = 0;
1236
1237 init_completion(&fbi->command_done);
1238 init_completion(&fbi->refresh_done);
1239
810 fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi, 1240 fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi,
811 "lcd_refresh"); 1241 "lcd_refresh");
812 if (IS_ERR(fbi->smart_thread)) { 1242 if (IS_ERR(fbi->smart_thread)) {
813 printk(KERN_ERR "%s: unable to create kernel thread\n", 1243 pr_err("%s: unable to create kernel thread\n", __func__);
814 __func__);
815 return PTR_ERR(fbi->smart_thread); 1244 return PTR_ERR(fbi->smart_thread);
816 } 1245 }
1246
817 return 0; 1247 return 0;
818} 1248}
819#else 1249#else
@@ -826,7 +1256,9 @@ int pxafb_smart_flush(struct fb_info *info)
826{ 1256{
827 return 0; 1257 return 0;
828} 1258}
829#endif /* CONFIG_FB_SMART_PANEL */ 1259
1260static inline int pxafb_smart_init(struct pxafb_info *fbi) { return 0; }
1261#endif /* CONFIG_FB_PXA_SMARTPANEL */
830 1262
831static void setup_parallel_timing(struct pxafb_info *fbi, 1263static void setup_parallel_timing(struct pxafb_info *fbi,
832 struct fb_var_screeninfo *var) 1264 struct fb_var_screeninfo *var)
@@ -874,51 +1306,7 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var,
874 struct pxafb_info *fbi) 1306 struct pxafb_info *fbi)
875{ 1307{
876 u_long flags; 1308 u_long flags;
877 size_t nbytes;
878
879#if DEBUG_VAR
880 if (!(fbi->lccr0 & LCCR0_LCDT)) {
881 if (var->xres < 16 || var->xres > 1024)
882 printk(KERN_ERR "%s: invalid xres %d\n",
883 fbi->fb.fix.id, var->xres);
884 switch (var->bits_per_pixel) {
885 case 1:
886 case 2:
887 case 4:
888 case 8:
889 case 16:
890 case 24:
891 case 32:
892 break;
893 default:
894 printk(KERN_ERR "%s: invalid bit depth %d\n",
895 fbi->fb.fix.id, var->bits_per_pixel);
896 break;
897 }
898 1309
899 if (var->hsync_len < 1 || var->hsync_len > 64)
900 printk(KERN_ERR "%s: invalid hsync_len %d\n",
901 fbi->fb.fix.id, var->hsync_len);
902 if (var->left_margin < 1 || var->left_margin > 255)
903 printk(KERN_ERR "%s: invalid left_margin %d\n",
904 fbi->fb.fix.id, var->left_margin);
905 if (var->right_margin < 1 || var->right_margin > 255)
906 printk(KERN_ERR "%s: invalid right_margin %d\n",
907 fbi->fb.fix.id, var->right_margin);
908 if (var->yres < 1 || var->yres > 1024)
909 printk(KERN_ERR "%s: invalid yres %d\n",
910 fbi->fb.fix.id, var->yres);
911 if (var->vsync_len < 1 || var->vsync_len > 64)
912 printk(KERN_ERR "%s: invalid vsync_len %d\n",
913 fbi->fb.fix.id, var->vsync_len);
914 if (var->upper_margin < 0 || var->upper_margin > 255)
915 printk(KERN_ERR "%s: invalid upper_margin %d\n",
916 fbi->fb.fix.id, var->upper_margin);
917 if (var->lower_margin < 0 || var->lower_margin > 255)
918 printk(KERN_ERR "%s: invalid lower_margin %d\n",
919 fbi->fb.fix.id, var->lower_margin);
920 }
921#endif
922 /* Update shadow copy atomically */ 1310 /* Update shadow copy atomically */
923 local_irq_save(flags); 1311 local_irq_save(flags);
924 1312
@@ -929,23 +1317,13 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var,
929#endif 1317#endif
930 setup_parallel_timing(fbi, var); 1318 setup_parallel_timing(fbi, var);
931 1319
1320 setup_base_frame(fbi, 0);
1321
932 fbi->reg_lccr0 = fbi->lccr0 | 1322 fbi->reg_lccr0 = fbi->lccr0 |
933 (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | 1323 (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM |
934 LCCR0_QDM | LCCR0_BM | LCCR0_OUM); 1324 LCCR0_QDM | LCCR0_BM | LCCR0_OUM);
935 1325
936 fbi->reg_lccr3 |= pxafb_bpp_to_lccr3(var); 1326 fbi->reg_lccr3 |= pxafb_var_to_lccr3(var);
937
938 nbytes = var->yres * fbi->fb.fix.line_length;
939
940 if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) {
941 nbytes = nbytes / 2;
942 setup_frame_dma(fbi, DMA_LOWER, PAL_NONE, nbytes, nbytes);
943 }
944
945 if ((var->bits_per_pixel >= 16) || (fbi->lccr0 & LCCR0_LCDT))
946 setup_frame_dma(fbi, DMA_BASE, PAL_NONE, 0, nbytes);
947 else
948 setup_frame_dma(fbi, DMA_BASE, PAL_BASE, 0, nbytes);
949 1327
950 fbi->reg_lccr4 = lcd_readl(fbi, LCCR4) & ~LCCR4_PAL_FOR_MASK; 1328 fbi->reg_lccr4 = lcd_readl(fbi, LCCR4) & ~LCCR4_PAL_FOR_MASK;
951 fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK); 1329 fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK);
@@ -959,6 +1337,7 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var,
959 (lcd_readl(fbi, LCCR1) != fbi->reg_lccr1) || 1337 (lcd_readl(fbi, LCCR1) != fbi->reg_lccr1) ||
960 (lcd_readl(fbi, LCCR2) != fbi->reg_lccr2) || 1338 (lcd_readl(fbi, LCCR2) != fbi->reg_lccr2) ||
961 (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) || 1339 (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) ||
1340 (lcd_readl(fbi, LCCR4) != fbi->reg_lccr4) ||
962 (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) || 1341 (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) ||
963 (lcd_readl(fbi, FDADR1) != fbi->fdadr[1])) 1342 (lcd_readl(fbi, FDADR1) != fbi->fdadr[1]))
964 pxafb_schedule_work(fbi, C_REENABLE); 1343 pxafb_schedule_work(fbi, C_REENABLE);
@@ -976,67 +1355,16 @@ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
976{ 1355{
977 pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff"); 1356 pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff");
978 1357
979 if (pxafb_backlight_power) 1358 if (fbi->backlight_power)
980 pxafb_backlight_power(on); 1359 fbi->backlight_power(on);
981} 1360}
982 1361
983static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) 1362static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
984{ 1363{
985 pr_debug("pxafb: LCD power o%s\n", on ? "n" : "ff"); 1364 pr_debug("pxafb: LCD power o%s\n", on ? "n" : "ff");
986 1365
987 if (pxafb_lcd_power) 1366 if (fbi->lcd_power)
988 pxafb_lcd_power(on, &fbi->fb.var); 1367 fbi->lcd_power(on, &fbi->fb.var);
989}
990
991static void pxafb_setup_gpio(struct pxafb_info *fbi)
992{
993 int gpio, ldd_bits;
994 unsigned int lccr0 = fbi->lccr0;
995
996 /*
997 * setup is based on type of panel supported
998 */
999
1000 /* 4 bit interface */
1001 if ((lccr0 & LCCR0_CMS) == LCCR0_Mono &&
1002 (lccr0 & LCCR0_SDS) == LCCR0_Sngl &&
1003 (lccr0 & LCCR0_DPD) == LCCR0_4PixMono)
1004 ldd_bits = 4;
1005
1006 /* 8 bit interface */
1007 else if (((lccr0 & LCCR0_CMS) == LCCR0_Mono &&
1008 ((lccr0 & LCCR0_SDS) == LCCR0_Dual ||
1009 (lccr0 & LCCR0_DPD) == LCCR0_8PixMono)) ||
1010 ((lccr0 & LCCR0_CMS) == LCCR0_Color &&
1011 (lccr0 & LCCR0_PAS) == LCCR0_Pas &&
1012 (lccr0 & LCCR0_SDS) == LCCR0_Sngl))
1013 ldd_bits = 8;
1014
1015 /* 16 bit interface */
1016 else if ((lccr0 & LCCR0_CMS) == LCCR0_Color &&
1017 ((lccr0 & LCCR0_SDS) == LCCR0_Dual ||
1018 (lccr0 & LCCR0_PAS) == LCCR0_Act))
1019 ldd_bits = 16;
1020
1021 else {
1022 printk(KERN_ERR "pxafb_setup_gpio: unable to determine "
1023 "bits per pixel\n");
1024 return;
1025 }
1026
1027 for (gpio = 58; ldd_bits; gpio++, ldd_bits--)
1028 pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT);
1029 /* 18 bit interface */
1030 if (fbi->fb.var.bits_per_pixel > 16) {
1031 pxa_gpio_mode(86 | GPIO_ALT_FN_2_OUT);
1032 pxa_gpio_mode(87 | GPIO_ALT_FN_2_OUT);
1033 }
1034 pxa_gpio_mode(GPIO74_LCD_FCLK_MD);
1035 pxa_gpio_mode(GPIO75_LCD_LCLK_MD);
1036 pxa_gpio_mode(GPIO76_LCD_PCLK_MD);
1037
1038 if ((lccr0 & LCCR0_PAS) == 0)
1039 pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD);
1040} 1368}
1041 1369
1042static void pxafb_enable_controller(struct pxafb_info *fbi) 1370static void pxafb_enable_controller(struct pxafb_info *fbi)
@@ -1056,6 +1384,7 @@ static void pxafb_enable_controller(struct pxafb_info *fbi)
1056 return; 1384 return;
1057 1385
1058 /* Sequence from 11.7.10 */ 1386 /* Sequence from 11.7.10 */
1387 lcd_writel(fbi, LCCR4, fbi->reg_lccr4);
1059 lcd_writel(fbi, LCCR3, fbi->reg_lccr3); 1388 lcd_writel(fbi, LCCR3, fbi->reg_lccr3);
1060 lcd_writel(fbi, LCCR2, fbi->reg_lccr2); 1389 lcd_writel(fbi, LCCR2, fbi->reg_lccr2);
1061 lcd_writel(fbi, LCCR1, fbi->reg_lccr1); 1390 lcd_writel(fbi, LCCR1, fbi->reg_lccr1);
@@ -1097,8 +1426,9 @@ static void pxafb_disable_controller(struct pxafb_info *fbi)
1097static irqreturn_t pxafb_handle_irq(int irq, void *dev_id) 1426static irqreturn_t pxafb_handle_irq(int irq, void *dev_id)
1098{ 1427{
1099 struct pxafb_info *fbi = dev_id; 1428 struct pxafb_info *fbi = dev_id;
1100 unsigned int lccr0, lcsr = lcd_readl(fbi, LCSR); 1429 unsigned int lccr0, lcsr, lcsr1;
1101 1430
1431 lcsr = lcd_readl(fbi, LCSR);
1102 if (lcsr & LCSR_LDD) { 1432 if (lcsr & LCSR_LDD) {
1103 lccr0 = lcd_readl(fbi, LCCR0); 1433 lccr0 = lcd_readl(fbi, LCCR0);
1104 lcd_writel(fbi, LCCR0, lccr0 | LCCR0_LDM); 1434 lcd_writel(fbi, LCCR0, lccr0 | LCCR0_LDM);
@@ -1109,8 +1439,18 @@ static irqreturn_t pxafb_handle_irq(int irq, void *dev_id)
1109 if (lcsr & LCSR_CMD_INT) 1439 if (lcsr & LCSR_CMD_INT)
1110 complete(&fbi->command_done); 1440 complete(&fbi->command_done);
1111#endif 1441#endif
1112
1113 lcd_writel(fbi, LCSR, lcsr); 1442 lcd_writel(fbi, LCSR, lcsr);
1443
1444#ifdef CONFIG_FB_PXA_OVERLAY
1445 lcsr1 = lcd_readl(fbi, LCSR1);
1446 if (lcsr1 & LCSR1_BS(1))
1447 complete(&fbi->overlay[0].branch_done);
1448
1449 if (lcsr1 & LCSR1_BS(2))
1450 complete(&fbi->overlay[1].branch_done);
1451
1452 lcd_writel(fbi, LCSR1, lcsr1);
1453#endif
1114 return IRQ_HANDLED; 1454 return IRQ_HANDLED;
1115} 1455}
1116 1456
@@ -1181,7 +1521,6 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
1181 if (old_state == C_ENABLE) { 1521 if (old_state == C_ENABLE) {
1182 __pxafb_lcd_power(fbi, 0); 1522 __pxafb_lcd_power(fbi, 0);
1183 pxafb_disable_controller(fbi); 1523 pxafb_disable_controller(fbi);
1184 pxafb_setup_gpio(fbi);
1185 pxafb_enable_controller(fbi); 1524 pxafb_enable_controller(fbi);
1186 __pxafb_lcd_power(fbi, 1); 1525 __pxafb_lcd_power(fbi, 1);
1187 } 1526 }
@@ -1204,7 +1543,6 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
1204 */ 1543 */
1205 if (old_state != C_ENABLE) { 1544 if (old_state != C_ENABLE) {
1206 fbi->state = C_ENABLE; 1545 fbi->state = C_ENABLE;
1207 pxafb_setup_gpio(fbi);
1208 pxafb_enable_controller(fbi); 1546 pxafb_enable_controller(fbi);
1209 __pxafb_lcd_power(fbi, 1); 1547 __pxafb_lcd_power(fbi, 1);
1210 __pxafb_backlight_power(fbi, 1); 1548 __pxafb_backlight_power(fbi, 1);
@@ -1303,77 +1641,34 @@ static int pxafb_resume(struct platform_device *dev)
1303#define pxafb_resume NULL 1641#define pxafb_resume NULL
1304#endif 1642#endif
1305 1643
1306/* 1644static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi)
1307 * pxafb_map_video_memory():
1308 * Allocates the DRAM memory for the frame buffer. This buffer is
1309 * remapped into a non-cached, non-buffered, memory region to
1310 * allow palette and pixel writes to occur without flushing the
1311 * cache. Once this area is remapped, all virtual memory
1312 * access to the video memory should occur at the new region.
1313 */
1314static int __devinit pxafb_map_video_memory(struct pxafb_info *fbi)
1315{ 1645{
1316 /* 1646 int size = PAGE_ALIGN(fbi->video_mem_size);
1317 * We reserve one page for the palette, plus the size
1318 * of the framebuffer.
1319 */
1320 fbi->video_offset = PAGE_ALIGN(sizeof(struct pxafb_dma_buff));
1321 fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + fbi->video_offset);
1322 fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size,
1323 &fbi->map_dma, GFP_KERNEL);
1324
1325 if (fbi->map_cpu) {
1326 /* prevent initial garbage on screen */
1327 memset(fbi->map_cpu, 0, fbi->map_size);
1328 fbi->fb.screen_base = fbi->map_cpu + fbi->video_offset;
1329 fbi->screen_dma = fbi->map_dma + fbi->video_offset;
1330
1331 /*
1332 * FIXME: this is actually the wrong thing to place in
1333 * smem_start. But fbdev suffers from the problem that
1334 * it needs an API which doesn't exist (in this case,
1335 * dma_writecombine_mmap)
1336 */
1337 fbi->fb.fix.smem_start = fbi->screen_dma;
1338 fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16;
1339
1340 fbi->dma_buff = (void *) fbi->map_cpu;
1341 fbi->dma_buff_phys = fbi->map_dma;
1342 fbi->palette_cpu = (u16 *) fbi->dma_buff->palette;
1343 1647
1344 pr_debug("pxafb: palette_mem_size = 0x%08x\n", fbi->palette_size*sizeof(u16)); 1648 fbi->video_mem = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
1649 if (fbi->video_mem == NULL)
1650 return -ENOMEM;
1345 1651
1346#ifdef CONFIG_FB_PXA_SMARTPANEL 1652 fbi->video_mem_phys = virt_to_phys(fbi->video_mem);
1347 fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff; 1653 fbi->video_mem_size = size;
1348 fbi->n_smart_cmds = 0;
1349#endif
1350 }
1351
1352 return fbi->map_cpu ? 0 : -ENOMEM;
1353}
1354 1654
1355static void pxafb_decode_mode_info(struct pxafb_info *fbi, 1655 fbi->fb.fix.smem_start = fbi->video_mem_phys;
1356 struct pxafb_mode_info *modes, 1656 fbi->fb.fix.smem_len = fbi->video_mem_size;
1357 unsigned int num_modes) 1657 fbi->fb.screen_base = fbi->video_mem;
1358{
1359 unsigned int i, smemlen;
1360
1361 pxafb_setmode(&fbi->fb.var, &modes[0]);
1362 1658
1363 for (i = 0; i < num_modes; i++) { 1659 return fbi->video_mem ? 0 : -ENOMEM;
1364 smemlen = modes[i].xres * modes[i].yres * modes[i].bpp / 8;
1365 if (smemlen > fbi->fb.fix.smem_len)
1366 fbi->fb.fix.smem_len = smemlen;
1367 }
1368} 1660}
1369 1661
1370static void pxafb_decode_mach_info(struct pxafb_info *fbi, 1662static void pxafb_decode_mach_info(struct pxafb_info *fbi,
1371 struct pxafb_mach_info *inf) 1663 struct pxafb_mach_info *inf)
1372{ 1664{
1373 unsigned int lcd_conn = inf->lcd_conn; 1665 unsigned int lcd_conn = inf->lcd_conn;
1666 struct pxafb_mode_info *m;
1667 int i;
1374 1668
1375 fbi->cmap_inverse = inf->cmap_inverse; 1669 fbi->cmap_inverse = inf->cmap_inverse;
1376 fbi->cmap_static = inf->cmap_static; 1670 fbi->cmap_static = inf->cmap_static;
1671 fbi->lccr4 = inf->lccr4;
1377 1672
1378 switch (lcd_conn & LCD_TYPE_MASK) { 1673 switch (lcd_conn & LCD_TYPE_MASK) {
1379 case LCD_TYPE_MONO_STN: 1674 case LCD_TYPE_MONO_STN:
@@ -1398,7 +1693,6 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi,
1398 /* fall back to backward compatibility way */ 1693 /* fall back to backward compatibility way */
1399 fbi->lccr0 = inf->lccr0; 1694 fbi->lccr0 = inf->lccr0;
1400 fbi->lccr3 = inf->lccr3; 1695 fbi->lccr3 = inf->lccr3;
1401 fbi->lccr4 = inf->lccr4;
1402 goto decode_mode; 1696 goto decode_mode;
1403 } 1697 }
1404 1698
@@ -1412,7 +1706,22 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi,
1412 fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0; 1706 fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0;
1413 1707
1414decode_mode: 1708decode_mode:
1415 pxafb_decode_mode_info(fbi, inf->modes, inf->num_modes); 1709 pxafb_setmode(&fbi->fb.var, &inf->modes[0]);
1710
1711 /* decide video memory size as follows:
1712 * 1. default to mode of maximum resolution
1713 * 2. allow platform to override
1714 * 3. allow module parameter to override
1715 */
1716 for (i = 0, m = &inf->modes[0]; i < inf->num_modes; i++, m++)
1717 fbi->video_mem_size = max_t(size_t, fbi->video_mem_size,
1718 m->xres * m->yres * m->bpp / 8);
1719
1720 if (inf->video_mem_size > fbi->video_mem_size)
1721 fbi->video_mem_size = inf->video_mem_size;
1722
1723 if (video_mem_size > fbi->video_mem_size)
1724 fbi->video_mem_size = video_mem_size;
1416} 1725}
1417 1726
1418static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev) 1727static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
@@ -1429,7 +1738,7 @@ static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
1429 memset(fbi, 0, sizeof(struct pxafb_info)); 1738 memset(fbi, 0, sizeof(struct pxafb_info));
1430 fbi->dev = dev; 1739 fbi->dev = dev;
1431 1740
1432 fbi->clk = clk_get(dev, "LCDCLK"); 1741 fbi->clk = clk_get(dev, NULL);
1433 if (IS_ERR(fbi->clk)) { 1742 if (IS_ERR(fbi->clk)) {
1434 kfree(fbi); 1743 kfree(fbi);
1435 return NULL; 1744 return NULL;
@@ -1440,7 +1749,7 @@ static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
1440 fbi->fb.fix.type = FB_TYPE_PACKED_PIXELS; 1749 fbi->fb.fix.type = FB_TYPE_PACKED_PIXELS;
1441 fbi->fb.fix.type_aux = 0; 1750 fbi->fb.fix.type_aux = 0;
1442 fbi->fb.fix.xpanstep = 0; 1751 fbi->fb.fix.xpanstep = 0;
1443 fbi->fb.fix.ypanstep = 0; 1752 fbi->fb.fix.ypanstep = 1;
1444 fbi->fb.fix.ywrapstep = 0; 1753 fbi->fb.fix.ywrapstep = 0;
1445 fbi->fb.fix.accel = FB_ACCEL_NONE; 1754 fbi->fb.fix.accel = FB_ACCEL_NONE;
1446 1755
@@ -1448,7 +1757,7 @@ static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
1448 fbi->fb.var.activate = FB_ACTIVATE_NOW; 1757 fbi->fb.var.activate = FB_ACTIVATE_NOW;
1449 fbi->fb.var.height = -1; 1758 fbi->fb.var.height = -1;
1450 fbi->fb.var.width = -1; 1759 fbi->fb.var.width = -1;
1451 fbi->fb.var.accel_flags = 0; 1760 fbi->fb.var.accel_flags = FB_ACCELF_TEXT;
1452 fbi->fb.var.vmode = FB_VMODE_NONINTERLACED; 1761 fbi->fb.var.vmode = FB_VMODE_NONINTERLACED;
1453 1762
1454 fbi->fb.fbops = &pxafb_ops; 1763 fbi->fb.fbops = &pxafb_ops;
@@ -1468,10 +1777,6 @@ static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
1468 INIT_WORK(&fbi->task, pxafb_task); 1777 INIT_WORK(&fbi->task, pxafb_task);
1469 mutex_init(&fbi->ctrlr_lock); 1778 mutex_init(&fbi->ctrlr_lock);
1470 init_completion(&fbi->disable_done); 1779 init_completion(&fbi->disable_done);
1471#ifdef CONFIG_FB_PXA_SMARTPANEL
1472 init_completion(&fbi->command_done);
1473 init_completion(&fbi->refresh_done);
1474#endif
1475 1780
1476 return fbi; 1781 return fbi;
1477} 1782}
@@ -1544,7 +1849,9 @@ static int __devinit parse_opt(struct device *dev, char *this_opt)
1544 1849
1545 s[0] = '\0'; 1850 s[0] = '\0';
1546 1851
1547 if (!strncmp(this_opt, "mode:", 5)) { 1852 if (!strncmp(this_opt, "vmem:", 5)) {
1853 video_mem_size = memparse(this_opt + 5, NULL);
1854 } else if (!strncmp(this_opt, "mode:", 5)) {
1548 return parse_opt_mode(dev, this_opt); 1855 return parse_opt_mode(dev, this_opt);
1549 } else if (!strncmp(this_opt, "pixclock:", 9)) { 1856 } else if (!strncmp(this_opt, "pixclock:", 9)) {
1550 mode->pixclock = simple_strtoul(this_opt+9, NULL, 0); 1857 mode->pixclock = simple_strtoul(this_opt+9, NULL, 0);
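A usage sketch, assuming the usual fbdev option plumbing rather than anything stated in the patch: booting with something like video=pxafb:vmem:4M,mode:640x480-16 hands "vmem:4M" to parse_opt(), and since memparse() understands the K/M/G suffixes this reserves 4 MiB of video memory regardless of the initial mode.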
@@ -1748,8 +2055,7 @@ static int __devinit pxafb_probe(struct platform_device *dev)
1748 ret = -EINVAL; 2055 ret = -EINVAL;
1749 goto failed; 2056 goto failed;
1750 } 2057 }
1751 pxafb_backlight_power = inf->pxafb_backlight_power; 2058
1752 pxafb_lcd_power = inf->pxafb_lcd_power;
1753 fbi = pxafb_init_fbinfo(&dev->dev); 2059 fbi = pxafb_init_fbinfo(&dev->dev);
1754 if (!fbi) { 2060 if (!fbi) {
1755 /* only reason for pxafb_init_fbinfo to fail is kmalloc */ 2061 /* only reason for pxafb_init_fbinfo to fail is kmalloc */
@@ -1758,6 +2064,9 @@ static int __devinit pxafb_probe(struct platform_device *dev)
1758 goto failed; 2064 goto failed;
1759 } 2065 }
1760 2066
2067 fbi->backlight_power = inf->pxafb_backlight_power;
2068 fbi->lcd_power = inf->pxafb_lcd_power;
2069
1761 r = platform_get_resource(dev, IORESOURCE_MEM, 0); 2070 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
1762 if (r == NULL) { 2071 if (r == NULL) {
1763 dev_err(&dev->dev, "no I/O memory resource defined\n"); 2072 dev_err(&dev->dev, "no I/O memory resource defined\n");
@@ -1779,12 +2088,20 @@ static int __devinit pxafb_probe(struct platform_device *dev)
1779 goto failed_free_res; 2088 goto failed_free_res;
1780 } 2089 }
1781 2090
1782 /* Initialize video memory */ 2091 fbi->dma_buff_size = PAGE_ALIGN(sizeof(struct pxafb_dma_buff));
1783 ret = pxafb_map_video_memory(fbi); 2092 fbi->dma_buff = dma_alloc_coherent(fbi->dev, fbi->dma_buff_size,
2093 &fbi->dma_buff_phys, GFP_KERNEL);
2094 if (fbi->dma_buff == NULL) {
2095 dev_err(&dev->dev, "failed to allocate memory for DMA\n");
2096 ret = -ENOMEM;
2097 goto failed_free_io;
2098 }
2099
2100 ret = pxafb_init_video_memory(fbi);
1784 if (ret) { 2101 if (ret) {
1785 dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret); 2102 dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret);
1786 ret = -ENOMEM; 2103 ret = -ENOMEM;
1787 goto failed_free_io; 2104 goto failed_free_dma;
1788 } 2105 }
1789 2106
1790 irq = platform_get_irq(dev, 0); 2107 irq = platform_get_irq(dev, 0);
@@ -1801,13 +2118,12 @@ static int __devinit pxafb_probe(struct platform_device *dev)
1801 goto failed_free_mem; 2118 goto failed_free_mem;
1802 } 2119 }
1803 2120
1804#ifdef CONFIG_FB_PXA_SMARTPANEL
1805 ret = pxafb_smart_init(fbi); 2121 ret = pxafb_smart_init(fbi);
1806 if (ret) { 2122 if (ret) {
1807 dev_err(&dev->dev, "failed to initialize smartpanel\n"); 2123 dev_err(&dev->dev, "failed to initialize smartpanel\n");
1808 goto failed_free_irq; 2124 goto failed_free_irq;
1809 } 2125 }
1810#endif 2126
1811 /* 2127 /*
1812 * This makes sure that our colour bitfield 2128 * This makes sure that our colour bitfield
1813 * descriptors are correctly initialised. 2129 * descriptors are correctly initialised.
@@ -1833,6 +2149,8 @@ static int __devinit pxafb_probe(struct platform_device *dev)
1833 goto failed_free_cmap; 2149 goto failed_free_cmap;
1834 } 2150 }
1835 2151
2152 pxafb_overlay_init(fbi);
2153
1836#ifdef CONFIG_CPU_FREQ 2154#ifdef CONFIG_CPU_FREQ
1837 fbi->freq_transition.notifier_call = pxafb_freq_transition; 2155 fbi->freq_transition.notifier_call = pxafb_freq_transition;
1838 fbi->freq_policy.notifier_call = pxafb_freq_policy; 2156 fbi->freq_policy.notifier_call = pxafb_freq_policy;
@@ -1855,8 +2173,10 @@ failed_free_cmap:
1855failed_free_irq: 2173failed_free_irq:
1856 free_irq(irq, fbi); 2174 free_irq(irq, fbi);
1857failed_free_mem: 2175failed_free_mem:
1858 dma_free_writecombine(&dev->dev, fbi->map_size, 2176 free_pages_exact(fbi->video_mem, fbi->video_mem_size);
1859 fbi->map_cpu, fbi->map_dma); 2177failed_free_dma:
2178 dma_free_coherent(&dev->dev, fbi->dma_buff_size,
2179 fbi->dma_buff, fbi->dma_buff_phys);
1860failed_free_io: 2180failed_free_io:
1861 iounmap(fbi->mmio_base); 2181 iounmap(fbi->mmio_base);
1862failed_free_res: 2182failed_free_res:
@@ -1881,6 +2201,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
1881 2201
1882 info = &fbi->fb; 2202 info = &fbi->fb;
1883 2203
2204 pxafb_overlay_exit(fbi);
1884 unregister_framebuffer(info); 2205 unregister_framebuffer(info);
1885 2206
1886 pxafb_disable_controller(fbi); 2207 pxafb_disable_controller(fbi);
@@ -1891,8 +2212,10 @@ static int __devexit pxafb_remove(struct platform_device *dev)
1891 irq = platform_get_irq(dev, 0); 2212 irq = platform_get_irq(dev, 0);
1892 free_irq(irq, fbi); 2213 free_irq(irq, fbi);
1893 2214
1894 dma_free_writecombine(&dev->dev, fbi->map_size, 2215 free_pages_exact(fbi->video_mem, fbi->video_mem_size);
1895 fbi->map_cpu, fbi->map_dma); 2216
2217 dma_free_writecombine(&dev->dev, fbi->dma_buff_size,
2218 fbi->dma_buff, fbi->dma_buff_phys);
1896 2219
1897 iounmap(fbi->mmio_base); 2220 iounmap(fbi->mmio_base);
1898 2221
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h
index 31541b86f13d..2353521c5c8c 100644
--- a/drivers/video/pxafb.h
+++ b/drivers/video/pxafb.h
@@ -54,11 +54,55 @@ enum {
54#define PALETTE_SIZE (256 * 4) 54#define PALETTE_SIZE (256 * 4)
55#define CMD_BUFF_SIZE (1024 * 50) 55#define CMD_BUFF_SIZE (1024 * 50)
56 56
57/* NOTE: the palette and frame dma descriptors are doubled to allow
58 * the 2nd set for branch settings (FBRx)
59 */
57struct pxafb_dma_buff { 60struct pxafb_dma_buff {
58 unsigned char palette[PAL_MAX * PALETTE_SIZE]; 61 unsigned char palette[PAL_MAX * PALETTE_SIZE];
59 uint16_t cmd_buff[CMD_BUFF_SIZE]; 62 uint16_t cmd_buff[CMD_BUFF_SIZE];
60 struct pxafb_dma_descriptor pal_desc[PAL_MAX]; 63 struct pxafb_dma_descriptor pal_desc[PAL_MAX * 2];
61 struct pxafb_dma_descriptor dma_desc[DMA_MAX]; 64 struct pxafb_dma_descriptor dma_desc[DMA_MAX * 2];
65};
66
67enum {
68 OVERLAY1,
69 OVERLAY2,
70};
71
72enum {
73 OVERLAY_FORMAT_RGB = 0,
74 OVERLAY_FORMAT_YUV444_PACKED,
75 OVERLAY_FORMAT_YUV444_PLANAR,
76 OVERLAY_FORMAT_YUV422_PLANAR,
77 OVERLAY_FORMAT_YUV420_PLANAR,
78};
79
80#define NONSTD_TO_XPOS(x) (((x) >> 0) & 0x3ff)
81#define NONSTD_TO_YPOS(x) (((x) >> 10) & 0x3ff)
82#define NONSTD_TO_PFOR(x) (((x) >> 20) & 0x7)
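/* Illustrative sketch only, not part of the patch: user space would pack the
 * same fields the other way round before issuing FBIOPUT_VSCREENINFO on the
 * overlay device, e.g. with a hypothetical helper such as
 *
 *	#define POS_FORMAT_TO_NONSTD(xpos, ypos, pfor) \
 *		((((xpos) & 0x3ff) << 0) | \
 *		 (((ypos) & 0x3ff) << 10) | \
 *		 (((pfor) & 0x7) << 20))
 *
 * whose result goes into fb_var_screeninfo.nonstd.
 */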
83
84struct pxafb_layer;
85
86struct pxafb_layer_ops {
87 void (*enable)(struct pxafb_layer *);
88 void (*disable)(struct pxafb_layer *);
89 void (*setup)(struct pxafb_layer *);
90};
91
92struct pxafb_layer {
93 struct fb_info fb;
94 int id;
95 atomic_t usage;
96 uint32_t control[2];
97
98 struct pxafb_layer_ops *ops;
99
100 void __iomem *video_mem;
101 unsigned long video_mem_phys;
102 size_t video_mem_size;
103 struct completion branch_done;
104
105 struct pxafb_info *fbi;
62}; 106};
63 107
64struct pxafb_info { 108struct pxafb_info {
@@ -69,24 +113,15 @@ struct pxafb_info {
69 void __iomem *mmio_base; 113 void __iomem *mmio_base;
70 114
71 struct pxafb_dma_buff *dma_buff; 115 struct pxafb_dma_buff *dma_buff;
116 size_t dma_buff_size;
72 dma_addr_t dma_buff_phys; 117 dma_addr_t dma_buff_phys;
73 dma_addr_t fdadr[DMA_MAX]; 118 dma_addr_t fdadr[DMA_MAX * 2];
74 119
75 /* 120 void __iomem *video_mem; /* virtual address of frame buffer */
76 * These are the addresses we mapped 121 unsigned long video_mem_phys; /* physical address of frame buffer */
77 * the framebuffer memory region to. 122 size_t video_mem_size; /* size of the frame buffer */
78 */
79 /* raw memory addresses */
80 dma_addr_t map_dma; /* physical */
81 u_char * map_cpu; /* virtual */
82 u_int map_size;
83
84 /* addresses of pieces placed in raw buffer */
85 u_char * screen_cpu; /* virtual address of frame buffer */
86 dma_addr_t screen_dma; /* physical address of frame buffer */
87 u16 * palette_cpu; /* virtual address of palette memory */ 123 u16 * palette_cpu; /* virtual address of palette memory */
88 u_int palette_size; 124 u_int palette_size;
89 ssize_t video_offset;
90 125
91 u_int lccr0; 126 u_int lccr0;
92 u_int lccr3; 127 u_int lccr3;
@@ -120,10 +155,17 @@ struct pxafb_info {
120 struct task_struct *smart_thread; 155 struct task_struct *smart_thread;
121#endif 156#endif
122 157
158#ifdef CONFIG_FB_PXA_OVERLAY
159 struct pxafb_layer overlay[2];
160#endif
161
123#ifdef CONFIG_CPU_FREQ 162#ifdef CONFIG_CPU_FREQ
124 struct notifier_block freq_transition; 163 struct notifier_block freq_transition;
125 struct notifier_block freq_policy; 164 struct notifier_block freq_policy;
126#endif 165#endif
166
167 void (*lcd_power)(int, struct fb_var_screeninfo *);
168 void (*backlight_power)(int);
127}; 169};
128 170
129#define TO_INF(ptr,member) container_of(ptr,struct pxafb_info,member) 171#define TO_INF(ptr,member) container_of(ptr,struct pxafb_info,member)
@@ -148,4 +190,10 @@ struct pxafb_info {
148#define MIN_XRES 64 190#define MIN_XRES 64
149#define MIN_YRES 64 191#define MIN_YRES 64
150 192
 193/* maximum X and Y resolutions - note these are limits imposed by the
 194 * width of the register bit fields, not the real panel maximums
195 */
196#define MAX_XRES 1024
197#define MAX_YRES 1024
198
151#endif /* __PXAFB_H__ */ 199#endif /* __PXAFB_H__ */
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index c052bd4c0b06..076f946fa0f5 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -114,7 +114,7 @@
114 * - convert dma address types to dma_addr_t 114 * - convert dma address types to dma_addr_t
115 * - remove unused 'montype' stuff 115 * - remove unused 'montype' stuff
116 * - remove redundant zero inits of init_var after the initial 116 * - remove redundant zero inits of init_var after the initial
117 * memzero. 117 * memset.
118 * - remove allow_modeset (acornfb idea does not belong here) 118 * - remove allow_modeset (acornfb idea does not belong here)
119 * 119 *
120 * 2001/05/28: <rmk@arm.linux.org.uk> 120 * 2001/05/28: <rmk@arm.linux.org.uk>
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 5b78fd0aff0a..018c070a357f 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -176,7 +176,7 @@ int register_virtio_device(struct virtio_device *dev)
176 176
177 /* Assign a unique device index and hence name. */ 177 /* Assign a unique device index and hence name. */
178 dev->index = dev_index++; 178 dev->index = dev_index++;
179 sprintf(dev->dev.bus_id, "virtio%u", dev->index); 179 dev_set_name(&dev->dev, "virtio%u", dev->index);
180 180
181 /* We always start by resetting the device, in case a previous 181 /* We always start by resetting the device, in case a previous
182 * driver messed it up. This also tests that code path a little. */ 182 * driver messed it up. This also tests that code path a little. */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 62eab43152d2..59268266b79a 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -56,6 +56,15 @@ static struct virtio_device_id id_table[] = {
56 { 0 }, 56 { 0 },
57}; 57};
58 58
59static u32 page_to_balloon_pfn(struct page *page)
60{
61 unsigned long pfn = page_to_pfn(page);
62
63 BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
64 /* Convert pfn from Linux page size to balloon page size. */
65 return pfn >> (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);
66}
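On 4 KiB-page builds PAGE_SHIFT equals VIRTIO_BALLOON_PFN_SHIFT, the shift is zero and Linux PFNs pass through unchanged; the BUILD_BUG_ON is there to catch a (hypothetical) kernel whose page size is smaller than the 4 KiB page unit the balloon protocol counts in.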
67
59static void balloon_ack(struct virtqueue *vq) 68static void balloon_ack(struct virtqueue *vq)
60{ 69{
61 struct virtio_balloon *vb; 70 struct virtio_balloon *vb;
@@ -99,7 +108,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
99 msleep(200); 108 msleep(200);
100 break; 109 break;
101 } 110 }
102 vb->pfns[vb->num_pfns] = page_to_pfn(page); 111 vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page);
103 totalram_pages--; 112 totalram_pages--;
104 vb->num_pages++; 113 vb->num_pages++;
105 list_add(&page->lru, &vb->pages); 114 list_add(&page->lru, &vb->pages);
@@ -132,7 +141,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
132 for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { 141 for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
133 page = list_first_entry(&vb->pages, struct page, lru); 142 page = list_first_entry(&vb->pages, struct page, lru);
134 list_del(&page->lru); 143 list_del(&page->lru);
135 vb->pfns[vb->num_pfns] = page_to_pfn(page); 144 vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page);
136 vb->num_pages--; 145 vb->num_pages--;
137 } 146 }
138 147
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index c7dc37c7cce9..265fdf2d1276 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -75,7 +75,7 @@ MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
75 * would make more sense for virtio to not insist on having it's own device. */ 75 * would make more sense for virtio to not insist on having it's own device. */
76static struct device virtio_pci_root = { 76static struct device virtio_pci_root = {
77 .parent = NULL, 77 .parent = NULL,
78 .bus_id = "virtio-pci", 78 .init_name = "virtio-pci",
79}; 79};
80 80
81/* Convert a generic virtio device to our structure */ 81/* Convert a generic virtio device to our structure */
@@ -216,7 +216,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
216 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 216 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
217 struct virtio_pci_vq_info *info; 217 struct virtio_pci_vq_info *info;
218 struct virtqueue *vq; 218 struct virtqueue *vq;
219 unsigned long flags; 219 unsigned long flags, size;
220 u16 num; 220 u16 num;
221 int err; 221 int err;
222 222
@@ -237,19 +237,20 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
237 info->queue_index = index; 237 info->queue_index = index;
238 info->num = num; 238 info->num = num;
239 239
240 info->queue = kzalloc(PAGE_ALIGN(vring_size(num,PAGE_SIZE)), GFP_KERNEL); 240 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
241 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
241 if (info->queue == NULL) { 242 if (info->queue == NULL) {
242 err = -ENOMEM; 243 err = -ENOMEM;
243 goto out_info; 244 goto out_info;
244 } 245 }
245 246
246 /* activate the queue */ 247 /* activate the queue */
247 iowrite32(virt_to_phys(info->queue) >> PAGE_SHIFT, 248 iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
248 vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); 249 vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
249 250
250 /* create the vring */ 251 /* create the vring */
251 vq = vring_new_virtqueue(info->num, vdev, info->queue, 252 vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
252 vp_notify, callback); 253 vdev, info->queue, vp_notify, callback);
253 if (!vq) { 254 if (!vq) {
254 err = -ENOMEM; 255 err = -ENOMEM;
255 goto out_activate_queue; 256 goto out_activate_queue;
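The intent visible in this hunk is to decouple the ring layout from the guest page size: the ring is now sized and aligned to VIRTIO_PCI_VRING_ALIGN (4 KiB in the virtio-pci header) and its address is programmed in units of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT, so a guest built with larger pages still presents the same register format to the host.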
@@ -266,7 +267,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
266 267
267out_activate_queue: 268out_activate_queue:
268 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); 269 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
269 kfree(info->queue); 270 free_pages_exact(info->queue, size);
270out_info: 271out_info:
271 kfree(info); 272 kfree(info);
272 return ERR_PTR(err); 273 return ERR_PTR(err);
@@ -277,7 +278,7 @@ static void vp_del_vq(struct virtqueue *vq)
277{ 278{
278 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 279 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
279 struct virtio_pci_vq_info *info = vq->priv; 280 struct virtio_pci_vq_info *info = vq->priv;
280 unsigned long flags; 281 unsigned long flags, size;
281 282
282 spin_lock_irqsave(&vp_dev->lock, flags); 283 spin_lock_irqsave(&vp_dev->lock, flags);
283 list_del(&info->node); 284 list_del(&info->node);
@@ -289,7 +290,8 @@ static void vp_del_vq(struct virtqueue *vq)
289 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); 290 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
290 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); 291 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
291 292
292 kfree(info->queue); 293 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
294 free_pages_exact(info->queue, size);
293 kfree(info); 295 kfree(info);
294} 296}
295 297
@@ -305,6 +307,20 @@ static struct virtio_config_ops virtio_pci_config_ops = {
305 .finalize_features = vp_finalize_features, 307 .finalize_features = vp_finalize_features,
306}; 308};
307 309
310static void virtio_pci_release_dev(struct device *_d)
311{
312 struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
313 struct virtio_pci_device *vp_dev = to_vp_device(dev);
314 struct pci_dev *pci_dev = vp_dev->pci_dev;
315
316 free_irq(pci_dev->irq, vp_dev);
317 pci_set_drvdata(pci_dev, NULL);
318 pci_iounmap(pci_dev, vp_dev->ioaddr);
319 pci_release_regions(pci_dev);
320 pci_disable_device(pci_dev);
321 kfree(vp_dev);
322}
323
308/* the PCI probing function */ 324/* the PCI probing function */
309static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, 325static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
310 const struct pci_device_id *id) 326 const struct pci_device_id *id)
@@ -328,6 +344,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
328 return -ENOMEM; 344 return -ENOMEM;
329 345
330 vp_dev->vdev.dev.parent = &virtio_pci_root; 346 vp_dev->vdev.dev.parent = &virtio_pci_root;
347 vp_dev->vdev.dev.release = virtio_pci_release_dev;
331 vp_dev->vdev.config = &virtio_pci_config_ops; 348 vp_dev->vdev.config = &virtio_pci_config_ops;
332 vp_dev->pci_dev = pci_dev; 349 vp_dev->pci_dev = pci_dev;
333 INIT_LIST_HEAD(&vp_dev->virtqueues); 350 INIT_LIST_HEAD(&vp_dev->virtqueues);
@@ -357,7 +374,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
357 374
358 /* register a handler for the queue with the PCI device's interrupt */ 375 /* register a handler for the queue with the PCI device's interrupt */
359 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, 376 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
360 vp_dev->vdev.dev.bus_id, vp_dev); 377 dev_name(&vp_dev->vdev.dev), vp_dev);
361 if (err) 378 if (err)
362 goto out_set_drvdata; 379 goto out_set_drvdata;
363 380
@@ -387,12 +404,6 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
387 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); 404 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
388 405
389 unregister_virtio_device(&vp_dev->vdev); 406 unregister_virtio_device(&vp_dev->vdev);
390 free_irq(pci_dev->irq, vp_dev);
391 pci_set_drvdata(pci_dev, NULL);
392 pci_iounmap(pci_dev, vp_dev->ioaddr);
393 pci_release_regions(pci_dev);
394 pci_disable_device(pci_dev);
395 kfree(vp_dev);
396} 407}
397 408
398#ifdef CONFIG_PM 409#ifdef CONFIG_PM
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6eb5303fed11..5777196bf6c9 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -274,6 +274,7 @@ static struct virtqueue_ops vring_vq_ops = {
274}; 274};
275 275
276struct virtqueue *vring_new_virtqueue(unsigned int num, 276struct virtqueue *vring_new_virtqueue(unsigned int num,
277 unsigned int vring_align,
277 struct virtio_device *vdev, 278 struct virtio_device *vdev,
278 void *pages, 279 void *pages,
279 void (*notify)(struct virtqueue *), 280 void (*notify)(struct virtqueue *),
@@ -292,7 +293,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
292 if (!vq) 293 if (!vq)
293 return NULL; 294 return NULL;
294 295
295 vring_init(&vq->vring, num, pages, PAGE_SIZE); 296 vring_init(&vq->vring, num, pages, vring_align);
296 vq->vq.callback = callback; 297 vq->vq.callback = callback;
297 vq->vq.vdev = vdev; 298 vq->vq.vdev = vdev;
298 vq->vq.vq_ops = &vring_vq_ops; 299 vq->vq.vq_ops = &vring_vq_ops;
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index f7f6ce82a5e2..e31925ee8346 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -42,7 +42,7 @@
42#undef S3C_VA_WATCHDOG 42#undef S3C_VA_WATCHDOG
43#define S3C_VA_WATCHDOG (0) 43#define S3C_VA_WATCHDOG (0)
44 44
45#include <asm/plat-s3c/regs-watchdog.h> 45#include <plat/regs-watchdog.h>
46 46
47#define PFX "s3c2410-wdt: " 47#define PFX "s3c2410-wdt: "
48 48
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index ed01e4c2beff..e19b45794717 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -27,6 +27,7 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30#include <linux/timex.h>
30 31
31#ifdef CONFIG_ARCH_PXA 32#ifdef CONFIG_ARCH_PXA
32#include <mach/pxa-regs.h> 33#include <mach/pxa-regs.h>
@@ -35,8 +36,7 @@
35#include <mach/reset.h> 36#include <mach/reset.h>
36#include <mach/hardware.h> 37#include <mach/hardware.h>
37 38
38#define OSCR_FREQ CLOCK_TICK_RATE 39static unsigned long oscr_freq;
39
40static unsigned long sa1100wdt_users; 40static unsigned long sa1100wdt_users;
41static int pre_margin; 41static int pre_margin;
42static int boot_status; 42static int boot_status;
@@ -123,12 +123,12 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
123 break; 123 break;
124 } 124 }
125 125
126 pre_margin = OSCR_FREQ * time; 126 pre_margin = oscr_freq * time;
127 OSMR3 = OSCR + pre_margin; 127 OSMR3 = OSCR + pre_margin;
128 /*fall through*/ 128 /*fall through*/
129 129
130 case WDIOC_GETTIMEOUT: 130 case WDIOC_GETTIMEOUT:
131 ret = put_user(pre_margin / OSCR_FREQ, p); 131 ret = put_user(pre_margin / oscr_freq, p);
132 break; 132 break;
133 } 133 }
134 return ret; 134 return ret;
@@ -155,6 +155,8 @@ static int __init sa1100dog_init(void)
155{ 155{
156 int ret; 156 int ret;
157 157
158 oscr_freq = get_clock_tick_rate();
159
158 /* 160 /*
159 * Read the reset status, and save it for later. If 161 * Read the reset status, and save it for later. If
160 * we suspend, RCSR will be cleared, and the watchdog 162 * we suspend, RCSR will be cleared, and the watchdog
@@ -162,7 +164,7 @@ static int __init sa1100dog_init(void)
162 */ 164 */
163 boot_status = (reset_status & RESET_STATUS_WATCHDOG) ? 165 boot_status = (reset_status & RESET_STATUS_WATCHDOG) ?
164 WDIOF_CARDRESET : 0; 166 WDIOF_CARDRESET : 0;
165 pre_margin = OSCR_FREQ * margin; 167 pre_margin = oscr_freq * margin;
166 168
167 ret = misc_register(&sa1100dog_miscdev); 169 ret = misc_register(&sa1100dog_miscdev);
168 if (ret == 0) 170 if (ret == 0)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index eba5ec5b020e..add640ff5c6c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -141,8 +141,12 @@ static void init_evtchn_cpu_bindings(void)
141 int i; 141 int i;
142 142
143 /* By default all event channels notify CPU#0. */ 143 /* By default all event channels notify CPU#0. */
144 for_each_irq_desc(i, desc) 144 for_each_irq_desc(i, desc) {
145 if (!desc)
146 continue;
147
145 desc->affinity = cpumask_of_cpu(0); 148 desc->affinity = cpumask_of_cpu(0);
149 }
146#endif 150#endif
147 151
148 memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); 152 memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
@@ -229,15 +233,20 @@ static void unmask_evtchn(int port)
229static int find_unbound_irq(void) 233static int find_unbound_irq(void)
230{ 234{
231 int irq; 235 int irq;
236 struct irq_desc *desc;
232 237
233 /* Only allocate from dynirq range */ 238 /* Only allocate from dynirq range */
234 for_each_irq_nr(irq) 239 for (irq = 0; irq < nr_irqs; irq++)
235 if (irq_bindcount[irq] == 0) 240 if (irq_bindcount[irq] == 0)
236 break; 241 break;
237 242
238 if (irq == nr_irqs) 243 if (irq == nr_irqs)
239 panic("No available IRQ to bind to: increase nr_irqs!\n"); 244 panic("No available IRQ to bind to: increase nr_irqs!\n");
240 245
246 desc = irq_to_desc_alloc_cpu(irq, 0);
247 if (WARN_ON(desc == NULL))
248 return -1;
249
241 return irq; 250 return irq;
242} 251}
243 252
@@ -792,7 +801,7 @@ void xen_irq_resume(void)
792 mask_evtchn(evtchn); 801 mask_evtchn(evtchn);
793 802
794 /* No IRQ <-> event-channel mappings. */ 803 /* No IRQ <-> event-channel mappings. */
795 for_each_irq_nr(irq) 804 for (irq = 0; irq < nr_irqs; irq++)
796 irq_info[irq].evtchn = 0; /* zap event-channel binding */ 805 irq_info[irq].evtchn = 0; /* zap event-channel binding */
797 806
798 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 807 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
@@ -824,7 +833,7 @@ void __init xen_init_IRQ(void)
824 mask_evtchn(i); 833 mask_evtchn(i);
825 834
826 /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ 835 /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
827 for_each_irq_nr(i) 836 for (i = 0; i < nr_irqs; i++)
828 irq_bindcount[i] = 0; 837 irq_bindcount[i] = 0;
829 838
830 irq_ctx_init(smp_processor_id()); 839 irq_ctx_init(smp_processor_id());